id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3211910 | import torch
from registry import registry
from models.model_base import Model, StandardTransform, StandardNormalization
from mldb.utils import load_model_state_dict
# Per-model configuration for the Facebook semi-supervised (ssl) and
# semi-weakly-supervised (swsl) ImageNet models.  Every entry shares the
# standard ImageNet preprocessing (resize 256, crop 224, ImageNet mean/std);
# only the architecture name and the evaluation batch size vary, so the dict
# is generated from a compact (name, arch, batch size) table.
# Note: 'resnext101_32x16d_swsl' is intentionally absent (disabled upstream).
model_params = {
    name: {
        'arch': arch,
        'eval_batch_size': eval_batch_size,
        'img_crop_size': 224,
        'img_resize_size': 256,
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225],
    }
    for name, arch, eval_batch_size in [
        ('resnet18_ssl', 'resnet18', 256),
        ('resnet18_swsl', 'resnet18', 256),
        ('resnet50_ssl', 'resnet50', 256),
        ('resnet50_swsl', 'resnet50', 256),
        ('resnext50_32x4d_ssl', 'resnext50_32x4d', 256),
        ('resnext50_32x4d_swsl', 'resnext50_32x4d', 256),
        ('resnext101_32x4d_ssl', 'resnext101_32x4d', 32),
        ('resnext101_32x4d_swsl', 'resnext101_32x4d', 32),
        ('resnext101_32x8d_ssl', 'resnext101_32x8d', 16),
        ('resnext101_32x8d_swsl', 'resnext101_32x8d', 16),
        ('resnext101_32x16d_ssl', 'resnext101_32x16d', 16),
    ]
}
def gen_classifier_loader(name, d):
    """Build a zero-argument loader for the named hub model.

    The returned callable downloads the architecture from torch.hub and then
    overwrites its weights via ``load_model_state_dict``.  ``d`` (the model's
    parameter dict) is currently unused but kept for interface compatibility
    with the registration loop below.
    """
    def classifier_loader():
        net = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', name)
        load_model_state_dict(net, name)
        return net
    return classifier_loader
# Register every configured model with the global registry.  Each entry gets
# the standard resize/crop transform and ImageNet normalization derived from
# its parameter dict.
for name, params in model_params.items():
    model = Model(
        name = name,
        arch = params['arch'],
        transform = StandardTransform(params['img_resize_size'], params['img_crop_size']),
        normalization = StandardNormalization(params['mean'], params['std']),
        classifier_loader = gen_classifier_loader(name, params),
        eval_batch_size = params['eval_batch_size'],
        # Same as the original conditional lookup: None when the key is absent.
        adversarial_batch_size = params.get('adversarial_batch_size')
    )
    registry.add_model(model)
| StarcoderdataPython |
214013 | <reponame>matu3ba/cports
# cports build template metadata for the versioned libffi runtime package.
pkgname = "libffi8"
pkgver = "3.4.2"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
    "--includedir=/usr/include", "--disable-multi-os-directory", "--with-pic"
]
hostmakedepends = ["pkgconf"]
# actually only on x86 and arm (tramp.c code) but it does not hurt
makedepends = ["linux-headers"]
checkdepends = ["dejagnu"]
pkgdesc = "Library supporting Foreign Function Interfaces"
maintainer = "q66 <<EMAIL>>"
license = "MIT"
url = "http://sourceware.org/libffi"
source = f"https://github.com/libffi/libffi/releases/download/v{pkgver}/libffi-{pkgver}.tar.gz"
sha256 = "540fb721619a6aba3bdeef7d940d8e9e0e6d2c193595bc243241b77ff9e93620"
# missing checkdepends for now
options = ["!check"]
def post_install(self):
    """Ship the MIT license text with the package (required for MIT)."""
    self.install_license("LICENSE")
@subpackage("libffi-devel")
def _devel(self):
    """Standard -devel split; additionally capture man pages and GNU info files."""
    return self.default_devel(man = True, extra = ["usr/share/info"])
| StarcoderdataPython |
12827057 | import math
import torch
from torch.nn.modules.loss import _Loss
__all__ = ['DiagLoss', 'ADD_loss', 'WingLoss', 'LossManager']
class DiagLoss(_Loss):
    """Smooth-L1 loss between the bounding-box diagonal lengths of the
    predicted and target keypoint sets (diagonals via ``compute_diag``)."""

    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)
        # Smooth L1 with the quadratic/linear transition at 0.4.
        self.l1_loss = torch.nn.SmoothL1Loss(beta=.4)

    def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        predicted_diag = compute_diag(input_)
        target_diag = compute_diag(target)
        return self.l1_loss(predicted_diag, target_diag)
class ADD_loss(_Loss):
    """Average-distance (ADD) loss: mean over the batch of the per-instance
    sum of point-wise Euclidean distances between prediction and target.

    Expects tensors of shape (batch, n_points, coord_dim).
    """

    def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # per-point L2 distance -> per-instance sum -> batch mean
        point_distances = torch.linalg.norm(input_ - target, dim=2)
        per_instance_total = point_distances.sum(dim=1)
        return per_instance_total.mean()
class WingLoss(_Loss):
    """Wing loss from https://arxiv.org/pdf/1711.06753.pdf: logarithmic near
    zero (amplifies small localisation errors), shifted L1 beyond ``w``."""

    def __init__(self, size_average=None, reduce=None, w=0.05, eps=2, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)
        self.w = w          # transition point between the log and linear regimes
        self.eps = eps      # curvature of the log regime

    def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # Offset that makes the two regimes continuous at |x| == w.
        offset = self.w - self.wing_core(self.w, self.w, self.eps)
        diff = torch.abs(input_ - target)
        small = diff < self.w
        diff[small] = self.wing_core(diff[small], self.w, self.eps)
        # Re-evaluated after the in-place update, exactly as before.
        diff[diff >= self.w] -= offset
        return torch.mean(diff)

    @staticmethod
    def wing_core(x, w, eps):
        """Calculates the wing function from https://arxiv.org/pdf/1711.06753.pdf"""
        log = math.log if isinstance(x, float) else torch.log
        return w * log(1. + x / eps)
def compute_diag(input_: torch.Tensor):
    """Return, per batch instance, the diagonal length of the axis-aligned
    bounding box enclosing the 2D points in ``input_`` (batch, n_points, 2)."""
    xs = input_[:, :, 0]
    ys = input_[:, :, 1]
    width = torch.max(xs, dim=1).values - torch.min(xs, dim=1).values
    height = torch.max(ys, dim=1).values - torch.min(ys, dim=1).values
    return torch.sqrt(width ** 2 + height ** 2)
class LossManager:
    """Combines weighted regression and classification criteria and, when
    enabled, re-balances them with an adaptive loss-weighting scheme
    ("alwa" -- NOTE(review): the algorithm's reference is not given here;
    confirm against the project docs).
    """

    def __init__(self, criterions, coefficients, alwa):
        # criterions / coefficients are (regression, classification) pairs.
        self.reg_criterions, self.class_criterions = criterions
        self.reg_coeffs, self.class_coeffs = coefficients
        assert len(self.reg_coeffs) == len(self.reg_criterions)
        assert len(self.class_coeffs) == len(self.class_criterions)
        assert self.reg_criterions
        self.use_alwa = alwa.use
        if alwa.use:
            # alwa needs both loss families and unit base coefficients.
            assert self.class_criterions
            assert self.reg_coeffs[0] == self.class_coeffs[0] == 1.
            # init lambdas for alwa algorithm
            self.lam_cls = alwa.lam_cls
            self.lam_reg = alwa.lam_reg
            # Running windows of weighted losses, flushed every C iterations.
            self.s_cls = list()
            self.s_reg = list()
            self.C = alwa.C
            # ver_1 compares mean + std, ver_2 compares means only.
            self.alwa_version = 'ver_1' if alwa.compute_std else 'ver_2'

    def parse_losses(self, pred_kp, gt_kp,
                     pred_cats, gt_cats, iter_):
        """Compute the combined loss for one iteration.

        pred_kp / gt_kp    -- predicted and ground-truth keypoints.
        pred_cats / gt_cats -- predicted and ground-truth categories.
        iter_              -- current iteration index (drives the alwa window).
        Returns a scalar loss tensor.  When alwa is active, may also update
        self.lam_cls as a side effect every C iterations.
        """
        class_loss = []
        regress_loss = []
        # compute losses
        if self.class_criterions:
            for k, cr in zip(self.class_coeffs, self.class_criterions):
                class_loss.append(cr(pred_cats, gt_cats) * k)
        else:
            # No classifier heads: contribute a zero so sums below still work.
            class_loss = torch.zeros(1, requires_grad=True)
        for k, cr in zip(self.reg_coeffs, self.reg_criterions):
            regress_loss.append(cr(pred_kp, gt_kp) * k)
        reg_loss = sum(regress_loss)
        cls_loss = sum(class_loss)
        # compute alwa algo or just return sum of losses
        if not self.use_alwa:
            return sum(regress_loss) + sum(class_loss)
        self.s_cls.append(self.lam_cls*cls_loss)
        self.s_reg.append(self.lam_reg*reg_loss)
        # Every C iterations, compare the windows' statistics and shrink the
        # classification weight if classification dominates.
        if iter_ % self.C == 0 and iter_ != 0:
            cls_mean = torch.mean(torch.stack(self.s_cls))
            cls_std = torch.std(torch.stack(self.s_cls))
            reg_mean = torch.mean(torch.stack(self.s_reg))
            reg_std = torch.std(torch.stack(self.s_reg))
            self.s_cls.clear()
            self.s_reg.clear()
            if self.alwa_version == 'ver_1':
                cls = cls_mean + cls_std
                reg = reg_mean + reg_std
            else:
                cls = cls_mean
                reg = reg_mean
            if cls > reg:
                # Rescale lam_cls so the weighted losses would have matched.
                self.lam_cls = (1 - (cls - reg)/cls).item()
                print(f"classification coefficient changed : {self.lam_cls}")
        return self.lam_reg * sum(regress_loss) + self.lam_cls * sum(class_loss)
| StarcoderdataPython |
12805822 | import json
import sys
import matplotlib.pyplot as plt
def main(path):
    """Load a JSON results file and plot its uncertainty curve.

    path -- path to a JSON file containing 'gp_x' (training points) and
            'gp_y' (average uncertainty) arrays.
    """
    # Fix: the original pre-initialized ``vals = {}`` only to overwrite it
    # immediately; json.load either succeeds or raises, so the fallback
    # value was dead code.
    with open(path) as f:
        vals = json.load(f)
    plt.plot(vals['gp_x'], vals['gp_y'])
    plt.title('Uncertainty curve')
    plt.xlabel('Training points')
    plt.ylabel('Average uncertainty')
    plt.show()

if __name__ == '__main__':
    main(sys.argv[1])
| StarcoderdataPython |
1976492 | <filename>day4-5.py
# Twowaits problem: determine the election winner.  The candidate with the
# most votes wins; ties are broken by the lexicographically smallest name.
# Fix: the original contained the bare line ``Twowaits Problem`` (a missing
# '#'), which is a SyntaxError and prevented the script from running at all.
from collections import Counter

votes = ['aman', 'ammy', 'amaan', 'amann', 'aaman', 'amu',
         'ammu', 'amy', 'ammmyy', 'ammyy', 'aman', 'parna', 'naman']

def find_winner(ballots):
    """Return the winning name: most votes, ties broken lexicographically."""
    # Count the votes for persons and store them in a dictionary.
    vote_count = Counter(ballots)
    # Find the maximum number of votes.
    max_votes = max(vote_count.values())
    # Among candidates with the maximum, pick the lexicographically smallest.
    return min(name for name, n in vote_count.items() if n == max_votes)

print(find_winner(votes))
| StarcoderdataPython |
239171 | import unittest
import sequeval
class IndexListTestSuite(unittest.TestCase):
    """Behavioural tests for :class:`sequeval.IndexList`."""

    def test_setitem(self):
        """Assigning by index must also update the reverse lookup."""
        subject = sequeval.IndexList()
        subject.append(None)
        subject[0] = 'Zero'
        self.assertEqual(subject[0], 'Zero')
        self.assertEqual(subject.index('Zero'), 0)

    def test_append(self):
        """Appending a duplicate keeps one entry per distinct value."""
        subject = sequeval.IndexList()
        for value in ('Zero', 'One', 'Zero'):
            subject.append(value)
        self.assertEqual(subject[0], 'Zero')
        self.assertEqual(subject[1], 'One')
        self.assertEqual(subject.index('Zero'), 0)
        self.assertEqual(subject.index('One'), 1)

    def test_delete(self):
        """Deleting an entry leaves lookups unaffected."""
        subject = sequeval.IndexList()
        subject.append('Zero')
        del subject[0]
        self.assertEqual(subject[0], 'Zero')
        self.assertEqual(subject.index('Zero'), 0)

    def test_len(self):
        """Duplicates do not increase the reported length."""
        subject = sequeval.IndexList()
        for value in ('Zero', 'One', 'Zero'):
            subject.append(value)
        self.assertEqual(len(subject), 2)

    def test_insert(self):
        """Insert disregards the requested position for new values."""
        subject = sequeval.IndexList()
        subject.insert(1, 'Zero')
        self.assertEqual(subject[0], 'Zero')
        self.assertEqual(subject.index('Zero'), 0)
# Allow running this test module directly: ``python <this file>.py``.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6575370 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging metadata for the ``torchfid`` distribution.
# allows to get version via python setup.py --version
__version__ = "0.1.0"
setup(
    name="torchfid",
    version=__version__,
    description="Installable FID score calculation",
    url="https://github.com/rromb/torchfid",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    packages=['torchfid'],
    # Runtime dependencies; torchvision supplies the Inception weights.
    install_requires=[
        "tqdm",
        "numpy",
        "scipy",
        "torch",
        "torchvision"
    ],
    zip_safe=False,
    classifiers=[
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
    ],
)
| StarcoderdataPython |
249868 | # Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
from ZSI.wstools.Utility import SplitQName
from ZSI.wstools.Namespaces import WSDL
from ZSI import *
from ZSI.client import *
from ZSI.TC import Any
from ZSI.typeinterpreter import BaseTypeInterpreter
import wstools
from wstools.Utility import DOM
from urlparse import urlparse
import weakref
class ServiceProxy:
"""A ServiceProxy provides a convenient way to call a remote web
service that is described with WSDL. The proxy exposes methods
that reflect the methods of the remote web service."""
def __init__(self, wsdl, service=None, port=None, tracefile=None,
typesmodule=None, nsdict=None, soapAction=None, ns=None, op_ns=None, use_wsdl=False):
"""
Instance data
use_wsdl -- if True try to construct XML Instance from
information in WSDL.
"""
if not hasattr(wsdl, 'targetNamespace'):
wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl)
# for item in wsdl.types.items():
# self._serializer.loadSchema(item)
self._service = wsdl.services[service or 0]
self.__doc__ = self._service.documentation
self._port = self._service.ports[port or 0]
self._name = self._service.name
self._wsdl = wsdl
self._tracefile = tracefile
self._typesmodule = typesmodule
self._nsdict = nsdict or {}
self._soapAction = soapAction
self._ns = ns
self._op_ns = op_ns
self._use_wsdl = use_wsdl
binding = self._port.getBinding()
portType = binding.getPortType()
for item in portType.operations:
callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name)
method = MethodProxy(self, callinfo)
setattr(self, item.name, method)
def _call(self, name, *args, **kwargs):
"""Call the named remote web service method."""
if len(args) and len(kwargs):
raise TypeError(
'Use positional or keyword argument only.'
)
callinfo = getattr(self, name).callinfo
soapAction = callinfo.soapAction
url = callinfo.location
(protocol, host, uri, query, fragment, identifier) = urlparse(url)
port = '80'
if host.find(':') >= 0:
host, port = host.split(':')
binding = Binding(host=host, tracefile=self._tracefile,
ssl=(protocol == 'https'),
port=port, url=None, typesmodule=self._typesmodule,
nsdict=self._nsdict, soapaction=self._soapAction,
ns=self._ns, op_ns=self._op_ns)
if self._use_wsdl:
request, response = self._getTypeCodes(callinfo)
if len(kwargs): args = kwargs
if request is None:
request = Any(oname=name)
binding.Send(url=uri, opname=None, obj=args,
nsdict=self._nsdict, soapaction=soapAction, requesttypecode=request)
return binding.Receive(replytype=response)
apply(getattr(binding, callinfo.methodName), args)
return binding.Receive()
def _getTypeCodes(self, callinfo):
"""Returns typecodes representing input and output messages, if request and/or
response fails to be generated return None for either or both.
callinfo -- WSDLTools.SOAPCallInfo instance describing an operation.
"""
prefix = None
self._resetPrefixDict()
if callinfo.use == 'encoded':
prefix = self._getPrefix(callinfo.namespace)
try:
requestTC = self._getTypeCode(parameters=callinfo.getInParameters(), literal=(callinfo.use=='literal'))
except EvaluateException, ex:
print "DEBUG: Request Failed to generate --", ex
requestTC = None
self._resetPrefixDict()
try:
replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(), literal=(callinfo.use=='literal'))
except EvaluateException, ex:
print "DEBUG: Response Failed to generate --", ex
replyTC = None
request = response = None
if callinfo.style == 'rpc':
if requestTC: request = TC.Struct(pyclass=None, ofwhat=requestTC, pname=callinfo.methodName)
if replyTC: response = TC.Struct(pyclass=None, ofwhat=replyTC, pname='%sResponse' %callinfo.methodName)
else:
if requestTC: request = requestTC[0]
if replyTC: response = replyTC[0]
#THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper
if request and prefix and callinfo.use == 'encoded':
request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
%{'prefix':prefix, 'name':request.oname, 'namespaceURI':callinfo.namespace}
return request, response
def _getTypeCode(self, parameters, literal=False):
"""Returns typecodes representing a parameter set
parameters -- list of WSDLTools.ParameterInfo instances representing
the parts of a WSDL Message.
"""
ofwhat = []
for part in parameters:
namespaceURI,localName = part.type
if part.element_type:
#global element
element = self._wsdl.types[namespaceURI].elements[localName]
tc = self._getElement(element, literal=literal, local=False, namespaceURI=namespaceURI)
else:
#local element
name = part.name
typeClass = self._getTypeClass(namespaceURI, localName)
if not typeClass:
tp = self._wsdl.types[namespaceURI].types[localName]
tc = self._getType(tp, name, literal, local=True, namespaceURI=namespaceURI)
else:
tc = typeClass(name)
ofwhat.append(tc)
return ofwhat
def _globalElement(self, typeCode, namespaceURI, literal):
"""namespaces typecodes representing global elements with
literal encoding.
typeCode -- typecode representing an element.
namespaceURI -- namespace
literal -- True/False
"""
if literal:
typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
%{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}
def _getPrefix(self, namespaceURI):
"""Retrieves a prefix/namespace mapping.
namespaceURI -- namespace
"""
prefixDict = self._getPrefixDict()
if prefixDict.has_key(namespaceURI):
prefix = prefixDict[namespaceURI]
else:
prefix = 'ns1'
while prefix in prefixDict.values():
prefix = 'ns%d' %int(prefix[-1]) + 1
prefixDict[namespaceURI] = prefix
return prefix
def _getPrefixDict(self):
"""Used to hide the actual prefix dictionary.
"""
if not hasattr(self, '_prefixDict'):
self.__prefixDict = {}
return self.__prefixDict
def _resetPrefixDict(self):
"""Clears the prefix dictionary, this needs to be done
before creating a new typecode for a message
(ie. before, and after creating a new message typecode)
"""
self._getPrefixDict().clear()
def _getElement(self, element, literal=False, local=False, namespaceURI=None):
"""Returns a typecode instance representing the passed in element.
element -- XMLSchema.ElementDeclaration instance
literal -- literal encoding?
local -- is locally defined?
namespaceURI -- namespace
"""
if not element.isElement():
raise TypeError, 'Expecting an ElementDeclaration'
tc = None
elementName = element.getAttribute('name')
tp = element.getTypeDefinition('type')
typeObj = None
if not (tp or element.content):
nsuriType,localName = element.getAttribute('type')
typeClass = self._getTypeClass(nsuriType,localName)
typeObj = typeClass(elementName)
elif not tp:
tp = element.content
if not typeObj:
typeObj = self._getType(tp, elementName, literal, local, namespaceURI)
minOccurs = int(element.getAttribute('minOccurs'))
typeObj.optional = not minOccurs
maxOccurs = element.getAttribute('maxOccurs')
typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)
return typeObj
def _getType(self, tp, name, literal, local, namespaceURI):
"""Returns a typecode instance representing the passed in type and name.
tp -- XMLSchema.TypeDefinition instance
name -- element name
literal -- literal encoding?
local -- is locally defined?
namespaceURI -- namespace
"""
ofwhat = []
if not (tp.isDefinition() and tp.isComplex()):
raise EvaluateException, 'only supporting complexType definition'
elif tp.content.isComplex():
if hasattr(tp.content, 'derivation') and tp.content.derivation.isRestriction():
derived = tp.content.derivation
typeClass = self._getTypeClass(*derived.getAttribute('base'))
if typeClass == TC.Array:
attrs = derived.attr_content[0].attributes[WSDL.BASE]
prefix, localName = SplitQName(attrs['arrayType'])
nsuri = derived.attr_content[0].getXMLNS(prefix=prefix)
localName = localName.split('[')[0]
simpleTypeClass = self._getTypeClass(namespaceURI=nsuri, localName=localName)
if simpleTypeClass:
ofwhat = simpleTypeClass()
else:
tp = self._wsdl.types[nsuri].types[localName]
ofwhat = self._getType(tp=tp, name=None, literal=literal, local=True, namespaceURI=nsuri)
else:
raise EvaluateException, 'only support soapenc:Array restrictions'
return typeClass(atype=name, ofwhat=ofwhat, pname=name, childNames='item')
else:
raise EvaluateException, 'complexContent only supported for soapenc:Array derivations'
elif tp.content.isModelGroup():
modelGroup = tp.content
for item in modelGroup.content:
ofwhat.append(self._getElement(item, literal=literal, local=True))
tc = TC.Struct(pyclass=None, ofwhat=ofwhat, pname=name)
if not local:
self._globalElement(tc, namespaceURI=namespaceURI, literal=literal)
return tc
raise EvaluateException, 'only supporting complexType w/ model group, or soapenc:Array restriction'
def _getTypeClass(self, namespaceURI, localName):
"""Returns a typecode class representing the type we are looking for.
localName -- name of the type we are looking for.
namespaceURI -- defining XMLSchema targetNamespace.
"""
bti = BaseTypeInterpreter()
simpleTypeClass = bti.get_typeclass(localName, namespaceURI)
return simpleTypeClass
class MethodProxy:
    """Callable proxy bound to one remote operation of a ServiceProxy.

    Holds only a weak reference to the owning proxy so the proxy's
    per-operation attributes do not create reference cycles.
    """

    def __init__(self, parent, callinfo):
        self.callinfo = callinfo
        self.__name__ = callinfo.methodName
        self.__doc__ = callinfo.documentation
        self.parent = weakref.ref(parent)

    def __call__(self, *args, **kwargs):
        owner = self.parent()
        return owner._call(self.__name__, *args, **kwargs)
| StarcoderdataPython |
3475253 | <gh_stars>1-10
"""Three-dimensional mobjects."""
from __future__ import annotations
__all__ = [
"ThreeDVMobject",
"Surface",
"ParametricSurface",
"Sphere",
"Dot3D",
"Cube",
"Prism",
"Cone",
"Arrow3D",
"Cylinder",
"Line3D",
"Torus",
]
from typing import *
import numpy as np
from colour import Color
from manim.mobject.opengl_compatibility import ConvertToOpenGL
from .. import config
from ..constants import *
from ..mobject.geometry import Circle, Square
from ..mobject.mobject import *
from ..mobject.opengl_mobject import OpenGLMobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.color import *
from ..utils.deprecation import deprecated
from ..utils.iterables import tuplify
from ..utils.space_ops import normalize, perpendicular_bisector, z_to_vector
class ThreeDVMobject(VMobject, metaclass=ConvertToOpenGL):
    # A vectorized mobject that participates in 3D depth sorting by default.
    def __init__(self, shade_in_3d=True, **kwargs):
        """Create the mobject with ``shade_in_3d`` enabled by default."""
        super().__init__(shade_in_3d=shade_in_3d, **kwargs)
class Surface(VGroup, metaclass=ConvertToOpenGL):
    """Creates a Parametric Surface using a checkerboard pattern.

    Parameters
    ----------
    func :
        The function that defines the surface.
    u_range :
        The range of the ``u`` variable: ``(u_min, u_max)``.
    v_range :
        The range of the ``v`` variable: ``(v_min, v_max)``.
    resolution :
        The number of samples taken of the surface. A tuple
        can be used to define different resolutions for ``u`` and
        ``v`` respectively.

    Examples
    --------
    .. manim:: ParaSurface
        :save_last_frame:

        class ParaSurface(ThreeDScene):
            def func(self, u, v):
                return np.array([np.cos(u) * np.cos(v), np.cos(u) * np.sin(v), u])

            def construct(self):
                axes = ThreeDAxes(x_range=[-4,4], x_length=8)
                surface = Surface(
                    lambda u, v: axes.c2p(*self.func(u, v)),
                    u_range=[-PI, PI],
                    v_range=[0, TAU]
                )
                self.set_camera_orientation(theta=70 * DEGREES, phi=75 * DEGREES)
                self.add(axes, surface)
    """

    def __init__(
        self,
        func: Callable[[float, float], np.ndarray],
        u_range: Sequence[float] = [0, 1],
        v_range: Sequence[float] = [0, 1],
        resolution: Sequence[int] = 32,
        surface_piece_config: dict = {},
        fill_color: Color = BLUE_D,
        fill_opacity: float = 1.0,
        checkerboard_colors: Sequence[Color] = [BLUE_D, BLUE_E],
        stroke_color: Color = LIGHT_GREY,
        stroke_width: float = 0.5,
        should_make_jagged: bool = False,
        pre_function_handle_to_anchor_scale_factor: float = 0.00001,
        **kwargs
    ) -> None:
        # NOTE(review): several defaults here are mutable ([0, 1], {}, list of
        # colors) and therefore shared across instances; they are only read in
        # this class -- confirm no caller mutates them in place.
        self.u_range = u_range
        self.v_range = v_range
        super().__init__(**kwargs)
        self.resolution = resolution
        self.surface_piece_config = surface_piece_config
        self.fill_color = fill_color
        self.fill_opacity = fill_opacity
        self.checkerboard_colors = checkerboard_colors
        self.stroke_color = stroke_color
        self.stroke_width = stroke_width
        self.should_make_jagged = should_make_jagged
        self.pre_function_handle_to_anchor_scale_factor = (
            pre_function_handle_to_anchor_scale_factor
        )
        self.func = func
        # Build the face mesh on the flat u/v rectangle, then map every point
        # through ``func`` to obtain the actual surface.
        self._setup_in_uv_space()
        self.apply_function(lambda p: func(p[0], p[1]))
        if self.should_make_jagged:
            self.make_jagged()

    def _get_u_values_and_v_values(self):
        """Return the u and v sample grids (res + 1 points per axis)."""
        # A scalar resolution applies to both axes; a pair is (u_res, v_res).
        res = tuplify(self.resolution)
        if len(res) == 1:
            u_res = v_res = res[0]
        else:
            u_res, v_res = res
        u_values = np.linspace(*self.u_range, u_res + 1)
        v_values = np.linspace(*self.v_range, v_res + 1)
        return u_values, v_values

    def _setup_in_uv_space(self):
        """Create one quad face per grid cell, laid out in the flat u/v plane."""
        u_values, v_values = self._get_u_values_and_v_values()
        faces = VGroup()
        for i in range(len(u_values) - 1):
            for j in range(len(v_values) - 1):
                u1, u2 = u_values[i : i + 2]
                v1, v2 = v_values[j : j + 2]
                face = ThreeDVMobject()
                # Closed quad: the last corner repeats the first.
                face.set_points_as_corners(
                    [
                        [u1, v1, 0],
                        [u2, v1, 0],
                        [u2, v2, 0],
                        [u1, v2, 0],
                        [u1, v1, 0],
                    ],
                )
                faces.add(face)
                # Remember the grid position for checkerboard colouring.
                face.u_index = i
                face.v_index = j
                face.u1 = u1
                face.u2 = u2
                face.v1 = v1
                face.v2 = v2
        faces.set_fill(color=self.fill_color, opacity=self.fill_opacity)
        faces.set_stroke(
            color=self.stroke_color,
            width=self.stroke_width,
            # NOTE(review): ``self.stroke_opacity`` is not assigned in
            # __init__; presumably it comes from the VGroup base -- confirm.
            opacity=self.stroke_opacity,
        )
        self.add(*faces)
        if self.checkerboard_colors:
            self.set_fill_by_checkerboard(*self.checkerboard_colors)

    def set_fill_by_checkerboard(self, *colors, opacity=None):
        """Cycle the given fill colors across the face grid by (u+v) index."""
        n_colors = len(colors)
        for face in self:
            c_index = (face.u_index + face.v_index) % n_colors
            face.set_fill(colors[c_index], opacity=opacity)
        return self

    def set_fill_by_value(
        self,
        axes: Mobject,
        colors: Union[Iterable[Color], Color],
        axis: int = 2,
    ):
        """Sets the color of each mobject of a parametric surface to a color relative to its axis-value

        Parameters
        ----------
        axes :
            The axes for the parametric surface, which will be used to map axis-values to colors.
        colors :
            A list of colors, ordered from lower axis-values to higher axis-values. If a list of tuples is passed
            containing colors paired with numbers, then those numbers will be used as the pivots.
        axis :
            The chosen axis to use for the color mapping. (0 = x, 1 = y, 2 = z)

        Returns
        -------
        :class:`~.Surface`
            The parametric surface with a gradient applied by value. For chaining.

        Examples
        --------
        .. manim:: FillByValueExample
            :save_last_frame:

            class FillByValueExample(ThreeDScene):
                def construct(self):
                    resolution_fa = 42
                    self.set_camera_orientation(phi=75 * DEGREES, theta=-120 * DEGREES)
                    axes = ThreeDAxes(x_range=(0, 5, 1), y_range=(0, 5, 1), z_range=(-1, 1, 0.5))
                    def param_surface(u, v):
                        x = u
                        y = v
                        z = np.sin(x) * np.cos(y)
                        return z
                    surface_plane = Surface(
                        lambda u, v: axes.c2p(u, v, param_surface(u, v)),
                        resolution=(resolution_fa, resolution_fa),
                        v_range=[0, 5],
                        u_range=[0, 5],
                    )
                    surface_plane.set_style(fill_opacity=1)
                    surface_plane.set_fill_by_value(axes=axes, colors=[(RED, -0.4), (YELLOW, 0), (GREEN, 0.4)], axis = 1)
                    self.add(axes, surface_plane)
        """
        ranges = [axes.x_range, axes.y_range, axes.z_range]
        if type(colors[0]) is tuple:
            # (color, pivot) pairs: split them into parallel lists.
            new_colors, pivots = [[i for i, j in colors], [j for i, j in colors]]
        else:
            # Bare colors: spread pivots evenly across the chosen axis range.
            new_colors = colors
            pivot_min = ranges[axis][0]
            pivot_max = ranges[axis][1]
            pivot_frequency = (pivot_max - pivot_min) / (len(new_colors) - 1)
            pivots = np.arange(
                start=pivot_min,
                stop=pivot_max + pivot_frequency,
                step=pivot_frequency,
            )
        for mob in self.family_members_with_points():
            axis_value = axes.point_to_coords(mob.get_midpoint())[axis]
            if axis_value <= pivots[0]:
                mob.set_color(new_colors[0])
            elif axis_value >= pivots[-1]:
                mob.set_color(new_colors[-1])
            else:
                # Interpolate between the two pivots bracketing the value.
                for i, pivot in enumerate(pivots):
                    if pivot > axis_value:
                        color_index = (axis_value - pivots[i - 1]) / (
                            pivots[i] - pivots[i - 1]
                        )
                        color_index = min(color_index, 1)
                        mob_color = interpolate_color(
                            new_colors[i - 1],
                            new_colors[i],
                            color_index,
                        )
                        if config.renderer == "opengl":
                            mob.set_color(mob_color, recurse=False)
                        else:
                            mob.set_color(mob_color, family=False)
                        break
        return self
# Deprecated alias kept for backwards compatibility; use ``Surface`` instead.
@deprecated(since="v0.10.0", replacement=Surface)
class ParametricSurface(Surface):
    # shifts inheritance from Surface/OpenGLSurface depending on the renderer.
    """Creates a parametric surface"""
# Specific shapes
class Sphere(Surface):
    """A mobject representing a three-dimensional sphere.

    Examples
    ---------
    .. manim:: ExampleSphere
        :save_last_frame:

        class ExampleSphere(ThreeDScene):
            def construct(self):
                self.set_camera_orientation(phi=PI / 6, theta=PI / 6)
                sphere1 = Sphere(
                    center=(3, 0, 0),
                    radius=1,
                    resolution=(20, 20),
                    u_range=[0.001, PI - 0.001],
                    v_range=[0, TAU]
                )
                sphere1.set_color(RED)
                self.add(sphere1)
                sphere2 = Sphere(center=(-1, -3, 0), radius=2, resolution=(18, 18))
                sphere2.set_color(GREEN)
                self.add(sphere2)
                sphere3 = Sphere(center=(-1, 2, 0), radius=2, resolution=(16, 16))
                sphere3.set_color(BLUE)
                self.add(sphere3)
    """

    def __init__(
        self,
        center=ORIGIN,
        radius=1,
        resolution=None,
        u_range=(0, TAU),
        v_range=(0, PI),
        **kwargs
    ):
        # Default mesh density depends on the active renderer: the OpenGL
        # renderer handles a much finer mesh than the cairo one.
        if resolution is None:
            resolution = (101, 51) if config.renderer == "opengl" else (24, 12)
        self.radius = radius
        super().__init__(
            self.func,
            resolution=resolution,
            u_range=u_range,
            v_range=v_range,
            **kwargs,
        )
        self.shift(center)

    def func(self, u, v):
        """Spherical parametrization: u is the azimuth, v the polar angle
        (v = 0 maps to the -z pole)."""
        sin_v = np.sin(v)
        return self.radius * np.array(
            [np.cos(u) * sin_v, np.sin(u) * sin_v, -np.cos(v)],
        )
class Dot3D(Sphere):
    """A spherical dot.

    Parameters
    --------
    point : Union[:class:`list`, :class:`numpy.ndarray`], optional
        The location of the dot.
    radius : :class:`float`, optional
        The radius of the dot.
    color : :class:`~.Colors`, optional
        The color of the :class:`Dot3D`

    Examples
    --------
    .. manim:: Dot3DExample
        :save_last_frame:

        class Dot3DExample(ThreeDScene):
            def construct(self):
                self.set_camera_orientation(phi=75*DEGREES, theta=-45*DEGREES)
                axes = ThreeDAxes()
                dot_1 = Dot3D(point=axes.coords_to_point(0, 0, 1), color=RED)
                dot_2 = Dot3D(point=axes.coords_to_point(2, 0, 0), radius=0.1, color=BLUE)
                dot_3 = Dot3D(point=[0, 0, 0], radius=0.1, color=ORANGE)
                self.add(axes, dot_1, dot_2,dot_3)
    """

    def __init__(self, point=ORIGIN, radius=DEFAULT_DOT_RADIUS, color=WHITE, resolution=(8, 8), **kwargs):
        # A Dot3D is simply a small, coarsely-meshed sphere centred on ``point``.
        super().__init__(center=point, radius=radius, resolution=resolution, **kwargs)
        self.set_color(color)
class Cube(VGroup):
    """A cube built from six square faces, one per coordinate direction."""

    def __init__(
        self,
        side_length=2,
        fill_opacity=0.75,
        fill_color=BLUE,
        stroke_width=0,
        **kwargs
    ):
        self.side_length = side_length
        super().__init__(
            fill_color=fill_color,
            fill_opacity=fill_opacity,
            stroke_width=stroke_width,
            **kwargs,
        )

    def generate_points(self):
        """Create one square face per axis direction."""
        for direction in (IN, OUT, LEFT, RIGHT, UP, DOWN):
            face = Square(
                side_length=self.side_length,
                shade_in_3d=True,
            )
            face.flip()
            # Build every face in the +z plane, then rotate it into place.
            face.shift(self.side_length * OUT / 2.0)
            face.apply_matrix(z_to_vector(direction))
            self.add(face)

    # The OpenGL-compatible code path looks up ``init_points``.
    init_points = generate_points
class Prism(Cube):
    """A cuboid.

    Examples
    --------
    .. manim:: ExamplePrism
        :save_last_frame:

        class ExamplePrism(ThreeDScene):
            def construct(self):
                self.set_camera_orientation(phi=60 * DEGREES, theta=150 * DEGREES)
                prismSmall = Prism(dimensions=[1, 2, 3]).rotate(PI / 2)
                prismLarge = Prism(dimensions=[1.5, 3, 4.5]).move_to([2, 0, 0])
                self.add(prismSmall, prismLarge)
    """

    def __init__(self, dimensions=None, **kwargs):
        # Fix: the original default was the mutable list ``[3, 2, 1]``, shared
        # by every instance constructed without an explicit ``dimensions`` --
        # mutating it on one prism would silently affect all others.
        self.dimensions = [3, 2, 1] if dimensions is None else dimensions
        super().__init__(**kwargs)

    def generate_points(self):
        """Create the cube faces, then stretch each axis to its dimension."""
        super().generate_points()
        for dim, value in enumerate(self.dimensions):
            self.rescale_to_fit(value, dim, stretch=True)
class Cone(Surface):
    """A circular cone.

    Can be defined using 2 parameters: its height, and its base radius.
    The polar angle, theta, can be calculated using arctan(base_radius /
    height) The spherical radius, r, is calculated using the pythagorean
    theorem.

    Examples
    --------
    .. manim:: ExampleCone
        :save_last_frame:

        class ExampleCone(ThreeDScene):
            def construct(self):
                axes = ThreeDAxes()
                cone = Cone(direction=X_AXIS+Y_AXIS+2*Z_AXIS)
                self.set_camera_orientation(phi=5*PI/11, theta=PI/9)
                self.add(axes, cone)

    Parameters
    --------
    base_radius : :class:`float`
        The base radius from which the cone tapers.
    height : :class:`float`
        The height measured from the plane formed by the base_radius to the apex of the cone.
    direction : :class:`numpy.array`
        The direction of the apex.
    show_base : :class:`bool`
        Whether to show the base plane or not.
    v_range : :class:`Sequence[float]`
        The azimuthal angle to start and end at.
    u_min : :class:`float`
        The radius at the apex.
    checkerboard_colors : :class:`bool`
        Show checkerboard grid texture on the cone.
    """

    def __init__(
        self,
        base_radius=1,
        height=1,
        direction=Z_AXIS,
        show_base=False,
        v_range=[0, TAU],
        u_min=0,
        checkerboard_colors=False,
        **kwargs
    ):
        self.direction = direction
        # Polar angle of the lateral surface measured from +z; PI - arctan
        # because the parametrization below grows the cone towards -z.
        self.theta = PI - np.arctan(base_radius / height)
        super().__init__(
            self.func,
            v_range=v_range,
            # u runs along the slant height, from the apex to the base rim.
            u_range=[u_min, np.sqrt(base_radius ** 2 + height ** 2)],
            checkerboard_colors=checkerboard_colors,
            **kwargs,
        )
        # used for rotations
        self._current_theta = 0
        self._current_phi = 0
        if show_base:
            self.base_circle = Circle(
                radius=base_radius,
                color=self.fill_color,
                fill_opacity=self.fill_opacity,
                stroke_width=0,
            )
            self.base_circle.shift(height * IN)
            self.add(self.base_circle)
        self._rotate_to_direction()

    def func(self, u, v):
        """Converts from spherical coordinates to cartesian.

        Parameters
        ---------
        u : :class:`float`
            The radius.
        v : :class:`float`
            The azimuthal angle.
        """
        r = u
        phi = v
        return np.array(
            [
                r * np.sin(self.theta) * np.cos(phi),
                r * np.sin(self.theta) * np.sin(phi),
                r * np.cos(self.theta),
            ],
        )

    def _rotate_to_direction(self):
        # Convert ``self.direction`` to spherical angles, undo the previously
        # applied orientation, then rotate so the axis matches the new vector.
        x, y, z = self.direction
        r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
        if r > 0:
            theta = np.arccos(z / r)
        else:
            # Zero vector: leave the cone pointing along +z.
            theta = 0
        if x == 0:
            if y == 0:  # along the z axis
                phi = 0
            else:
                # x == 0, y != 0: np.arctan(np.inf) evaluates to pi/2.
                phi = np.arctan(np.inf)
                if y < 0:
                    phi += PI
        else:
            phi = np.arctan(y / x)
            if x < 0:
                phi += PI
        # Undo old rotation (in reverse order)
        self.rotate(-self._current_phi, Z_AXIS, about_point=ORIGIN)
        self.rotate(-self._current_theta, Y_AXIS, about_point=ORIGIN)
        # Do new rotation
        self.rotate(theta, Y_AXIS, about_point=ORIGIN)
        self.rotate(phi, Z_AXIS, about_point=ORIGIN)
        # Store values
        self._current_theta = theta
        self._current_phi = phi

    def set_direction(self, direction):
        """Point the cone's apex along ``direction`` (a 3-vector)."""
        self.direction = direction
        self._rotate_to_direction()

    def get_direction(self):
        """Return the current apex direction vector."""
        return self.direction
class Cylinder(Surface):
    """A cylinder, defined by its height, radius and direction.

    Examples
    --------
    .. manim:: ExampleCylinder
        :save_last_frame:

        class ExampleCylinder(ThreeDScene):
            def construct(self):
                axes = ThreeDAxes()
                cylinder = Cylinder(radius=2, height=3)
                self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
                self.add(axes, cylinder)

    Parameters
    ----------
    radius : :class:`float`
        The radius of the cylinder.
    height : :class:`float`
        The height of the cylinder.
    direction : :class:`numpy.array`
        The direction of the central axis of the cylinder.
    v_range : :class:`Sequence[float]`
        The height along the height axis (given by direction) to start and end on.
    show_ends : :class:`bool`
        Whether to show the end caps or not.
    """

    def __init__(
        self,
        radius=1,
        height=2,
        direction=Z_AXIS,
        v_range=[0, TAU],
        show_ends=True,
        resolution=(24, 24),
        **kwargs
    ):
        self._height = height
        self.radius = radius
        super().__init__(
            self.func,
            resolution=resolution,
            # u spans the height, centered on the origin; v is the azimuth.
            u_range=[-self._height / 2, self._height / 2],
            v_range=v_range,
            **kwargs,
        )
        if show_ends:
            self.add_bases()
        self._current_phi = 0
        self._current_theta = 0
        self.set_direction(direction)

    def func(self, u, v):
        """Converts from cylindrical coordinates to cartesian.

        Parameters
        ----------
        u : :class:`float`
            The height.
        v : :class:`float`
            The azimuthal angle.
        """
        height = u
        phi = v
        r = self.radius
        return np.array([r * np.cos(phi), r * np.sin(phi), height])

    def add_bases(self):
        """Adds the end caps of the cylinder."""
        # OpenGL mobjects expose color/opacity directly; cairo mobjects use
        # fill_color/fill_opacity.
        color = self.color if config["renderer"] == "opengl" else self.fill_color
        opacity = self.opacity if config["renderer"] == "opengl" else self.fill_opacity
        self.base_top = Circle(
            radius=self.radius,
            color=color,
            fill_opacity=opacity,
            shade_in_3d=True,
            stroke_width=0,
        )
        # NOTE(review): both caps are shifted along IN (-Z), so "base_top"
        # lands at u_range[1] * IN -- confirm the top/bottom naming matches
        # the intended orientation before the direction rotation is applied.
        self.base_top.shift(self.u_range[1] * IN)
        self.base_bottom = Circle(
            radius=self.radius,
            color=color,
            fill_opacity=opacity,
            shade_in_3d=True,
            stroke_width=0,
        )
        self.base_bottom.shift(self.u_range[0] * IN)
        self.add(self.base_top, self.base_bottom)

    def _rotate_to_direction(self):
        """Rotate the cylinder so its axis points along ``self.direction``."""
        x, y, z = self.direction
        # Spherical angles (theta, phi) of the target direction.
        r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
        if r > 0:
            theta = np.arccos(z / r)
        else:
            theta = 0
        if x == 0:
            if y == 0:  # along the z axis
                phi = 0
            else:  # along the x axis
                # x == 0, y != 0: arctan(inf) evaluates to PI/2.
                phi = np.arctan(np.inf)
                if y < 0:
                    phi += PI
        else:
            phi = np.arctan(y / x)
            if x < 0:
                phi += PI
        # undo old rotation (in reverse direction)
        self.rotate(-self._current_phi, Z_AXIS, about_point=ORIGIN)
        self.rotate(-self._current_theta, Y_AXIS, about_point=ORIGIN)
        # do new rotation
        self.rotate(theta, Y_AXIS, about_point=ORIGIN)
        self.rotate(phi, Z_AXIS, about_point=ORIGIN)
        # store new values so the rotation can be undone on the next call
        self._current_theta = theta
        self._current_phi = phi

    def set_direction(self, direction):
        """Re-orient the cylinder's central axis along ``direction``."""
        # if get_norm(direction) is get_norm(self.direction):
        #     pass
        self.direction = direction
        self._rotate_to_direction()

    def get_direction(self):
        """Return the current axis direction."""
        return self.direction
class Line3D(Cylinder):
    """A cylindrical line, for use in ThreeDScene.

    Examples
    --------
    .. manim:: ExampleLine3D
        :save_last_frame:

        class ExampleLine3D(ThreeDScene):
            def construct(self):
                axes = ThreeDAxes()
                line = Line3D(start=np.array([0, 0, 0]), end=np.array([2, 2, 2]))
                self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
                self.add(axes, line)

    Parameters
    ----------
    start : :class:`numpy.array`
        The start position of the line.
    end : :class:`numpy.array`
        The end position of the line.
    thickness : :class:`float`
        The thickness of the line.
    """

    def __init__(self, start=LEFT, end=RIGHT, thickness=0.02, color=None, **kwargs):
        self.thickness = thickness
        # set_start_and_end_attrs invokes Cylinder.__init__ once the
        # direction and length between the endpoints are known.
        self.set_start_and_end_attrs(start, end, **kwargs)
        if color is not None:
            self.set_color(color)

    def set_start_and_end_attrs(self, start, end, **kwargs):
        """Sets the start and end points of the line.

        If either ``start`` or ``end`` are :class:`Mobjects <.Mobject>`, this gives their centers.
        """
        rough_start = self.pointify(start)
        rough_end = self.pointify(end)
        self.vect = rough_end - rough_start
        self.length = np.linalg.norm(self.vect)
        self.direction = normalize(self.vect)
        # Now that we know the direction between them,
        # we can find the appropriate boundary point from
        # start and end, if they're mobjects
        self.start = self.pointify(start, self.direction)
        self.end = self.pointify(end, -self.direction)
        super().__init__(
            height=np.linalg.norm(self.vect),
            radius=self.thickness,
            direction=self.direction,
            **kwargs,
        )
        # Cylinder is built centered on the origin; move it between the
        # endpoints.
        self.shift((self.start + self.end) / 2)

    def pointify(self, mob_or_point, direction=None):
        """Return a concrete point for ``mob_or_point``: the mobject's center
        (or its boundary point along ``direction``) if it is a mobject,
        otherwise the point itself as an array."""
        if isinstance(mob_or_point, (Mobject, OpenGLMobject)):
            mob = mob_or_point
            if direction is None:
                return mob.get_center()
            else:
                return mob.get_boundary_point(direction)
        return np.array(mob_or_point)

    def get_start(self):
        """Return the start point of the line."""
        return self.start

    def get_end(self):
        """Return the end point of the line."""
        return self.end

    @classmethod
    def parallel_to(
        cls, line: Line3D, point: Sequence[float] = ORIGIN, length: float = 5, **kwargs
    ):
        """Returns a line parallel to another line going through
        a given point.

        Parameters
        ----------
        line
            The line to be parallel to.
        point
            The point to pass through.
        length
            Length of the new line.
        kwargs
            Additional parameters to be passed to the class.

        Examples
        --------
        .. manim:: ParallelLineExample
            :save_last_frame:

            class ParallelLineExample(ThreeDScene):
                def construct(self):
                    self.set_camera_orientation(PI / 3, -PI / 4)
                    ax = ThreeDAxes((-5, 5), (-5, 5), (-5, 5), 10, 10, 10)
                    line1 = Line3D(RIGHT * 2, UP + OUT, color=RED)
                    line2 = Line3D.parallel_to(line1, color=YELLOW)
                    self.add(ax, line1, line2)
        """
        point = np.array(point)
        vect = normalize(line.vect)
        # Center the new line on ``point``.
        return cls(
            point + vect * length / 2,
            point - vect * length / 2,
            **kwargs,
        )

    @classmethod
    def perpendicular_to(
        cls, line: Line3D, point: Sequence[float] = ORIGIN, length: float = 5, **kwargs
    ):
        """Returns a line perpendicular to another line going through
        a given point.

        Parameters
        ----------
        line
            The line to be perpendicular to.
        point
            The point to pass through.
        length
            Length of the new line.
        kwargs
            Additional parameters to be passed to the class.

        Raises
        ------
        ValueError
            If ``point`` lies on ``line`` (the perpendicular is undefined).

        Examples
        --------
        .. manim:: PerpLineExample
            :save_last_frame:

            class PerpLineExample(ThreeDScene):
                def construct(self):
                    self.set_camera_orientation(PI / 3, -PI / 4)
                    ax = ThreeDAxes((-5, 5), (-5, 5), (-5, 5), 10, 10, 10)
                    line1 = Line3D(RIGHT * 2, UP + OUT, color=RED)
                    line2 = Line3D.perpendicular_to(line1, color=BLUE)
                    self.add(ax, line1, line2)
        """
        point = np.array(point)
        norm = np.cross(line.vect, point - line.start)
        # The cross product vanishes iff ``point`` is collinear with the
        # line. (The old check compared the scalar norm against
        # ``np.zeros(3)`` and relied on broadcasting; this tests the same
        # condition directly.)
        if np.linalg.norm(norm) == 0:
            raise ValueError("Could not find the perpendicular.")
        start, end = perpendicular_bisector([line.start, line.end], norm)
        vect = normalize(end - start)
        return cls(
            point + vect * length / 2,
            point - vect * length / 2,
            **kwargs,
        )
class Arrow3D(Line3D):
    """An arrow made out of a cylindrical line and a conical tip.

    Examples
    --------
    .. manim:: ExampleArrow3D
        :save_last_frame:

        class ExampleArrow3D(ThreeDScene):
            def construct(self):
                axes = ThreeDAxes()
                arrow = Arrow3D(start=np.array([0, 0, 0]), end=np.array([2, 2, 2]))
                self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
                self.add(axes, arrow)

    Parameters
    ----------
    start : :class:`numpy.array`
        The start position of the arrow.
    end : :class:`numpy.array`
        The end position of the arrow.
    thickness : :class:`float`
        The thickness of the arrow.
    height : :class:`float`
        The height of the conical tip.
    base_radius: :class:`float`
        The base radius of the conical tip.
    """

    def __init__(
        self,
        start=LEFT,
        end=RIGHT,
        thickness=0.02,
        height=0.3,
        base_radius=0.08,
        color=WHITE,
        **kwargs
    ):
        super().__init__(
            start=start, end=end, thickness=thickness, color=color, **kwargs
        )
        self.length = np.linalg.norm(self.vect)
        # Shorten the cylindrical shaft by the tip height so the cone sits
        # flush at the end point instead of overlapping the shaft.
        self.set_start_and_end_attrs(
            self.start,
            self.end - height * self.direction,
            **kwargs,
        )
        self.cone = Cone(
            direction=self.direction, base_radius=base_radius, height=height, **kwargs
        )
        # The cone's apex points along self.direction; place it at the
        # arrow's end point.
        self.cone.shift(end)
        self.add(self.cone)
        self.set_color(color)
class Torus(Surface):
    """A torus.

    Examples
    --------
    .. manim :: ExampleTorus
        :save_last_frame:

        class ExampleTorus(ThreeDScene):
            def construct(self):
                axes = ThreeDAxes()
                torus = Torus()
                self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
                self.add(axes, torus)

    Parameters
    ----------
    major_radius : :class:`float`
        Distance from the center of the tube to the center of the torus.
    minor_radius : :class:`float`
        Radius of the tube.
    """

    def __init__(
        self,
        major_radius=3,
        minor_radius=1,
        u_range=(0, TAU),
        v_range=(0, TAU),
        resolution=None,
        **kwargs
    ):
        # The OpenGL renderer handles a much finer mesh than cairo does.
        if resolution is None:
            resolution = (101, 101) if config.renderer == "opengl" else (24, 24)
        self.R = major_radius
        self.r = minor_radius
        super().__init__(
            self.func,
            u_range=u_range,
            v_range=v_range,
            resolution=resolution,
            **kwargs,
        )

    def func(self, u, v):
        """Parametrize the torus: ``u`` is the angle around the ring,
        ``v`` the angle around the tube."""
        ring_direction = np.array([np.cos(u), np.sin(u), 0])
        return (self.R - self.r * np.cos(v)) * ring_direction - self.r * np.sin(v) * OUT
| StarcoderdataPython |
4905201 | <filename>ssm/factorial_hmm/base.py
import warnings
import jax.numpy as np
import jax.random as jr
from ssm.factorial_hmm.posterior import FactorialHMMPosterior
from ssm.factorial_hmm.initial import FactorialInitialCondition
from ssm.factorial_hmm.transitions import FactorialTransitions
from ssm.factorial_hmm.emissions import FactorialEmissions
from ssm.hmm.base import HMM
from ssm.utils import ensure_has_batch_dim, auto_batch
class FactorialHMM(HMM):
    def __init__(self, num_states: (tuple or list),
                 initial_condition: FactorialInitialCondition,
                 transitions: FactorialTransitions,
                 emissions: FactorialEmissions):
        r"""
        Factorial HMM base class.

        The model consists of :math:`G` discrete latent states
        :math:`z_t = (z_{t1}, \ldots, z_{tG})`. The :math:`g`-th state takes
        values :math:`(0, ..., K_g-1)`.

        Args:
            num_states (tuple or list): number of discrete latent states per group
            initial_condition (FactorialInitialCondition): factorial initial state object
            transitions (FactorialTransitions): factorial transitions object
            emissions (FactorialEmissions): factorial emissions object
        """
        # NOTE(review): the annotation ``(tuple or list)`` evaluates to just
        # ``tuple``; it is informational only.
        super().__init__(num_states, initial_condition, transitions, emissions)

    @ensure_has_batch_dim()
    def initialize(self,
                   data: np.ndarray,
                   covariates: np.ndarray=None,
                   metadata=None,
                   key: jr.PRNGKey=None,
                   method: str="kmeans") -> None:
        """No-op: data-driven initialization is not implemented for factorial
        HMMs; a UserWarning is emitted and current parameters are kept."""
        warnings.warn(UserWarning("FactorialHMM.initialize() is not implemented!"))
        pass

    @auto_batch(batched_args=("data", "covariates", "metadata"))
    def e_step(self, data, covariates=None, metadata=None):
        """Exact E-step: build a FactorialHMMPosterior from the components'
        initial log-probs, per-timestep log-likelihoods, and transition
        log-matrices."""
        return FactorialHMMPosterior.infer(
            self._initial_condition.log_initial_probs(data, covariates=covariates, metadata=metadata),
            self._emissions.log_likelihoods(data, covariates=covariates, metadata=metadata),
            self._transitions.log_transition_matrices(data, covariates=covariates, metadata=metadata))
| StarcoderdataPython |
1845484 | # Generated by Django 2.1 on 2018-08-25 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a nullable ``deadline`` date
    # field to the ``task`` model.

    dependencies = [
        ('task', '0007_auto_20180821_1416'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='deadline',
            field=models.DateField(null=True),
        ),
    ]
| StarcoderdataPython |
1946593 | <reponame>tidalmigrations/machine_stats
"""
Script to prepare JSON output for tidal sync servers from the list of hosts
"""
import argparse
import json
import os
import shutil
from functools import partial
from ansible.utils.path import unfrackpath
# Loading config file must be prior to importing most of the ansible.* packages
def find_config_file():
    """Locate a readable machine-stats configuration file.

    Candidates are checked in order: each accepted file name in the current
    working directory, then dot-prefixed variants in the user's home
    directory. Returns the first readable path, or ``None``.
    """
    cfg_files = [
        "machine_stats.cfg",
        "machine-stats.cfg",
        "machinestats.cfg",
    ]
    candidates = []
    # Current working directory first; skip silently if it is inaccessible.
    try:
        cwd = os.getcwd()
        candidates.extend(os.path.join(cwd, name) for name in cfg_files)
    except OSError:
        pass
    # Then per-user dotfiles.
    candidates.extend(unfrackpath("~/." + name, follow=False) for name in cfg_files)
    for candidate in candidates:
        if os.path.exists(candidate) and os.access(candidate, os.R_OK):
            return candidate
    return None
# Respect a pre-existing ANSIBLE_CONFIG; otherwise point Ansible at our own
# config file. This must run before any ansible.* modules are imported.
if "ANSIBLE_CONFIG" not in os.environ:
    cfg_file = find_config_file()
    if cfg_file is not None:
        os.environ["ANSIBLE_CONFIG"] = cfg_file
import ansible.constants as C
from ansible import context
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.common.collections import ImmutableDict
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.utils.display import Display
from ansible.vars.manager import VariableManager
from pluginbase import PluginBase
# For easier usage calculate the path relative to here.
here = os.path.abspath(os.path.dirname(__file__))  # directory of this module
get_path = partial(os.path.join, here)  # resolve paths relative to the package
display = Display()  # Ansible console display helper, used for warnings
class PluginManager(object):
    """Loads machine-stats plugins and broadcasts method calls to them."""

    def __init__(self):
        # Setup a plugin base for "machine_stats.plugins" and make sure to load
        # all the default built-in plugins from the plugins folder.
        self._base = PluginBase(
            package="machine_stats_plugins", searchpath=[get_path("./plugins")]
        )
        self._source = self._base.make_plugin_source(searchpath=[])

    def __getattr__(self, fn):
        # Any attribute access yields a dispatcher that invokes ``fn`` on
        # every loaded plugin in turn.
        def method(*args, **kwargs):
            for plugin_name in self._source.list_plugins():
                plugin = self._source.load_plugin(plugin_name)
                if not hasattr(plugin, fn):
                    display.warning(
                        "no method '%s' for plugin '%s'" % (fn, plugin_name)
                    )
                    # NOTE(review): this returns from the dispatcher entirely,
                    # skipping any remaining plugins -- confirm whether
                    # ``continue`` was intended instead.
                    return None
                # calling a function of a module by using its name (a string)
                getattr(plugin, fn)(*args, **kwargs)
        return method
def ram_allocated_gb(facts):
    """Total RAM reported by Ansible facts, converted from MB to GB."""
    total_mb = facts["ansible_memtotal_mb"]
    return total_mb / 1024
def ram_used_gb(facts):
    """RAM currently in use (total minus free), in GB."""
    used_mb = facts["ansible_memtotal_mb"] - facts["ansible_memfree_mb"]
    return used_mb / 1024
def _size(key, mounts):
return sum([item.get(key, 0) for item in mounts])
def storage_allocated_gb(facts):
    """Total storage capacity across all mounts, in GB.

    Returns 0 when the facts carry no mount information.
    """
    if "ansible_mounts" not in facts:
        return 0
    total_bytes = _size("size_total", facts["ansible_mounts"])
    return total_bytes / 1024 ** 3
def storage_used_gb(facts):
    """Used storage (total minus available) across all mounts, in GB.

    Returns 0 when the facts carry no mount information.
    """
    if "ansible_mounts" not in facts:
        return 0
    mounts = facts["ansible_mounts"]
    used_bytes = _size("size_total", mounts) - _size("size_available", mounts)
    return used_bytes / 1024 ** 3
def cpu_count(facts):
    """Number of CPUs: the larger of the physical count and the vCPU count
    reported by Ansible (0 when neither fact is present)."""
    physical = int(facts.get("ansible_processor_count", 0))
    virtual = int(facts.get("ansible_processor_vcpus", 0))
    return max(physical, virtual)
def cpu_name(proc):
    """CPU model name from Ansible's processor list.

    The fact is either a single-element list (the name itself) or repeating
    (index, vendor, model) triples, in which case the model sits at index 2.
    """
    if len(proc) == 1:
        return proc[0]
    if len(proc) >= 3:
        return proc[2]
    return "Unknown"
class ResultCallback(CallbackBase):
    """A sample callback plugin used for performing an action as results come in

    If you want to collect all results into a single object for processing at
    the end of the execution, look into utilizing the ``json`` callback plugin
    or writing your own custom callback plugin.
    """

    def __init__(self, plugins, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # host name -> dict of collected machine stats; created lazily.
        self._total_results = None
        self._plugins = plugins

    def v2_runner_on_unreachable(self, result):
        # Report hosts that could not be contacted on stderr.
        host = result._host  # pylint: disable=protected-access
        self._display.error(
            "{0}: {1}".format(
                host.get_name(),
                result._result["msg"],  # pylint: disable=protected-access
            ),
            wrap_text=False,
        )

    def v2_runner_on_failed(self, result, *args, **kwargs):
        # Report task failures on stderr.
        del args, kwargs  # Unused
        host = result._host  # pylint: disable=protected-access
        self._display.error(
            "{0}: {1}".format(
                host.get_name(),
                result._result["msg"],  # pylint: disable=protected-access
            ),
            wrap_text=False,
        )

    def update_results(self, host, data: dict):
        """Merge ``data`` into the accumulated results for ``host``."""
        if self._total_results is None:
            self._total_results = {}
        if host not in self._total_results:
            self._total_results[host] = data
        else:
            self._total_results[host].update(data)

    def v2_runner_on_ok(self, result):
        # Let plugins process the raw result first, then fold the gathered
        # Ansible facts into the per-host machine-stats record.
        self._plugins.ok_callback(self, result)
        facts = result._result.get("ansible_facts")  # pylint: disable=protected-access
        if facts is None:
            return
        host = result._host.get_name()
        self.update_results(
            host,
            {
                "host_name": facts["ansible_hostname"],
                "fqdn": facts["ansible_fqdn"],
                "ip_addresses": facts["ansible_all_ipv4_addresses"]
                + facts["ansible_all_ipv6_addresses"],
                "ram_allocated_gb": ram_allocated_gb(facts),
                "ram_used_gb": ram_used_gb(facts),
                "storage_allocated_gb": storage_allocated_gb(facts),
                "storage_used_gb": storage_used_gb(facts),
                "cpu_count": cpu_count(facts),
                "operating_system": facts["ansible_distribution"],
                "operating_system_version": facts["ansible_distribution_version"],
                "cpu_name": cpu_name(facts["ansible_processor"]),
            },
        )

    def v2_playbook_on_stats(self, stats):
        # Emit the collected servers as JSON on stdout; the human-readable
        # recap goes to stderr so it does not pollute the JSON output.
        if self._total_results is not None:
            print(
                json.dumps(
                    {"servers": list(self._total_results.values())},
                    indent=4,
                    sort_keys=True,
                )
            )
        self._display.display("MACHINE STATS RECAP", stderr=True)
        hosts = sorted(stats.processed.keys())
        for h in hosts:  # pylint: disable=invalid-name
            t = stats.summarize(h)  # pylint: disable=invalid-name
            self._display.display(
                u"%s : %s %s %s %s %s %s %s"
                % (
                    hostcolor(h, t),
                    colorize(u"ok", t["ok"], C.COLOR_OK),  # pylint: disable=no-member
                    colorize(
                        u"changed",
                        t["changed"],
                        C.COLOR_CHANGED,  # pylint: disable=no-member
                    ),
                    colorize(
                        u"unreachable",
                        t["unreachable"],
                        C.COLOR_UNREACHABLE,  # pylint: disable=no-member
                    ),
                    colorize(
                        u"failed",
                        t["failures"],
                        C.COLOR_ERROR,  # pylint: disable=no-member
                    ),
                    colorize(
                        u"skipped",
                        t["skipped"],
                        C.COLOR_SKIP,  # pylint: disable=no-member
                    ),
                    colorize(
                        u"rescued",
                        t["rescued"],
                        C.COLOR_OK,  # pylint: disable=no-member
                    ),
                    colorize(
                        u"ignored",
                        t["ignored"],
                        C.COLOR_WARN,  # pylint: disable=no-member
                    ),
                ),
                screen_only=True,
                stderr=True,
            )
        self._display.display("", screen_only=True, stderr=True)
class Application:  # pylint: disable=too-few-public-methods
    """Machine Stats application"""

    def __init__(
        self, *, sources: list = None, plugins: PluginManager, args: argparse.Namespace
    ):
        if sources is None:
            sources = list()
        self._sources = sources
        self._plugins = plugins
        self.args = args
        self._playbook_tasks = []
        # Plugins may register extra playbook tasks via add_playbook_tasks().
        self._plugins.setup(self)

    def add_playbook_tasks(self, *args):
        """Append one task dict, or a list of task dicts, per argument."""
        for arg in args:
            if isinstance(arg, list):
                self._playbook_tasks.extend(arg)
            else:
                self._playbook_tasks.append(arg)

    def playbook_tasks(self):
        """Return the registered tasks, or ``None`` when there are none."""
        if not self._playbook_tasks:
            return None
        return self._playbook_tasks

    def run(self):
        """Run the Application"""
        # Since the API is constructed for CLI it expects certain options to
        # always be set in the context object
        context.CLIARGS = ImmutableDict(
            connection="smart",
            module_path=[get_path("./modules"), "/usr/share/ansible"],
            forks=10,
            become=None,
            become_method=None,
            become_user=None,
            check=False,
            diff=False,
            verbosity=3,
        )
        # Initialize needed objects
        loader = DataLoader()  # Takes care of finding and reading yaml, json and
        # ini files
        passwords = dict(vault_pass="<PASSWORD>")
        # Instantiate our ResultCallback for handling results as they come in
        results_callback = ResultCallback(plugins=self._plugins)
        # Create inventory, use path to host config file as source or hosts in a
        # comma separated string
        inventory = InventoryManager(loader=loader, sources=self._sources)
        # Variable manager takes care of merging all the different sources to give
        # you a unified view of variables available in each context
        variable_manager = VariableManager(loader=loader, inventory=inventory)
        # Instantiate task queue manager, which takes care of forking and setting
        # up all objects to iterate over host list and tasks
        # IMPORTANT: This also adds library dirs paths to the module loader
        # IMPORTANT: and so it must be initialized before calling `Play.load()`.
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            passwords=passwords,
            stdout_callback=results_callback,  # Use our custom callback instead of
            # the ``default`` callback plugin,
            # which prints to stdout
        )
        # Create data structure that represents our play, including tasks, this is
        # basically what our YAML loader does internally.
        play_source = dict(
            name="Ansible Play",
            hosts="all",
            gather_facts="yes",
            tasks=self.playbook_tasks(),
        )
        # Create play object, playbook objects use .load instead of init or new
        # methods, this will also automatically create the task objects from the
        # info provided in play_source
        play = Play().load(
            play_source, variable_manager=variable_manager, loader=loader
        )
        # Actually run it
        try:
            tqm.load_callbacks()
            tqm.run(play)
            # Trigger the final recap/JSON dump on our callback.
            tqm.send_callback(
                "v2_playbook_on_stats",
                tqm._stats,  # pylint: disable=protected-access
            )
        finally:
            # We always need to cleanup child procs and the structures we use to
            # communicate with them
            tqm.cleanup()
            if loader:
                loader.cleanup_all_tmp_files()
            # Remove ansible tmpdir
            shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)  # pylint: disable=no-member
            if tqm is not None:
                tqm.cleanup()
def main():
    """Main"""
    plugins = PluginManager()
    parser = argparse.ArgumentParser(prog="machine_stats")
    parser.add_argument(
        "hosts",
        metavar="FILE",
        type=argparse.FileType("r"),
        help="inventory file (default 'hosts')",
        nargs="*",
    )
    # Let plugins contribute their own CLI flags.
    plugins.add_arguments(parser)
    args = parser.parse_args()
    if not args.hosts:
        # Fall back to a file literally named "hosts" in the cwd, if present.
        try:
            with open("hosts", "r") as f:  # pylint: disable=invalid-name
                args.hosts.append(f)
        except FileNotFoundError:
            pass
    # InventoryManager wants file paths, not file objects.
    sources = list(map(lambda f: f.name, args.hosts))
    app = Application(sources=sources, plugins=plugins, args=args)
    app.run()
| StarcoderdataPython |
172135 | <filename>tests/test_entropy_encoders/test_arithmetic_coding.py
from typing import List, Sequence
import hypothesis.strategies as st
from entropy_encoders import arithmetic_coding
from hypothesis import given
# Sentinel appended to every symbol stream so the decoder knows where to stop.
EOF = "\n"
# Hypothesis strategy: arbitrary text that never contains the EOF sentinel.
text_strategy = st.text(st.characters(blacklist_characters=EOF),
                        max_size=10**9)
@given(st.lists(text_strategy))
def test_list_of_strings(symbol_list: List):
    """Round-trip property: decode(encode(x)) == x for lists of strings."""
    # list += str extends element-wise; EOF is one character, so this
    # appends the single "\n" sentinel element.
    symbol_list += EOF
    enc = arithmetic_coding.encode(symbol_list, EOF)
    dec = arithmetic_coding.decode(enc)
    assert symbol_list == dec
def test_handwritten():
    """Encode a small hand-checked example and verify the exact encoding
    plus the round trip."""
    pt = {
        "R": 0.4,
        "G": 0.5,
        "B": 0.1,
    }
    string = list("GGB")
    enc = arithmetic_coding.encode(string, "B", probability_table=pt)
    # Value worked out by hand for this probability table.
    assert enc.decimal == "83"
    dec = arithmetic_coding.decode(enc)
    # ``string`` is always a list here, so the old ``isinstance(string, str)``
    # join branch was unreachable and has been removed.
    assert string == dec
3584452 | <reponame>elisabethzinck/Fairness-oriented-interpretability-of-predictive-algorithms
import torch
import os
import platform
import numpy as np
import pandas as pd
import re
import time
import pytorch_lightning as pl
from torchmetrics.functional import accuracy, auroc
from src.models.data_modules import CheXpertDataModule
from src.models.cheXpert_modelling_functions import BinaryClassificationTaskCheXpert
from src.models.data_modules import CheXpertDataModule
from src.models.general_modelling_functions import print_timing
def print_res(acc, AUROC, eval_data):
    """Print accuracy and AUROC for the evaluated split to stdout."""
    lines = (
        "---- Results ----",
        f"\nPredicting on {eval_data}\n",
        f"Accuracy = {acc}\n",
        f"AUROC = {AUROC}\n",
        "------------------",
    )
    for line in lines:
        print(line)
#### Setup #######
if __name__ == '__main__':
    t0 = time.time()
    # Use a single GPU when available, else CPU.
    if torch.cuda.is_available():
        GPU = 1
    else:
        GPU = None
    # os.sched_getaffinity is Linux-only; elsewhere fall back to the
    # main process (num_workers=0).
    if platform.system() == 'Linux':
        n_avail_cpus = len(os.sched_getaffinity(0))
        num_workers = min(n_avail_cpus-1, 8)
    else:
        num_workers = 0
    print(f"Using num_workers = {num_workers}")
    # ---- Start: Inputs in script----
    save_metrics = True
    save_preds = True
    model_name = "adam_dp=2e-1"
    model_type = "best"  # "best" or "last"
    eval_data = "test"
    assert eval_data in ['train', 'val', 'test'], "eval_data must be 'train', 'val' or 'test'"
    dm = CheXpertDataModule(**{
        "target_disease": "Cardiomegaly",
        "uncertainty_approach": "U-Zeros",
        "num_workers": num_workers,
        "tiny_sample_data": False})
    output_path = f"data/CheXpert/predictions/{model_name}/"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # ---- End: Inputs in Script ----
    #### Loading Checkpointed Model #######
    ckpt_folder_path = f"models/CheXpert/checkpoints_from_trainer/{model_name}/"
    if model_type == 'best':
        # Pick the checkpoint whose filename encodes the lowest val_loss.
        files = next(os.walk(ckpt_folder_path))[2]
        best_ckpts = [f for f in files if "val_loss" in f]
        loss_list = []
        for ckpt_file in best_ckpts:
            loss = re.findall("val_loss=(.+).ckpt", ckpt_file)[0]
            loss_list.append(float(loss))
        best_model = best_ckpts[np.argmin(loss_list)]
        model_ckpt = f"{ckpt_folder_path}{best_model}"
    elif model_type == 'last':
        model_ckpt = f"models/CheXpert/checkpoints_from_trainer/{model_name}/{model_type}.ckpt"
    else:
        raise ValueError("model_type must be 'best' or 'last'")
    print(f"model checkpoint: {model_ckpt}")
    pl_model = BinaryClassificationTaskCheXpert()
    pl_trained_model = pl_model.load_from_checkpoint(model_ckpt)
    #### Predictions and Evaluation ######
    print("---- Initializing Training ----")
    trainer = pl.Trainer(
        fast_dev_run = False,
        deterministic = True,
        gpus = GPU,
        progress_bar_refresh_rate = 0)
    # Val, Test or train data to predict on
    cols = ["patient_id"]
    if eval_data == 'val':
        df = (dm.val_data.dataset_df[cols].assign(
            y = dm.val_data.y.squeeze())
        )
        dataloader = dm.val_dataloader()
    elif eval_data == 'test':
        df = (dm.test_data.dataset_df[cols].assign(
            y = dm.test_data.y.squeeze())
        )
        dataloader = dm.test_dataloader()
    elif eval_data == 'train':
        df = (dm.train_data.dataset_df[cols].assign(
            y = dm.train_data.y.squeeze())
        )
        dataloader = dm.train_dataloader()
    print("---- Running Predictions ----")
    out_batches = trainer.predict(pl_trained_model, dataloaders = dataloader)
    # Model outputs logits; sigmoid -> probabilities, threshold at 0.5.
    scores = torch.sigmoid(torch.cat(out_batches, dim = 0))
    preds = (scores > 0.5).to(torch.int8)
    print("---- Calculating Metrics ----")
    labels = torch.from_numpy(df.y.values).unsqueeze(dim=1).to(torch.int8)
    acc = accuracy(preds, labels)
    AUROC = auroc(preds = scores, target = labels)
    print_res(acc, AUROC, eval_data)
    if save_metrics:
        print("---- Saving Metrics ----")
        save_dict = {"Predicted": eval_data,
                     "Accuracy": acc.numpy(),
                     "AUROC": AUROC.numpy()}
        (pd.DataFrame(save_dict, index = [0])
            .to_csv(f"{output_path}{eval_data}_{model_type}_metrics.csv", index=False)
        )
    if save_preds:
        print("---- Saving Predictions ----")
        (df.assign(
            y_hat = preds.numpy(),
            scores = scores.numpy())
            .to_csv(f"{output_path}{eval_data}_{model_type}_predictions.csv", index=False)
        )
    ### FINISHING UP ####
    t1 = time.time()
    print_timing(t0, t1, text = 'Total time to run script:')
| StarcoderdataPython |
77345 | import argparse
import logging
import os
import json
import pickle
import random
import time
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from transformers import AdamW, get_linear_schedule_with_warmup, set_seed
from utils import set_logger, set_seed
from transformers import BertConfig, BertTokenizer
from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer, AutoModel
from net.bert_base import BertForSequenceClassification
from net.bert_attention import BertForSequenceClassificationAttention
from net.bert_lstm import BertForSequenceClassificationLSTM
from net.bert_lstm_attention import BertForSequenceClassificationLSTMAttenion
from processor import sentiment_processors as processors
from processor import sentiment_convert_examples_to_features, SentimentDataset
from train_and_eval import train, test, predict, _predict
logger = logging.getLogger(__name__)

# Maps a --model_name CLI value to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    "bert_base": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "bert_attention": (BertConfig, BertForSequenceClassificationAttention, BertTokenizer),
    "bert_lstm": (BertConfig, BertForSequenceClassificationLSTM, BertTokenizer),
    "bert_lstm_attention": (BertConfig, BertForSequenceClassificationLSTMAttenion, BertTokenizer),
}
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--log_dir", default="log", type=str, required=True, help="设置日志的输出目录")
parser.add_argument(
"--dataset",
choices=["ISEAR", "TEC", "IECE", "SMP2020"],
default="ISEAR",
type=str,
help="应用的数据集,ISEAR, TEC, IECE, SMP2020中4选1",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--pre_train_path",
default=None,
type=str,
required=True,
help="预训练模型所在的路径,包括 pytorch_model.bin, vocab.txt, bert_config.json",
)
parser.add_argument(
"--output_dir",
default="output",
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument("--max_seq_length", default=256, type=int, help="输入到bert的最大长度,通常不应该超过512")
parser.add_argument("--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.")
parser.add_argument("--num_train_epochs", default=20, type=int, help="epoch 数目")
parser.add_argument("--train_batch_size", default=8, type=int, help="训练集的batch_size")
parser.add_argument("--eval_batch_size", default=512, type=int, help="验证集的batch_size")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="梯度累计更新的步骤,用来弥补GPU过小的情况")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="学习率")
parser.add_argument("--weight_decay", default=0.01, type=float, help="权重衰减")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="最大的梯度更新")
parser.add_argument("--seed", type=int, default=233, help="random seed for initialization")
# parser.add_argument("--warmup_steps", default=0, type=int,
# help="让学习增加到1的步数,在warmup_steps后,再衰减到0")
parser.add_argument(
"--warmup_rate", default=0.00, type=float, help="让学习增加到1的步数,在warmup_steps后,再衰减到0,这里设置一个小数,在总训练步数*rate步时开始增加到1"
)
args = parser.parse_args()
args.output_dir = os.path.join(args.output_dir, args.dataset + "_" + args.model_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
assert os.path.exists(os.path.join("data", args.dataset))
assert os.path.exists(args.pre_train_path)
assert os.path.exists(args.output_dir)
# 暂时不写多GPU
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
set_seed(args.seed)
log_dir = os.path.join(
args.log_dir,
args.dataset + "_" + args.model_name + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time())) + ".log",
)
set_logger(log_dir)
data_dir = os.path.join("data", args.dataset)
processor = processors[args.dataset](args, data_dir)
label_list = processor.get_labels()
num_labels = len(label_list)
args.model_name = args.model_name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_name]
if args.do_train:
logging.info("loading pretrained model... ...")
config = config_class.from_pretrained(args.pre_train_path, num_labels=num_labels)
tokenizer = tokenizer_class.from_pretrained(args.pre_train_path, do_lower_case=args.do_lower_case)
config.save_pretrained(args.output_dir)
tokenizer.save_vocabulary(args.output_dir)
model = model_class.from_pretrained(args.pre_train_path, config=config, args=args)
model.to(args.device)
logging.info("load pretrained model end... ...")
logger.info("Training parameters %s", args)
def convert_to_dataset(examples):
    """Convert raw examples into a SentimentDataset of model-ready features.

    Uses ``tokenizer``, ``args`` and ``label_list`` from the enclosing scope.
    """
    features = sentiment_convert_examples_to_features(
        examples=examples, tokenizer=tokenizer, max_length=args.max_seq_length, label_list=label_list
    )
    return SentimentDataset(features)
# Training
if args.do_train:
    logging.info("loading dataset... ...")
    train_examples = processor.get_train_examples()
    train_dataset = convert_to_dataset(train_examples)
    dev_examples = processor.get_dev_examples()
    dev_dataset = convert_to_dataset(dev_examples)
    logging.info("dataset loaded...")
    # NOTE(review): wrapping a Dataset in np.array looks suspicious but is
    # kept as-is — confirm that the downstream train() expects an ndarray.
    train_dataset = np.array(train_dataset)
    dev_dataset = np.array(dev_dataset)
    logging.info("start training... ...")
    train(args, train_dataset, dev_dataset, model)
    logging.info("train end...")
if args.do_eval:
    # Reload the model/tokenizer that training saved into output_dir.
    logging.info("loading trained model... ...")
    tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
    config = config_class.from_pretrained(args.output_dir, num_labels=num_labels)
    model = model_class.from_pretrained(args.output_dir, config=config, args=args)
    model.to(args.device)
    logging.info("load trained model end... ...")
    logger.info("Evaluation parameters %s", args)
# Evaluation
if args.do_eval:
    logging.info("loading dataset... ...")
    test_examples = processor.get_test_examples()
    test_dataset = convert_to_dataset(test_examples)
    logging.info("dataset loaded...")
    # BUGFIX: the dataset was wrapped in np.array twice in a row; the
    # redundant second call has been removed.
    test_dataset = np.array(test_dataset)
    logging.info("start evaluating... ...")
    test_probs = test(args, model, test_dataset)
    logging.info("evaluate end...")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9623195 | # -*- coding: utf-8 -*-
import glob
import os
import pandas as pd
from glassimaging.dataloading.niftidataset import NiftiDataset
import logging
import json
from torch.utils.data import Dataset
class Brats18(NiftiDataset):
    """Data manager for the BraTS 2018 dataset.

    Discovers the per-subject NIfTI image paths on disk and exposes them as
    a pandas DataFrame with one row per patient and one column per sequence
    plus the segmentation.
    """

    available_sequences = ['flair', 't1', 't1Gd', 't2']

    def __init__(self, df=None):
        # The image paths for each subject are stored at initialization.
        NiftiDataset.__init__(self)
        if df is not None:
            self.df = df

    def importData(self, data_loc, nsplits=5):
        """Scan ``data_loc`` for BraTS images and build the path DataFrame.

        Expects one directory per patient containing files named
        ``*_flair.nii.gz``, ``*_t1.nii.gz``, ``*_t1ce.nii.gz``,
        ``*_t2.nii.gz`` and ``*_seg.nii.gz``.
        """
        def paths_by_patient(pattern):
            # Map patient directory name -> file path for the given pattern.
            files = glob.glob(os.path.join(data_loc, '*', pattern))
            return {os.path.basename(os.path.dirname(f)): f for f in files}

        flair_images = paths_by_patient('*_flair.nii.gz')
        t1_images = paths_by_patient('*_t1.nii.gz')
        t1Gd_images = paths_by_patient('*_t1ce.nii.gz')
        t2_images = paths_by_patient('*_t2.nii.gz')
        segmentations = paths_by_patient('*_seg.nii.gz')
        patients = flair_images.keys()
        df = pd.DataFrame.from_dict(flair_images, orient='index', columns=['flair'])
        df['t1'] = ''
        df['t1Gd'] = ''
        df['t2'] = ''
        df['seg'] = ''
        # A KeyError here means a patient directory is missing a modality.
        for p in patients:
            df.at[p, 't1'] = t1_images[p]
            df.at[p, 't1Gd'] = t1Gd_images[p]
            df.at[p, 't2'] = t2_images[p]
            df.at[p, 'seg'] = segmentations[p]
        self.df = df
        # Store a concrete list rather than a live dict view.
        self.patients = list(patients)
        self.createCVSplits(nsplits)

    @staticmethod
    def fromFile(loc, nsplits=5):
        """Create a data manager by scanning the filesystem at ``loc``."""
        instance = Brats18()
        instance.importData(loc, nsplits)
        logging.info('Brats new Datamanager created from ' + loc + '.')
        return instance

    def setSplits(self, splits_file):
        """Load cross-validation split membership from a JSON file."""
        with open(splits_file, 'r') as file:
            splits = json.load(file)
        # Set all patients to split -1 first, so that only patients listed in
        # the splits file are included.
        self.df['split'] = -1
        for i, split in enumerate(splits):
            for p in split:
                self.df.at[p, 'split'] = i

    def getDataset(self, splits=(), sequences=None, transform=None):
        """Return a Brats18Dataset restricted to ``splits`` and ``sequences``."""
        if len(splits) == 0:
            splits = range(0, self.nsplits)
        if sequences is None:
            sequences = self.available_sequences
        mask = [s in splits for s in self.df['split']]
        return Brats18Dataset(self.df.loc[mask], sequences, transform=transform)
class Brats18Dataset(NiftiDataset, Dataset):
    """Torch Dataset view over a Brats18 path DataFrame."""

    def __init__(self, dataframe, sequences, transform=None):
        Dataset.__init__(self)
        NiftiDataset.__init__(self)
        self.df = dataframe
        self.sequences = sequences
        self.patients = self.df.index.values
        self.transform = transform

    def __len__(self):
        """Number of patients in this split."""
        return len(self.patients)

    def __getitem__(self, idx):
        """Load the images and segmentation for the patient at ``idx``."""
        subject = self.patients[idx]
        image, segmentation = self.loadSubjectImages(subject, self.sequences)
        sample = {
            'data': image,
            'seg': segmentation,
            'seg_file': self.getFileName(subject, 'seg'),
            'subject': subject,
        }
        return sample if self.transform is None else self.transform(sample)

    def saveListOfPatients(self, path):
        """Dump the patient identifiers to ``path`` as JSON."""
        with open(path, 'w') as file:
            json.dump(self.patients.tolist(), file)
| StarcoderdataPython |
4815661 | from graphics import drawRectangle, fillRectangle
def mapreader(world):
    '''
    Read map.txt and populate ``world`` with a dict mapping (col, row)
    coordinates to terrain type, plus the map dimensions in tiles.

    Terrain codes: 1 = walkable ('-'), 0 = unwalkable ('#'), 2 = trail ('T').
    '''
    world.mapdict = {}  # key = (col, row), value = terrain code
    world.tileWidth = 0
    lineCount = 0
    # 'with' guarantees the handle is closed (the original leaked it).
    with open('map.txt', 'r') as worlddoc:
        for line in worlddoc:  # line is a row
            # BUGFIX: str.strip() returns a new string; the original discarded
            # the result, so the trailing newline inflated tileWidth by one.
            line = line.rstrip('\n')
            if len(line) > world.tileWidth:
                world.tileWidth = len(line)
            for charCount, char in enumerate(line):  # char index is the column
                if char == '-':
                    world.mapdict[(charCount, lineCount)] = 1  # walkable
                elif char == '#':
                    world.mapdict[(charCount, lineCount)] = 0  # unwalkable
                elif char == 'T':
                    world.mapdict[(charCount, lineCount)] = 2  # trail tile
            lineCount += 1
    world.tileHeight = lineCount
def mapSize():
    '''
    Return (width, height) of the map in tiles: the length of the longest
    row and the number of rows in map.txt. Used to size the window.
    '''
    width = 0
    lineCount = 0
    with open('map.txt', 'r') as worlddoc:
        for line in worlddoc:  # line is a row
            # BUGFIX: the original called line.strip() without using the result
            # and compensated with `width - 1`, which undercounted whenever the
            # longest row was the final line without a trailing newline.
            line = line.rstrip('\n')
            if len(line) > width:
                width = len(line)
            lineCount += 1
    return (width, lineCount)
def findTileFromCords(cords, tileSize):
    '''
    Convert pixel coordinates in the program window into the (col, row) of
    the tile containing them — the coordinates used as keys in mapdict.
    '''
    (x, y) = cords
    # BUGFIX: use floor division; '/' is true division in Python 3 and would
    # produce float coordinates that never match the integer mapdict keys.
    col = x // tileSize
    row = y // tileSize
    return (col, row)
def drawTileHelper(cords, tileSize, tileType):
    '''
    Draw a single filled, outlined tile at grid cell ``cords`` (col, row).

    Colors by tileType: 0 -> red, 1 -> green, 2 -> brown (trail),
    -1 -> yellow (start), -2 -> orange (destination).
    NOTE(review): mapreader encodes 1 = walkable and 0 = unwalkable, the
    opposite of the inline labels below — confirm the intended mapping.
    '''
    (x, y) = cords
    if tileType == 0: # labelled walkable; mapreader uses 0 for '#' (unwalkable)
        fillRectangle(x*tileSize, y*tileSize, tileSize, tileSize, 'red')
        drawRectangle(x*tileSize, y*tileSize, tileSize, tileSize)
    elif tileType == 1: # labelled unwalkable; mapreader uses 1 for '-' (walkable)
        fillRectangle(x*tileSize, y*tileSize, tileSize, tileSize, 'green')
        drawRectangle(x*tileSize, y*tileSize, tileSize, tileSize)
    elif tileType == 2: # trail tile
        fillRectangle(x*tileSize, y*tileSize, tileSize, tileSize, 'brown')
        drawRectangle(x*tileSize, y*tileSize, tileSize, tileSize)
    elif tileType == -1: # start tile
        fillRectangle(x*tileSize, y*tileSize, tileSize, tileSize, 'yellow')
        drawRectangle(x*tileSize, y*tileSize, tileSize, tileSize)
    elif tileType == -2: # destination tile
        fillRectangle(x*tileSize, y*tileSize, tileSize, tileSize, 'orange')
        drawRectangle(x*tileSize, y*tileSize, tileSize, tileSize)
| StarcoderdataPython |
8189323 | <reponame>trueMiskin/tree-structures<gh_stars>0
#!/usr/bin/env python3
from array import array
import random
import time
import sys
CASES = [
    # test number, number of operations, operation chances
    # chance order: [insert, contains, delete, print-ascending, print-descending]
    [1, 100, [0.4, 0.3, 0.3, 0, 0]],
    [2, 1_000, [0.3, 0.3, 0.3, 0.05, 0.05]],
    [3, 10_000, [0.3, 0.3, 0.3, 0.05, 0.05]],
    [4, 100_000, [0.3, 0.3, 0.3, 0.05, 0.05]],
    [5, 50_000, [0.59, 0.4, 0.0, 0.005, 0.005]], # performance check - a balanced tree must be quicker
]
"""
Input: On first line is number of operations. On next N lines contain individual operations.
Operations (key and value are same):
0 <key> - insert
1 <key> - find/contains
2 <key> - delete
3 - print sequence in increasing order
4 - print sequence in decreasing order
"""
# Fixed seed keeps the generated test files reproducible across runs.
random.seed(42)
def insert(key):
    """Insert ``key`` into the sorted global list ``arr``, ignoring duplicates.

    Uses binary search for the insertion point instead of the original
    O(n) linear scan; the observable behaviour is unchanged.
    """
    global arr
    from bisect import bisect_left
    i = bisect_left(arr, key)
    if i < len(arr) and arr[i] == key:
        return  # already present
    arr.insert(i, key)
def delete(key):
    """Remove ``key`` from the global list ``arr``.

    Raises ValueError when the key is absent (same as list.remove).
    """
    global arr
    arr.pop(arr.index(key))
def contains(key):
    """Record a membership query: write "1" to ``fout`` if ``key`` is in the
    reference list ``arr``, else "0".

    Replaces the original exception-driven lookup (broad ``except Exception``
    around list.index) with a plain membership test; output is identical.
    """
    global arr, fout
    fout.write("1\n" if key in arr else "0\n")
def printAll(increasing=True):
    """Record a print operation: write opcode 3 (increasing) or 4
    (decreasing) to ``fin`` and the expected space-separated sequence of
    ``arr`` to ``fout``.
    """
    global arr, fin, fout
    if increasing:
        fin.write("3\n")
        ordered = arr
    else:
        fin.write("4\n")
        # Iterate a reversed copy instead of the original double in-place
        # arr.reverse(), which briefly mutated the shared list.
        ordered = list(reversed(arr))
    fout.write(' '.join(str(x) for x in ordered) + "\n")
def log(msg):
    """Write a status line to stderr (stdout/files carry the test data).

    The parameter was renamed from ``str``, which shadowed the builtin.
    All call sites pass it positionally.
    """
    print(msg, file=sys.stderr)
# Generate each test case and its expected output, timing the whole run.
start = time.time()
for case in CASES:
    test_num, num_op, chances = case
    keys = []   # keys currently present in the structure
    arr = []    # sorted reference model used by insert/delete/contains/printAll
    log(f"Generating test case {test_num}")
    with open(f"test{test_num}.in", "w") as fin, open(f"test{test_num}.out", "w") as fout:
        # +2 accounts for the two final printAll operations appended below.
        fin.write(f"{num_op + 2}\n")
        operation_now = 0
        while operation_now < num_op:
            # Pick an operation index from the cumulative chance table.
            chance = random.random()
            prev_chance = 0
            operation = 0
            for c in chances:
                if prev_chance + c > chance:
                    break
                else:
                    prev_chance += c
                    operation += 1
            if operation == 0:
                # Insert: retry (without counting the round) on duplicates.
                key = random.randint(0, num_op*2)
                if key in keys:
                    continue
                keys.append(key)
                insert(key)
                fin.write(f"{operation} {key}\n")
            elif operation == 1:
                # Contains: sometimes query a fresh (possibly absent) key.
                idx_key = random.randint(0, len(keys))
                if idx_key == len(keys):
                    key = random.randint(0, num_op*2)
                else:
                    key = keys[idx_key]
                fin.write(f"{operation} {key}\n")
                contains(key)
            elif operation == 2:
                # Delete: skip the round when nothing can be removed.
                if len(keys) == 0:
                    continue
                key = keys[random.randint(0, len(keys) - 1)]
                keys.remove(key)
                fin.write(f"{operation} {key}\n")
                delete(key)
            elif operation == 3:
                # printAll writes the opcode into fin itself.
                printAll(True)
            else:
                printAll(False)
            operation_now += 1
        # Final check
        printAll(True)
        printAll(False)
end = time.time()
log(f"Generating tests took {end - start} s")
| StarcoderdataPython |
94485 | import xlrd
import numpy as np
import xlwt
from tempfile import TemporaryFile
# Output workbook for the nearest-neighbour distance chains.
book = xlwt.Workbook()
sheet1 = book.add_sheet('sheet1')
# Load the distance matrix from the first sheet of the route workbook.
data=xlrd.open_workbook(r'C:\Users\Desktop\teamE\D1_route.xlsx')
table=data.sheets()[0]
all_data=[]
row_num=table.nrows
col_num=table.ncols
# NOTE(review): all_loc is never used (its only append is commented out below).
all_loc=[]
for i in range(table.nrows):
    every_row=table.row_values(i)
    all_data.append(every_row)
new_all_data=np.array(all_data)
# Flatten and sort all distances; each one seeds a greedy chain below.
data_try=new_all_data.flatten()
data_try1=sorted(data_try)
for l in range(len(data_try1)):
    j = 1
    order_min=data_try1[l]
    # Locate the seed distance in the matrix; first match is used.
    loca=np.where(new_all_data==order_min)
    all_data_for_choose=new_all_data
    sheet1.write(l, 0, order_min)
    # Repeatedly remove the matched row/column pair and record the minimum of
    # the remaining submatrix — 11 follow-up values per seed.
    # NOTE(review): presumably a nearest-neighbour sensitivity chain over 12
    # locations — confirm against the report this feeds.
    while j<12:
        #all_loc.append([loca[0][0],loca[1][0]])
        change1=np.delete(all_data_for_choose,[loca[0][0],loca[1][0]],0)
        change2=np.delete(change1,[loca[0][0],loca[1][0]],1)
        dis = np.min(change2)
        all_data_for_choose = change2
        loca = np.where(all_data_for_choose == dis)
        sheet1.write(l, j, dis)
        j+=1
name = "find_route_sensitivity_D1.xls"
book.save(name)
# Saving to a TemporaryFile as well mirrors the xlwt documentation example.
book.save(TemporaryFile())
| StarcoderdataPython |
6555835 | <filename>src/scripts/training_stats.py
import argparse
import glob
import json
import os
import time
from datetime import timedelta
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Training stats')
    parser.add_argument('--path', default='outputs/librispeech', help='path')
    parser.add_argument('--checkpoint', default='last', help='checkpoint tag')
    parser.add_argument('--all_speakers', action='store_true', help='Evaluate on all speakers')
    parser.add_argument('--recompute', action='store_true', help='Recompute results')
    parser.add_argument('--tag', default=None, help='Tag of output')
    # NOTE(review): only --path is used below; --checkpoint, --all_speakers,
    # --recompute and --tag are parsed but never read — confirm they belong here.
    args = parser.parse_args()
    # One reconstruction directory per utterance, marked by checkpoint-0.pkl.
    fns = glob.glob(os.path.join(args.path, '**', 'checkpoint-0.pkl'))
    paths = [os.path.split(fn)[0] for fn in fns]
    print('No. utterances: %d' % len(paths))
    # A run counts as finished once checkpoint-last.pkl exists.
    finished = [os.path.split(p)[1] for p in paths if os.path.exists(os.path.join(p, 'checkpoint-last.pkl'))]
    print('No. reconstructions finished: %d' % len(finished))
    in_progress = []
    print('Reconstructions in progress:')
    for p in paths:
        if not os.path.exists(os.path.join(p, 'progress.json')):
            continue
        with open(os.path.join(p, 'progress.json')) as f:
            stats = json.load(f)
        # A heartbeat within the last 10 seconds counts as "in progress".
        if time.time() - stats['last'] < 10:
            in_progress.append(os.path.split(p)[1])
            # NOTE(review): the value labelled "eta" is the elapsed time since
            # 'start', not an estimate of time remaining — confirm the label.
            print(' - %s (pid: %d, eta: %s)' % (os.path.split(p)[-1], stats['process'], str(timedelta(seconds=time.time() - stats['start']))))
3546239 | <gh_stars>0
def greeting():
    """Print a greeting, read the user's reply, and respond accordingly."""
    print("how 're u doing!!")
    reply = input().lower()
    response = "me too great" if reply == "cool n u" else "Hmm all is going to be fine"
    print(response)
# Script entry point: run the interactive greeting immediately.
greeting()
| StarcoderdataPython |
3323757 | <filename>strategy/script/methods/attack.py
#!/usr/bin/env python
from __future__ import print_function
import rospy
import math
class Attack(object):
    """Attacking behaviours for the strategy node."""

    def ClassicAttacking(self, goal_dis, goal_ang):
        """Convert a polar goal (distance, angle in degrees) into velocity
        commands (v_x, v_y, v_yaw); yaw command is the goal angle itself."""
        heading = math.radians(goal_ang)
        vel_x = goal_dis * math.cos(heading)
        vel_y = goal_dis * math.sin(heading)
        return vel_x, vel_y, goal_ang
6558664 | <filename>main.py
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
from operator import add, floordiv, mul, sub
from tkinter import Button, Entry, StringVar, Tk
from typing import Any, Callable, Union
class App:
    """Tk calculator window.

    The typed expression is held as a doubly linked list of CalcItem nodes
    (firstItem .. lastItem); the read-only Entry at the top mirrors it.
    """

    def __init__(self):
        self.root = Tk()
        self.root.title("Calculator")
        self.entryVar = StringVar()
        # Read-only display spanning the full button grid.
        Entry(self.root, width=44, state="readonly", textvariable=self.entryVar
        ).grid(row=0, column=0, columnspan=5)
        self.reset()
        # Digit buttons 1-9 laid out keypad style, then a triple-wide 0.
        for i in range(9):
            CalcValBtn(self, 3 - i // 3, i % 3, CalcVal(i + 1))
        CalcValBtn(self, 4, 0, CalcVal(0), 3)
        # Operator buttons in the right-hand two columns.
        for e in [(1, 3, add), (1, 4, sub), (2, 3, mul), (2, 4, floordiv)]:
            CalcValBtn(self, e[0], e[1], CalcVal(e[2]))
        EvalBtn(self, 3, 3, 2)

    def addItem(self, val: CalcVal):
        """Append a token, merging consecutive digits into one number."""
        if self.lastItem is None:
            self.lastItem = CalcItem(None, val)
            self.firstItem = self.lastItem
        elif not self.lastItem.appendDigit(val):
            next = CalcItem(self.lastItem, val)
            self.lastItem.next = next
            self.lastItem = next
        self.updateEntry()

    def genItems(self):
        """Yield expression items from first to last."""
        currItem = self.firstItem
        while currItem is not None:
            yield currItem
            currItem = currItem.next

    def genItemStrs(self):
        """Yield the display string of each item in order."""
        for item in self.genItems():
            yield str(item.val)

    def itemsByHighPriority(self):
        """Items sorted by descending operator priority (evaluation order)."""
        return sorted(self.genItems(), key=lambda x: -x.priority)

    def reset(self):
        """Clear the expression list."""
        self.firstItem: CalcItem = None
        self.lastItem: CalcItem = None

    def run(self):
        self.root.mainloop()

    def showRes(self, res: Union[int, RuntimeError, str]):
        """Display an evaluation result (or error) in the entry."""
        self.entryVar.set(str(res))

    def updateEntry(self):
        """Re-render the expression list into the entry widget."""
        self.entryVar.set(" ".join(self.genItemStrs()))
class Btn:
    """Base class for calculator buttons: creates the Tk Button and routes
    clicks to onClick()."""

    def __init__(self, app: App, row: int, col: int, span: int, name: str):
        self.app = app
        Button(self.app.root, text=name, padx=40 * span, pady=20,
               command=self.onClick
               ).grid(row=row, column=col, columnspan=span)

    def onClick(self):
        """Overridden by subclasses; the base button does nothing."""
        pass
@dataclass
class CalcItem:
    """Doubly linked list node holding one expression token (CalcVal)."""

    prev: CalcItem
    val: CalcVal

    def __post_init__(self):
        self.next: CalcItem = None
        # Cached operator precedence (0 for numbers).
        self.priority = self.val.priority()

    def appendDigit(self, val: CalcVal):
        """Try to merge a digit token into this node's value."""
        return self.val.appendDigit(val)

    def delete(self):
        # Drop both links; the node itself is left for garbage collection.
        self.prev = None
        self.next = None

    def deleteOp(self):
        """Unlink this operator node together with both operand nodes."""
        self.prev.delete()
        self.next.delete()
        self.delete()

    def eval(self):
        """Apply this operator to its neighbours and splice a result node
        into the list in place of the (operand, op, operand) triple."""
        self.reqOp()
        newNext = self.next.next
        newPrev = self.prev.prev
        new = CalcItem(newPrev, CalcVal(
            self.val(self.prev.toInt(), self.next.toInt())))
        new.next = newNext
        setPrev(newNext, new)
        setNext(newPrev, new)
        self.deleteOp()
        return new

    def isInt(self):
        return self.val.isInt()

    def isOp(self):
        return not self.isInt()

    def reqOp(self):
        """Raise unless this node is an operator with both operands present."""
        if self.isInt():
            raise RuntimeError(f"{self.val} is not an operator.")
        if self.next is None or self.prev is None:
            raise RuntimeError(
                f"Operator {self.val} doesn't have both neighbors.")

    def toInt(self):
        """Return the numeric value; raise if this node is an operator."""
        if self.isOp():
            raise RuntimeError(
                f"Operator {self.val} is not a number.")
        return self.val.val
@dataclass
class CalcVal:
    """A single expression token: either an integer literal or an operator."""

    val: Union[int, Operator]

    def __call__(self, a, b) -> int:
        # Operator tokens are callable with two operands.
        return self.val(a, b)

    def __str__(self):
        if self.isInt():
            return str(self.val)
        return opInfo[self.val].name

    def appendDigit(self, o: CalcVal):
        """Merge a trailing digit into this token; True only if both are ints."""
        if not (self.isInt() and o.isInt()):
            return False
        self.val = self.val * 10 + o.val
        return True

    def isInt(self):
        return isinstance(self.val, int)

    def priority(self):
        if self.isInt():
            return 0
        return opInfo[self.val].priority
class CalcValBtn(Btn):
    """Button that appends its CalcVal token to the expression when clicked."""

    def __init__(self, app: App, row: int, col: int, val: CalcVal,
                 span: int = 1
                 ):
        super().__init__(app, row, col, span, str(val))
        self.val = val

    def onClick(self):
        # Copy so repeated clicks can't mutate the button's template value
        # (digit tokens are merged in place by App.addItem).
        self.app.addItem(copy(self.val))
def evalItems(items: list[CalcItem]):
    """Evaluate an expression given its items sorted by descending priority.

    Each operator node is applied in turn (CalcItem.eval splices the linked
    list around it); the first number reached in the priority ordering is
    the final result. RuntimeErrors from malformed expressions are returned
    as values, not raised.
    """
    try:
        if not items:
            return "No input."
        if len(items) == 1:
            return f"Sole number: {items[0].toInt()}"
        res = items[0]
        for item in items:
            if item.isInt():
                return res.toInt()
            res = item.eval()
    except RuntimeError as e:
        return e
    except ZeroDivisionError:
        return "Division by zero."
class EvalBtn(Btn):
    """The '=' button: evaluates the expression and resets the input list."""

    def __init__(self, app: App, row: int, col: int, span: int):
        super().__init__(app, row, col, span, "=")

    def onClick(self):
        self.app.showRes(evalItems(self.app.itemsByHighPriority()))
        self.app.reset()
# A binary operator: takes two operands and returns a result.
Operator = Callable[[Any, Any], Any]


@dataclass
class OpInfo:
    # Display symbol and precedence (higher binds tighter).
    name: str
    priority: int


# Supported operators with their display symbols and precedence levels.
opInfo = {
    add: OpInfo("+", 1),
    sub: OpInfo("-", 1),
    mul: OpInfo("*", 2),
    floordiv: OpInfo("/", 2)
}
def setNext(item: CalcItem, nxt: CalcItem):
    """Set item.next to nxt, tolerating a None item (list boundary).

    The second parameter was renamed from ``next``, which shadowed the
    builtin; all call sites pass it positionally.
    """
    if item is not None:
        item.next = nxt


def setPrev(item: CalcItem, prv: CalcItem):
    """Set item.prev to prv, tolerating a None item (list boundary)."""
    if item is not None:
        item.prev = prv
if __name__ == "__main__":
    # Launch the calculator GUI when run as a script.
    App().run()
| StarcoderdataPython |
4847012 | # Generated by Django 2.0.3 on 2019-01-02 12:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.3: makes Product.watchlist nullable
    (default None, cascade delete).

    NOTE: generated migrations should not be hand-edited once applied.
    """

    dependencies = [
        ('product', '0026_auto_20190102_2053'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='watchlist',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='watchlist.WatchList'),
        ),
    ]
| StarcoderdataPython |
1870504 | <filename>minicash/utils/parsers.py<gh_stars>1-10
import re
import sys
import os
import hashlib
def isValidProof(fprint, proof, difficulty):
    """Check a proof of work: sha256('<fprint>_<proof>') must start with
    ``difficulty`` leading zero hex digits."""
    material = (fprint + '_' + str(proof)).encode('utf-8')
    digest = hashlib.sha256(material).hexdigest()
    return digest.startswith('0' * difficulty)
def isValidSignaturesDict(d):
    """Validate a {fingerprint: signature} mapping: both sides must be plain
    strings and every fingerprint must be well-formed."""
    for fprint, signature in d.items():
        strings_ok = type(fprint) is str and type(signature) is str
        # Short-circuit keeps isValidFingerprint from seeing non-strings.
        if not (strings_ok and isValidFingerprint(fprint)):
            return False
    return True
def isValidFingerprint(s):
    """True iff ``s`` is exactly 16 hexadecimal characters (a key id).

    BUGFIX: uses re.fullmatch — the original ``re.match`` with ``^...$``
    accepted a trailing newline, because '$' also matches just before a
    final '\\n'; a validator must not be that lenient.
    """
    return re.fullmatch(r'[a-fA-F0-9]{16}', s) is not None
def isValidMD5Sum(s):
    """True iff ``s`` is exactly 32 lowercase hexadecimal characters.

    BUGFIX: re.fullmatch rejects the trailing newline that the original
    ``^...$`` + re.match pattern silently accepted.
    """
    return re.fullmatch(r'[a-f0-9]{32}', s) is not None
def isValidLedgerKey(s):
    """Validate a ledger key of the form '<16-hex-fingerprint>_<proof>'
    whose proof satisfies a difficulty-5 proof of work."""
    fprint, remainder = s[:16], s[16:]
    if not isValidFingerprint(fprint):
        return False
    # Covers both a missing separator and a too-short key.
    if not remainder.startswith('_'):
        return False
    try:
        proof = int(remainder[1:])
    except ValueError:
        return False
    if not isValidProof(fprint, proof, 5):
        return False
    return True
def isValidLedger(ledger):
    """Structurally validate the ledger: a dict mapping ledger keys to
    balances.

    Every key must be a valid '<fprint>_<proof>' ledger key, every balance a
    non-negative int, and the balances must average exactly 10,000,000 per
    key.
    """
    if type(ledger) is not dict:
        return False
    for key, value in ledger.items():
        if type(key) is not str:
            return False
        if type(value) is not int:
            return False
        if not isValidLedgerKey(key):
            return False
        # Balances may never go negative.
        if value < 0:
            return False
    if len(ledger) != 0:
        # BUGFIX: compare with exact integer arithmetic. The original used
        # float division ('/'), which can mis-compare for very large totals.
        # NOTE(review): the original comment claimed 100000000 per key while
        # the code enforced 10000000 — the code's value is kept; confirm
        # which is intended.
        if sum(ledger.values()) != 10000000 * len(ledger):
            return False
    return True
class PacketParser:
    """Structural validator for minicash network packets.

    A packet is a dict ``{'Type': <str>, 'Data': <list|dict>}``. Call
    :meth:`isPacketValid`; on rejection ``errorMessage`` holds the first
    violation found, and on success ``type``/``data`` are populated.
    """

    def __init__(self, packet=None):
        self.packet = packet      # raw packet under inspection
        self.type = None          # validated packet type
        self.data = None          # validated packet payload
        self.errorMessage = None  # reason for the last rejection

    def setPacket(self, packet):
        self.packet = packet

    def getData(self):
        return self.data

    def getType(self):
        return self.type

    def isPacketValid(self):
        """Return True iff the packet is structurally valid."""
        # Check for dict type
        if type(self.packet) is not dict:
            self.errorMessage = 'It is not dict'
            return False
        # Check for packet keys
        if not set(['Type', 'Data']) == set(list(self.packet.keys())):
            self.errorMessage = 'Wrong keys in the packet'
            return False
        # Check for correct types
        validTypes = ['HELLO', 'REQ_LEDGER', 'RESP_LEDGER', 'REQ_INTRO_KEY',
                      'RESP_INTRO_KEY', 'REQ_INTRO_KEY_END', 'REQ_PAY',
                      'RESP_PAY', 'REQ_PAY_END']
        if self.packet['Type'] not in validTypes:
            self.errorMessage = 'Invalid type'
            return False
        self.type = self.packet['Type']
        self.data = self.packet['Data']
        # Check each type exclusively. Only HELLO carries a list payload;
        # every other type carries a dict.
        if type(self.data) is list:
            # BUGFIX: previously a non-HELLO packet with a list payload fell
            # through all checks and was accepted unvalidated.
            if self.type != 'HELLO':
                self.errorMessage = self.type + ': Data must be a dict'
                return False
            for val in self.data:
                if type(val) is not dict:
                    self.errorMessage = 'HELLO: element in Data list is not dict'
                    return False
                if not set(['Fingerprint', 'ProofOfWork']) == set(list(val.keys())):
                    self.errorMessage = 'HELLO: Wrong Data keys'
                    return False
                if type(val['Fingerprint']) is not str:
                    self.errorMessage = 'HELLO: Fingerprint value is not a string'
                    return False
                if not isValidFingerprint(val['Fingerprint']):
                    self.errorMessage = 'HELLO: Fingerprint value has not valid format'
                    return False
                if type(val['ProofOfWork']) is not int:
                    self.errorMessage = 'HELLO: ProofOfWork value is not int'
                    return False
                if val['ProofOfWork'] < 0:
                    self.errorMessage = 'HELLO: ProofOfWork is negative'
                    return False
        elif type(self.data) is dict:
            # BUGFIX: a HELLO packet must carry a list; the original accepted
            # a dict payload for HELLO without any validation at all.
            if self.type == 'HELLO':
                self.errorMessage = 'HELLO: Data must be a list'
                return False
            # REQ_LEDGER: no payload expected.
            if self.type == 'REQ_LEDGER':
                if len(self.data) != 0:
                    self.errorMessage = 'REQ_LEDGER: Data is not empty'
                    return False
            # RESP_LEDGER: full ledger plus the peers' signatures over it.
            if self.type == 'RESP_LEDGER':
                if not set(['Ledger', 'Signatures']) == set(list(self.data.keys())):
                    self.errorMessage = 'RESP_LEDGER: Wrong Data keys'
                    return False
                if not isValidLedger(self.data['Ledger']):
                    self.errorMessage = 'RESP_LEDGER: Invalid ledger'
                    return False
                if not isValidSignaturesDict(self.data['Signatures']):
                    self.errorMessage = 'RESP_LEDGER: fprints-signatures dict invalid'
                    return False
            # REQ_INTRO_KEY: a new key announcement.
            if self.type == 'REQ_INTRO_KEY':
                if not set(['Key', 'Checksum', 'Sig']) == set(list(self.data.keys())):
                    self.errorMessage = 'REQ_INTRO_KEY: Wrong Data keys'
                    return False
                if not isValidLedgerKey(self.data['Key']):
                    self.errorMessage = 'REQ_INTRO_KEY: Wrong ledger key'
                    return False
                if not isValidMD5Sum(self.data['Checksum']):
                    self.errorMessage = 'REQ_INTRO_KEY: Wrong md5sum format'
                    return False
                if type(self.data['Sig']) is not str:
                    self.errorMessage = 'REQ_INTRO_KEY: The signature is not string'
                    return False
            # RESP_INTRO_KEY / REQ_INTRO_KEY_END: checksum + signatures.
            if self.type == 'RESP_INTRO_KEY' or self.type == 'REQ_INTRO_KEY_END':
                if not set(['Checksum', 'Signatures']) == set(list(self.data.keys())):
                    self.errorMessage = self.type + ': Wrong Data keys'
                    return False
                if not isValidMD5Sum(self.data['Checksum']):
                    self.errorMessage = self.type + ': Wrong md5sum format'
                    return False
                if not isValidSignaturesDict(self.data['Signatures']):
                    self.errorMessage = self.type + ': fprints-signatures dict invalid'
                    return False
            # REQ_PAY: a payment request between two fingerprints.
            if self.type == 'REQ_PAY':
                if not set(['Fromkey', 'Tokey', 'Amount', 'Checksum', 'Sig']) == set(list(self.data.keys())):
                    self.errorMessage = 'REQ_PAY: Wrong Data keys'
                    return False
                if not isValidFingerprint(self.data['Fromkey']):
                    self.errorMessage = 'REQ_PAY: Invalid Fromkey value'
                    return False
                if not isValidFingerprint(self.data['Tokey']):
                    self.errorMessage = 'REQ_PAY: Invalid Tokey value'
                    return False
                if type(self.data['Amount']) is not float:
                    self.errorMessage = 'REQ_PAY: Amount is not float'
                    return False
                if self.data['Amount'] <= 0:
                    self.errorMessage = 'REQ_PAY: Amount is not larger than 0'
                    return False
                if not isValidMD5Sum(self.data['Checksum']):
                    self.errorMessage = 'REQ_PAY: Invalid checksum value'
                    return False
                # BUGFIX: the original wrote `not type(self.data['Sig'] is str)`,
                # which takes type() of a bool and is always truthy, so the
                # Sig field was never actually validated.
                if type(self.data['Sig']) is not str:
                    self.errorMessage = 'REQ_PAY: Signature is not string'
                    return False
            # RESP_PAY / REQ_PAY_END: checksum + signatures.
            if self.type == 'RESP_PAY' or self.type == 'REQ_PAY_END':
                if not set(['Checksum', 'Signatures']) == set(list(self.data.keys())):
                    self.errorMessage = self.type + ': Wrong Data keys'
                    return False
                if not isValidMD5Sum(self.data['Checksum']):
                    self.errorMessage = self.type + ': Invalid checksum value'
                    return False
                if not isValidSignaturesDict(self.data['Signatures']):
                    self.errorMessage = self.type + ': fprints-signatures dict invalid'
                    return False
        else:
            # Payload is neither list nor dict (original returned False
            # silently, leaving errorMessage unset).
            self.errorMessage = 'Data is neither list nor dict'
            return False
        return True
| StarcoderdataPython |
9694895 | from __future__ import division
import copy
from operator import itemgetter
import numpy as np
from flatland.core.env_observation_builder import ObservationBuilder
from flatland.envs.distance_map import DistanceMap
#from flatland.envs.rail_env_shortest_paths import get_shortest_paths
from shortestpath import get_shortest_paths
class StateMaskingObs(ObservationBuilder):
ENTRYS_PER_COLUMN = 8
TRAFFIC_LIGHT_SIZE = 3
HOMO_SIZE = 3
OBS_SIZE = 3 * ENTRYS_PER_COLUMN + TRAFFIC_LIGHT_SIZE + HOMO_SIZE
ADDITIONAL_INPUT = OBS_SIZE - 3 * ENTRYS_PER_COLUMN
def __init__(self):
super(StateMaskingObs, self).__init__()
self.fake_envs = []
self.single_solver = []
self.SKIPLARGE = True
def set_env(self, env):
super().set_env(env)
def reset(self):
self.time = 0
self.junctions = []
self.visited = []
self.actual_stopping_positions = []
self.stopping_positions_only = []
self.actual_junction_cluster = []
self.permanent_pointer_position = []
self.temporary_pointer_position = []
self.initialize_list = [0 for i in range(len(self.env.agents))]
self.num_agent = len(self.env.agents)
self.agent_in_clusters = [[-1, -1] for i in range(len(self.env.agents))]
self.num_active_agents = [0 for i in range(len(self.env.agents))]
self.initialization_timestep = 0
self.max_timestep = int((8 * (self.env.height + self.env.width)) / len(self.env.agents))
self.upper_bound = int((self.env.height + self.env.width) / 12)
self.observations = [0 for i in range(len(self.env.agents))]
self.queues = {}
self.agents_activated = []
self.clusters_activated = []
self.path_dict= {}
self.State ={}
self.Next_Positions = {}
self.agents_stuck = [[0,0] for i in range(len(self.env.agents))]
self.old_info = [ [0,0 ] for i in range(len(self.env.agents))]
return
def _cheat_expert(self, start_pos, orientation, agentID):
"""
return the next position when expert is standing on a junction
"""
target = self.env.agents[agentID].target
agent_inform = (start_pos,orientation,target)
if agent_inform in self.path_dict.keys() :
return self.path_dict[agent_inform]
else :
path = get_shortest_paths(self.distance_map,start_pos,orientation, agent_handle = agentID)
self.path_dict[agent_inform] = path[agentID]
return path[agentID]
def get_distance_map(self) :
self.distance_map = DistanceMap(env_width=self.env.rail.width, env_height=self.env.rail.height,
agents=self.env.agents)
self.distance_map.reset(self.env.agents, self.env.rail)
return
def get(self, handle=0):
"""
param-handle: agent id
if agent_id==0, add Obs of all agents to self.observations,
return respective Obs of agent_id
New obs is a 3*8+3+3 tuple observation for RL
"""
def is_junction_homo(cell_list):
assert len(cell_list) == 3
homo_output = [0, 0, 0]
count_dict = {}
junction_pos = None
# index = []
for i in cell_list:
index = [x for x in range(len(cell_list)) if cell_list[x] == i]
# for x in range(len(cell_list)):
# if cell_list[x] is not None:
# print(cell_list[x], i)
# if cell_list[x] == i:
# index.append(x)
count_dict.update({i: index})
if 1 <= len(count_dict) <= 3:
if len(count_dict) == 1:
k = list(count_dict.keys())
if k[0] is not None:
homo_output = [1, 1, 1]
junction_pos = k[0]
elif len(count_dict) == 2:
for key, value in count_dict.items():
if len(value) == 2 and key is not None:
junction_pos = key
for index in value:
homo_output[index] = 1
else:
raise RuntimeError('bug somewhere')
return homo_output
def initialize_stopping_points():
"""
Compute all junction clusters and the stopping points associated with them
Only needs to be called once for an episode
"""
self.compute_all_junctions()
self.compute_stopping_points()
self.set_stopping_pointers()
# if self.initialize_list[handle] == 1:
# self.agent_initial_positions[handle] = [0, (-3, -3)]
if len(self.env.agents) >81 and self.SKIPLARGE == True :
if ((len(self.env.agents)==100) and ((self.env.height+self.env.width)!=200)) or len(self.env.agents) >100 :
if self.time == 0 :
for agent in range(len(self.env.agents)) :
self.observations[agent] = [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
1.0, 0.0, -1.0]
self.time+=1
return
if handle == 0:
self.get_others_complete()
if self.time == 0:
self.GetAllStates()
initialize_stopping_points()
#self.get_initial_positions()
self.get_initialization_queue()
self.get_distance_map()
self.update_pointers()
# Get clusters with stuck agents
self.get_stuck_clusters()
self.get_timed_out_clusters()
# Increment Time
self.time += 1
self.initialization_timestep +=1
for agent in range(len(self.env.agents)) :
my_pos = self.env.agents[agent].position if self.env.agents[agent].position is not None else self.env.agents[
agent].initial_position
my_direction = self.env.agents[agent].direction if self.env.agents[agent].direction is not None else \
self.env.agents[
agent].initial_direction
if self.time >2 and not self.env.dones[agent] and self.agents_stuck[agent][0] ==0 and agent in self.agents_activated:
if self.old_info[agent][0] == my_pos and self.old_info[agent][1] == my_direction:
self.agents_stuck[agent][1] +=1
else :
self.agents_stuck[agent][1] = 0
self.old_info[agent][0] = copy.deepcopy(my_pos)
self.old_info[agent][1] = copy.deepcopy(my_direction)
if self.agents_stuck[agent][1] >100 or self.agents_stuck[agent][0] == 1 and not self.env.dones[agent] :
self.agents_stuck[agent][0] = 1
self.observations[agent] = [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, # homogenous junction cell
1.0, 0.0, -1.0]
continue
if self.env.dones[agent]:
self.num_active_agents[agent] = 2
self.observations[agent] = [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, # homogenous junction cell
1.0, 0.0, -1.0]
continue # last element: traffic light
state_of_agent = self.StateClassifier(my_pos,my_direction)
if state_of_agent in [0,3,4] and agent in self.agents_activated :
self.observations[agent] = [0.0, -1.0, 0.0, 0.0, 0.0 ,0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0 ,0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0 , -1 , # homagogenous junction cell
1.0, 0.0, -1.0]
continue
isStoppingPoint = False
actual_pos = self.env.agents[agent].position if self.env.agents[agent].position is not None else \
self.env.agents[
agent].initial_position
actual_dir = self.env.agents[agent].direction if self.env.agents[agent].direction is not None else \
self.env.agents[
agent].initial_direction
# Get Traffic Signal
if agent in self.agents_activated:
others_traffic = self.get_others(agent,1)
traffic_signal = self.get_traffic_signal(actual_pos, actual_dir, others_traffic, agent)
else :
traffic_signal = 0
obs = []
others = []
next_junction_list = []
all_handles = [i for i in range(len(self.env.agents))]
# Adjust Position if at Stopping Point
if state_of_agent in [1, 4]:
valid_act_pos_pair = self._get_next_valid_position(my_pos, my_direction)
for action, next_pos in valid_act_pos_pair.items():
if next_pos[1] is not None:
my_pos = next_pos[1]
my_direction = next_pos[0]
isStoppingPoint = True
break
# Check whether agent is at decision state
if state_of_agent ==2 :
state_of_agent = 1
else:
state_of_agent = -1
valid_act_pos_pair = self._get_next_valid_position(my_pos, my_direction)
for action, next_pos in valid_act_pos_pair.items():
# stopping = self.stopping_point_occupied(actual_pos, actual_dir, my_pos, my_direction, action, others)
if next_pos[1] is not None: # valid action
# print(next_pos)
has_solution, obs_1_direction, junction_cell = self._count_path_block(next_pos[1], next_pos[0],
agent,
my_pos,
my_direction)
next_junction_list.append(junction_cell)
if has_solution:
obs_1_direction[0] += 1 # compensate 2 cell for current pos
if isStoppingPoint:
# obs_1_direction[6] = 1
obs_1_direction[
0] += 1 # compensate 1 cell for stopping point bc it receives future obs
obs_1_direction.insert(0, True)
obs.append(obs_1_direction)
else:
# print(my_pos, my_direction, self.StateClassifier(my_pos, my_direction), "still fail!")
obs_1_direction.insert(0, False)
obs.append(obs_1_direction)
else:
next_junction_list.append(None)
obs.append([False, 0, 0, 0, 0, 0, 0, 0])
obs = np.array(obs, dtype=float)
max_length = max(obs[:, 1])
for i in range(3):
obs[i, 1] = obs[i, 1] / max_length if obs[i, 1] > 0 else -1
obs = np.reshape(obs, (1, -1))
obs = obs.squeeze()
obs = obs.tolist()
assert len(obs) == 3 * self.ENTRYS_PER_COLUMN
obs.append(traffic_signal)
if agent in self.agents_activated:
obs.append(0)
else:
obs.append(1)
obs.append(state_of_agent)
if agent in self.agents_activated:
junction_homo = is_junction_homo(next_junction_list)
obs.extend(junction_homo)
else :
junction_homo = [0,0,0]
obs.extend(junction_homo)
assert len(junction_homo) == self.HOMO_SIZE
assert len(obs) == self.OBS_SIZE
self.observations[agent] = obs
# initialization_code()
self.get_initialization()
return self.observations[0]
else:
return self.observations[handle]
def get_many(self, handles=None):
observations = {}
self.get(0) # store all obs in self.observation
if handles is None:
handles = []
for h in handles:
observations[h] = self.observations[h]
return observations
    def StateComputation(self,agent_pos,agent_dir) :
        """Classify one (cell, heading) pair and cache the result.

        Stores ``self.State[agent_pos, agent_dir] = [code, next_pos]`` where
        code is 2 (decision junction: more than one transition available
        here), 1 (stopping point: the next cell is a junction), 0 (plain
        rail) or -1 (no transitions at all), then pre-computes the reachable
        neighbours via ``Get_Valid_Positions``.
        """
        avb_moves = self.env.rail.get_transitions(*agent_pos, agent_dir)
        # Row = current heading (N,E,S,W); column = action (left/forward/right) -> grid offset.
        move2grid = np.array([[[0, -1], [-1, 0], [0, +1]], [[-1, 0], [0, +1], [+1, 0]], [[0, +1], [+1, 0], [0, -1]],
                              [[+1, 0], [0, -1], [-1, 0]]])  # Obtained from colliding agent code
        trans2act = np.array([[2, 3, 0, 1], [1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2]])  # Maps transition to an action
        # next_dir_grid = np.array([-1,0,1]) # Maps action to a change in agent direction
        if sum(avb_moves) > 1:  # This is definitely a decision junction since more than 1 move possible
            self.State[agent_pos,agent_dir] = [2,None]
            self.Get_Valid_Positions(agent_pos,agent_dir,2)
            return
        elif sum(avb_moves) == 1:
            avbmove = avb_moves.index(1)  # Get the available transition to next cell
            action = trans2act[agent_dir][avbmove]  # Get the corresponding action for that transition
            if action == 0:
                next_pos = agent_pos + move2grid[(agent_dir + 2) % 4][
                    1]  # This is a dead end, so turn around and move forward
            else:
                next_pos = agent_pos + move2grid[agent_dir][action - 1]
            # next_dir = (agent_dir + (next_dir_grid[action-1]) )%4
            sumnextcell = 0  # How many possible transitions at next cell
            for i in range(0, 4):
                new_avb_moves = self.env.rail.get_transitions(*next_pos, i)
                sumnextcell += sum(new_avb_moves)
            if (sumnextcell > 2) :
                # Next cell is a junction -> this cell is a stopping point.
                self.State[agent_pos,agent_dir] = [1,tuple(next_pos)]
                self.Get_Valid_Positions(agent_pos,agent_dir,1)
                return
            elif (sumnextcell <= 2) :
                self.State[agent_pos,agent_dir] = [0,tuple(next_pos)]
                self.Get_Valid_Positions(agent_pos,agent_dir,0)
                return
        else:
            # No transitions at all: mark invalid and cache empty next positions.
            self.State[agent_pos,agent_dir] = [-1,None]
            next_positions = {}
            for action in [1, 2, 3]:
                next_positions.update({action: [None, None]})
            self.Next_Positions[agent_pos,agent_dir] = next_positions
            return
def GetAllStates(self):
for row in range(self.env.height):
for column in range(self.env.width):
position = (row, column)
for direction in range(0,4) :
self.StateComputation(position,direction)
return
def StateClassifier(self, agent_pos, agent_dir):
"""
returns 0 : No decision point
returns 1 : Stopping point (Decision at next cell)
returns 2 : At decision point currently (More than 1 available transition)
returns 3,4 : MUST STOP point - Agent Ahead
returns None: invalid cell
"""
output = self.State[agent_pos,agent_dir]
state = output[0]
next_position = output[1]
if state ==2 :
return 2
elif state ==0 :
others = self.get_others(0,2)
if next_position in others:
return 3
else :
return 0
elif state ==1 :
others = self.get_others(0,2)
if next_position in others:
return 4
else :
return 1
else :
return None
    def Get_Valid_Positions(self, my_pos, my_direction,state):
        """
        Pre-compute the map ``{action: [next_heading, next_position]}`` for a
        (cell, heading) pair, cache it in ``self.Next_Positions`` and return
        it. Unavailable actions keep the placeholder ``[None, None]``.

        action: 0 ---> stop
                1 ---> left
                2 ---> forward
                3 ---> right
        """
        avb_moves = self.env.rail.get_transitions(*my_pos, my_direction)
        # Per heading: the NESW direction reached by (left, forward, right).
        action2direction = [[3, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 0]]
        dir2grid = np.array([[-1, 0], [0, 1], [1, 0], [0, -1]])
        next_dir_grid = np.array([-1, 0, 1])
        move2grid = np.array([[[0, -1], [-1, 0], [0, +1]],
                              [[-1, 0], [0, +1], [+1, 0]],
                              [[0, +1], [+1, 0], [0, -1]],
                              [[+1, 0], [0, -1], [-1, 0]]])
        avbmove = [i for i, x in enumerate(avb_moves) if x == 1]
        trans2act = np.array([[2, 3, 0, 1], [1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2]])  # Maps transition to an action
        next_positions = {}
        for action in [1, 2, 3]:
            next_positions.update({action: [None, None]})
        if state in [0, 1, 2, 3, 4]:
            if state == 2:  # decision point: several actions may be available
                for action in [1, 2, 3]:
                    i = action2direction[my_direction][action - 1]
                    if i in avbmove:  # available NSWE direction
                        next_pos = my_pos + dir2grid[i]
                        next_positions[action] = [i, tuple(next_pos)]
            else:
                # Single-transition cell: only one move is possible.
                avbmove = avb_moves.index(1)  # Get the available transition to next cell
                action = trans2act[my_direction][avbmove]  # Get the corresponding action for that transition
                if action == 0:
                    next_dir = (my_direction + 2) % 4
                    next_pos = my_pos + move2grid[next_dir][1]
                    # This is a dead end, so turn around and move forward
                else:
                    next_pos = my_pos + move2grid[my_direction][action - 1]
                    next_dir = (my_direction + (next_dir_grid[action - 1])) % 4
                # The only possible move is exposed as "forward" (action 2)
                # when it is a forward move, a dead-end turnaround, or the
                # sole transition; otherwise under its own turn action.
                if action == 2 or action == 0 or sum(avb_moves) == 1:
                    next_positions[2] = [next_dir, tuple(next_pos)]
                else:
                    next_positions[action] = [next_dir, tuple(next_pos)]
        self.Next_Positions[my_pos,my_direction] = next_positions
        return next_positions
def _get_next_valid_position(self,my_pos,my_direction):
return self.Next_Positions[my_pos,my_direction]
    def DistToNextJunction(self, agentID, full_path, old_pos, old_heading):
        """
        Walk ``full_path`` and measure how far the next junction is.

        Returns 1 : If at Stopping Point (states 1/4)
        Returns Distance to Junction (Greater than 1) : If at No Decision Point
        Returns 0 : If at Junction currently
        NOTE(review): for state 2 the code pops the current junction and
        counts at least 1 step, so the "returns 0 at a junction" case above
        only happens via the error fallback -- confirm intended semantics.
        """
        full_path_cp = copy.deepcopy(full_path)
        state = self.StateClassifier(old_pos, old_heading)
        sumcell = 0  # How many possible transitions at next cell
        # for j in range(0, 4):
        #     new_avb_moves = self.env.rail.get_transitions(*old_pos, j)
        #     sumcell += sum(new_avb_moves)
        if state in [1, 4]:
            return 1
        elif state in [0, 2, 3]:
            distance = 0
            if state == 2:
                full_path_cp.pop(0)  # remove current junction pos, and add 1 more step
                distance += 1
            for i in range(1, len(full_path_cp) - 1):  # full_path_cp[0] is current pos, not moving yet
                distance += 1
                statecell = self.StateClassifier(full_path_cp[i].position, full_path_cp[i].direction)
                if statecell in [1, 4]:
                    # Stopping point found: the junction itself is one cell further.
                    return distance + 1
                elif statecell == 2:
                    return distance
            return distance
        else:
            print("Some error in DistToNextJunction")
            return 0
def check_transition_validity(self, next_pos, current_pos):
directions = [-1, -1, -1, -1]
current_pos = tuple(current_pos)
if (current_pos[0] >= 0 and 0 <= current_pos[1] < self.env.width and current_pos[
0] < self.env.height):
for i in range(0, 4):
next_positions = self._get_next_valid_position(tuple(current_pos), i)
for j in range(1, 4):
if next_positions[j][1] == next_pos:
directions[i] = i
break
if sum(directions) >= -3:
return True, directions
else:
return False, directions
else:
return False, directions
    def stopping_point_occupied(self, actual_pos, actual_dir, current_pos, current_dir, action, others):
        """
        Check whether executing *action* would move the agent onto a cell
        already claimed by another agent (``others`` holds [position,
        direction] entries).

        Returns 1 when the target cell / downstream stopping point is
        occupied, 0 otherwise.
        NOTE(review): *action* arrives 1-based (1=left, 2=forward, 3=right)
        and is shifted to 0-based for indexing ``movegrid`` -- confirm callers.
        """
        action = action - 1
        # print(others)
        count = 0
        # movegrid[action][heading] -> grid offset for taking that action.
        movegrid = np.array([[[0, -1], [-1, 0],
                              [0, +1], [+1, 0]],
                             [[-1, 0], [0, +1],
                              [+1, 0], [0, -1]],
                             [[0, +1], [+1, 0],
                              [0, -1], [-1, 0]]])
        if self.StateClassifier(actual_pos, actual_dir) == 2:
            # At a decision point: check the cell the chosen action leads to.
            next_pos = actual_pos + movegrid[action][actual_dir]
            validity, directions = self.check_transition_validity(actual_pos, next_pos)
            if validity:
                for i in range(0, 4):
                    if [tuple(next_pos.reshape(1, -1)[0]), directions[i]] in others:
                        return 1
            return 0
        elif self.StateClassifier(actual_pos, actual_dir) in [1, 4]:
            # At a stopping point: check from the (future) position instead.
            next_pos = current_pos + movegrid[action][current_dir]
            validity, directions = self.check_transition_validity(current_pos, next_pos)
            if validity:
                for i in range(0, 4):
                    if [tuple(next_pos.reshape(1, -1)[0]), directions[i]] in others:
                        return 1
            return 0
        elif self.StateClassifier(actual_pos, actual_dir) in [0, 3] and action == 1:
            # Plain rail, moving forward: follow the track (forward action, key 2)
            # until the next stopping/decision point and check its occupancy.
            new_pos = actual_pos
            new_dir = actual_dir
            while self.StateClassifier(new_pos, new_dir) not in [1, 2, 4]:
                next_positions = self._get_next_valid_position(new_pos, new_dir)
                new_dir = next_positions[2][0]
                new_pos = next_positions[2][1]
                count += 1
                if count > 30:
                    # Safety cut-off against walking an unbounded loop of track.
                    return 0
            for i in range(0, 4):
                if [new_pos, i] in others:
                    return 1
            return 0
        elif self.StateClassifier(actual_pos, actual_dir) in [0, 3] and action in [0, 2]:
            return 0
        else:
            return 0
def isJuntion(self, agent_pos):
trans = []
for direction in range(0, 4):
trans.append(sum(self.env.rail.get_transitions(*agent_pos, direction)))
return sum(trans) > 2
    def _count_path_block(self, start_pos, heading, agentID, old_pos, old_heading):
        """
        input: the start position (x,y).
            make sure the start position is a rail cell not a obstacle!
        return: ``(has_solution, stats, junction_cell)`` where *stats* is a
        7-element list:
            [0] single-agent A* path length (or -1 when no path exists),
            [1] 0/1: blocking before the first decision point,
            [2] 0/1: blocking before the first stopping point/junction,
            [3] fraction of first-decision blocks not at the first junction,
            [4] all blocking along the path, normalised by num_agent,
            [5] blocking on decision points per decision point,
            [6] distance to the next junction / path length,
        and *junction_cell* is the first junction cell on the path (or None).
        (NOTE: the original docstring described an older 6-element layout.)
        """
        def next_junction_cell(full_path):
            # First cell on the path that is a junction; None when there is none.
            for i in range(len(full_path)):
                check_pos = full_path[i]
                if self.isJuntion(check_pos.position):
                    return full_path[i][0]
            return None
        assert self.env.rail.grid[(start_pos[0], start_pos[1])] != 0, "start position " + str(start_pos) \
                                                                      + " is not valid in the map!"
        count_first_decision_block = 0
        count_first_junction_block = 0
        count_all_block = 0
        count_all_decision_block = 0
        count_decision_point = 0
        count_junction = 0
        crash_buffer = []
        visit_first_decision_point = False
        visit_first_stopping = False
        # Fall back to the initial position/direction for agents not yet born.
        actual_pos = self.env.agents[agentID].position if self.env.agents[agentID].position is not None else \
            self.env.agents[
                agentID].initial_position
        actual_direction = self.env.agents[agentID].direction if self.env.agents[agentID].direction is not None else \
            self.env.agents[
                agentID].initial_direction
        all_handle = [i for i in range(len(self.env.agents))]
        others_pos = self.get_others(agentID,3)
        others_directions = self.get_others(agentID,4)
        #others_moving = []
        # start to compute quantities ---------------------------------------
        full_path = self._cheat_expert(start_pos, heading, agentID)
        if full_path is None:
            # No expert path from this cell/heading: return the failure marker.
            return False, [-1,  # single agent path length
                           min(1, count_first_decision_block),  # num_block within the first junction
                           min(1, count_first_junction_block),
                           count_first_decision_block,
                           count_all_block / self.num_agent,  # num_block all along the path
                           count_all_decision_block,  # num_block standing on junctions
                           -1], None
        distance_to_next_junction = self.DistToNextJunction(agentID, full_path, actual_pos,
                                                            actual_direction)
        junction_cell = next_junction_cell(full_path)
        for num_step in range(len(full_path) - 1):
            checking_cell = full_path[num_step].position  # checking cell is the cell we want to check blocking
            checking_cell_dir = full_path[num_step].direction
            if self.isJuntion(checking_cell):
                count_junction += 1
            if self.StateClassifier(checking_cell, checking_cell_dir) == 2:
                count_decision_point += 1
            for direction in range(4):
                if self.StateClassifier(checking_cell, direction) in [1, 4] and not visit_first_stopping:
                    visit_first_stopping = True
            if (checking_cell in others_pos.keys()) is True:
                idx = others_pos[checking_cell]
                crash_buffer.append(checking_cell)
                # test if there is other agents stepping on the stopping point
                if others_directions[idx] == checking_cell_dir:
                    # same heading, not moving, so waiting
                    if num_step == 0:
                        count_first_decision_block += 1
                elif (others_directions[idx] + checking_cell_dir) % 2 == 0:
                    # opposite heading, so blocking
                    count_all_block += 1
                    if self.StateClassifier(checking_cell, checking_cell_dir) == 2:
                        count_all_decision_block += 1
                    if not visit_first_decision_point:
                        count_first_decision_block += 1
                    if not visit_first_stopping:  # stopping point must lead to a junction
                        count_first_junction_block += 1
                else:  # neither same direction or opposite direction, meaning that an agent staying at a junction
                    # but that is not a junction for the current direction (non-decision point)
                    count_all_block += 1
                    count_all_decision_block += 1
                    if not visit_first_decision_point:
                        count_first_decision_block += 1
            if self.StateClassifier(checking_cell, checking_cell_dir) == 2:
                visit_first_decision_point = True
        return True, [len(full_path),  # single agent path length
                      min(1, count_first_decision_block),  # num_block within the first decision point
                      min(1, count_first_junction_block),  # num_block within the first junction
                      ((count_first_decision_block - count_first_junction_block) / max(1, count_first_decision_block)),
                      count_all_block / self.num_agent,  # num_block all along the path
                      count_all_decision_block / count_decision_point if count_decision_point else 0,
                      # num_block standing on junctions
                      distance_to_next_junction / len(full_path) if len(full_path) > 0 else -1,
                      ], junction_cell
def compute_all_junctions(self):
"""
Finds all junctions : Any cell with greater than 2 transitions available
"""
for row in range(self.env.height):
for column in range(self.env.width):
position = (row, column)
if self.total_transitions(position) > 2:
self.junctions.append(position)
return
def total_transitions(self, position):
"""
Input - Position
Return- Total transitions available at a particular cell location
Called by compute_all_junctions
"""
sum_transitions = 0
for i in range(0, 4):
sum_transitions += sum(self.env.rail.get_transitions(*position, i))
return sum_transitions
def compute_stopping_points(self):
"""
Input - Environment
Return - None
Computes all stopping points and junction clumps and stores them
"""
for position in self.junctions: # Iterate over all junctions
if position not in self.visited: # Only visit junctions which have not been visited before
self.visited.append(position) # Keep track of junctions visited
self.stopping_points = []
self.stopping_points_only = []
self.junction_cluster = []
self.junction_cluster.append(position) # Computing Junction clusters
self.visit(position) # VISIT the current junction
self.actual_stopping_positions.append(self.stopping_points)
self.actual_junction_cluster.append(self.junction_cluster)
self.stopping_positions_only.append(self.stopping_points_only)
return
def get_possible_positions(self, position):
"""
Input - Position
Returns - 4 Positions which can be reached by this cell , doesn't check for validity of the transition
"""
movements = np.array([[[-1, 0], [0, +1],
[1, 0], [0, -1]]])
possible_positions = []
for i in range(0, 4):
next_pos = position + movements[0][i]
next_pos = tuple(next_pos.reshape(1, -1)[0])
possible_positions.append(next_pos)
return possible_positions
def visit(self, position):
"""
Recursive code to visit a junction position and compute the junction clump
"""
possible_stopping_points = self.get_possible_positions(
position) # Get possible positions from the current position
for stopping_position in possible_stopping_points:
if (stopping_position is not None):
valid, directions = self.check_transition_validity(position,
stopping_position) # Check if transition to that cell is possible
if not valid:
continue
else:
for j in range(0, 4):
if (0 < self.total_transitions(stopping_position) <= 2) and (
(stopping_position, directions) not in self.stopping_points) and (
directions[j] != -1): # Check whether that cell is a stopping point
self.stopping_points.append(
(stopping_position, directions[j])) # which has not been visited yet
self.stopping_points_only.append(stopping_position)
elif self.total_transitions(stopping_position) > 2 and (
stopping_position not in self.visited): # Check whether that cell is a junction
self.junction_cluster.append(stopping_position) # which has not been visited yet
self.visited.append(stopping_position) # Mark this junction visited
self.visit(
stopping_position) # Make a recursive call to the function if that cell hasn't been visited yet
else:
pass
return
def set_stopping_pointers(self):
"""
Initializes values for all traffic lights at the computed stopping points
"""
for _ in self.actual_stopping_positions:
self.permanent_pointer_position.append(0) # Permanent pointer position moves by one at each time step
self.temporary_pointer_position.append(
0) # Temporary pointer position makes the traffic light smart by checking for incoming traffic
return
def stopping_point_find(self, agent_pos, agent_dir):
"""
Input - Agent Position and Direction
Returns - Is Agent At Stopping Point, Stopping Cluster, Index within Cluster
"""
agent_info = (agent_pos, agent_dir)
for stopping_cluster in range(len(self.actual_stopping_positions)):
for stopping_point in self.actual_stopping_positions[stopping_cluster]:
if stopping_point == agent_info:
return True, stopping_cluster, self.actual_stopping_positions[stopping_cluster].index(
stopping_point)
return False, None, None
def junction_find(self, agent_pos):
for cluster in range(len(self.actual_junction_cluster)):
for point in self.actual_junction_cluster[cluster]:
if point == agent_pos:
return True, cluster, self.actual_junction_cluster[cluster].index(point)
return False, None, None
    def get_traffic_signal(self, agent_pos, agent_dir, others, handle):
        """
        Input - Agent Position, Direction, handle and others
        Get the traffic signal for an agent
        Returns 1 : If Traffic Signal is Green
        Returns -1 : If Traffic Signal is Red
        Two clearances are required to be obtained except if an agent is stuck within the cluster
        """
        validity, cluster, index = self.stopping_point_find(agent_pos,
                                                            agent_dir)  # Check whether agent is at Stopping Point
        if validity == False:  # If agent not at stopping point, traffic signal always green
            return 1
        if validity == True:  # If agent is at stopping point
            if cluster in self.stuck_clusters:  # First check if another agent is stuck inside the cluster
                current_pointer_pos = self.permanent_pointer_position[cluster]
                if current_pointer_pos == index:
                    return 1
                clearance = self.get_clearence(cluster, index, others)
                if clearance == True:  # If another agent is stuck, then directly get clearance for the traffic light
                    return 1
                else:
                    return -1
            if cluster in self.timed_out_clusters:
                # Same fast path for clusters whose occupant has timed out.
                current_pointer_pos = self.permanent_pointer_position[cluster]
                if current_pointer_pos == index:
                    return 1
                clearance = self.get_clearence(cluster, index, others)
                if clearance == True:  # If another agent is stuck, then directly get clearance for the traffic light
                    return 1
                else:
                    return -1
            # If no agent is stuck inside the cluster, then 2 clearances need to be obtained
            cleared = self.get_first_clearance(cluster,
                                               handle)  # First clearance checks whether any agent is already occupying the cluster
            if cleared == False:
                return -1
            current_pointer_pos = self.permanent_pointer_position[cluster]
            if current_pointer_pos == index:
                return 1
            else:
                clearance = self.get_clearence(cluster, index,
                                               others)  # Second clearance checks whether other agents are waiting to go inside the cluster,
                if clearance == True:  # If other agents are waiting to go inside cluster, only 1 of them gets a green traffic light
                    return 1
                else:
                    return -1
    def get_agent_stuck(self, cluster, others, position, direction, handle):
        """
        Input - Current cluster, others, agent pos, agent dir , agent handle
        Recursively checks whether an agent inside a cluster has any possible
        exit. Returns 0 when a free (unoccupied) exit exists, 1 when every
        exit is occupied. Uses self.possible_positions (reset by the caller)
        to avoid revisiting (position, direction) pairs during recursion.
        """
        # Get the next valid positions
        valid_act_pos_pair = self._get_next_valid_position(position, direction)
        for action, next_pos in valid_act_pos_pair.items():
            if next_pos[1] is not None:
                # If that position,direction has not been checked before
                if (next_pos[1], next_pos[0]) not in self.possible_positions:
                    self.possible_positions.append((next_pos[1], next_pos[0]))
                    # If that position is a stopping point and it is unoccupied , then agent has a free exit
                    if next_pos[1] in self.stopping_positions_only[cluster]:
                        if (next_pos[1], next_pos[0]) not in others.keys():
                            return 0
                    # If that position is another junction whithin the cluster, then recursively call this function
                    elif next_pos[1] in self.actual_junction_cluster[cluster]:
                        output = self.get_agent_stuck(cluster, others, next_pos[1], next_pos[0], handle)
                        if output == 0:
                            return 0
                    else:
                        pass
        # If all exits are occupied by other agents, then the agent inside the cluster is stuck
        # In such a case, allowing 1 more agent entry into the cluster might be able to solve the problem
        return 1
    def get_stuck_clusters(self):
        """
        Computes the clusters inside which agents are stuck (no unoccupied
        exit from that cluster).
        Returns - Cluster indexes in which agents are stuck; a cluster only
        qualifies when exactly one agent is stuck and fewer than 2 agents are
        inside it.
        """
        self.stuck_clusters = []
        others = self.get_others(0,0)  # Get information of all agents
        for cluster in range(len(self.actual_junction_cluster)):  # Iterate over all clusters
            counter = 0
            total = 0
            for handle in range(len(self.env.agents)):  # Iterate over all agents
                position = self.env.agents[handle].position
                direction = self.env.agents[handle].direction
                if position in self.actual_junction_cluster[
                    cluster]:  # If an agent is inside a particular cluster, call function
                    self.possible_positions = []  # to determine whether the agent is stuck
                    self.possible_positions.append(
                        (self.env.agents[handle].position, self.env.agents[handle].direction))
                    stuck = self.get_agent_stuck(cluster, others, position, direction, handle)
                    if stuck:  # If the agent is stuck inside cluster, increment the counter
                        counter += 1
                    total += 1
            if counter == 1 and total < 2:  # Make sure that no more than 2 agents are allowed entry into the cluster
                self.stuck_clusters.append(cluster)
        return self.stuck_clusters
    def get_timed_out_clusters(self):
        """
        Track how long each agent has stayed inside a junction cluster and
        return clusters where an agent has lingered for more than 10 steps
        while the cluster holds fewer than 2 agents (so one more agent may be
        let in to unblock it). Uses self.agent_in_clusters[handle] ==
        [current cluster, consecutive steps inside it].
        """
        self.timed_out_clusters = []
        self.num_agents_in_clusters = [0 for i in range(len(self.actual_junction_cluster))]
        self.timed_out = [0 for i in range(len(self.actual_junction_cluster))]
        for handle in range(len(self.env.agents)):
            if self.env.agents[handle].position != None:
                validity, cluster, index = self.junction_find(self.env.agents[handle].position)
                if validity == True:
                    if self.agent_in_clusters[handle][0] == cluster:
                        # Still in the same cluster: extend the dwell counter.
                        self.agent_in_clusters[handle][1] += 1
                    else:
                        self.agent_in_clusters[handle][0] = cluster
                        self.agent_in_clusters[handle][1] = 1
                    self.num_agents_in_clusters[cluster] += 1
                    if self.agent_in_clusters[handle][1] > 10:
                        self.timed_out[cluster] = 1
        for cluster in range(len(self.actual_junction_cluster)):
            if self.num_agents_in_clusters[cluster] < 2 and self.timed_out[cluster] == 1:
                self.timed_out_clusters.append(cluster)
        return self.timed_out_clusters
def get_others_complete(self):
"""
Returns tuple of (position,opposite direction) of each agent.
Unborn/Completed agents are set to have : (-3,-3,0)
"""
self.others0 = {}
self.others1 ={}
self.others2 ={}
self.others3 ={}
self.others4 ={}
for id in range(len(self.env.agents)):
if self.env.agents[id].position is None:
if self.env.dones[id] is True:
otherspos = (-id, -id)
othersdir = 0
othersdirections =0
else:
otherspos = (-id, -id)
othersdir = 0
othersdirections =0
else: # position not None
otherspos = self.env.agents[id].position
othersdir = (self.env.agents[id].direction + 2) % 4
othersdirections = self.env.agents[id].direction
self.others0[otherspos, othersdir] = id
self.others1[otherspos, othersdirections] = id
self.others2[otherspos] = id
self.others3[otherspos] = id
self.others4[id] = othersdirections
return
def get_others(self,handle,value) :
if value ==0 :
others = copy.copy(self.others0)
return others
elif value==1 :
others = copy.copy(self.others1)
if self.env.agents[handle].position is not None :
agentpos = self.env.agents[handle].position
agentdir = self.env.agents[handle].direction
del others[agentpos,agentdir]
return others
elif value ==2 :
others = copy.copy(self.others2)
return others
elif value ==3 :
others = copy.copy(self.others3)
if self.env.agents[handle].position is not None :
agentpos = self.env.agents[handle].position
del others[agentpos]
return others
else :
others = copy.copy(self.others4)
del others[handle]
return others
def get_first_clearance(self, cluster, handle):
"""
Checks whether a particular cluster is occupied
Returns 0 - If Occupied
Returns 1 - If Free
"""
all_handles = [i for i in range(len(self.env.agents))]
others = self.get_others(handle,3)
for value in range(len(self.actual_junction_cluster[cluster])): # Checks whether the cluster is occupied
if self.actual_junction_cluster[cluster][value] in others.keys():
return 0
return 1
def get_clearence(self, cluster, index, others):
"""
Executes smart time-dependent traffic light
Returns 1 - If traffic light is green
Returns 0 - If traffic light is red
If more than 1 agent is waiting to enter a cluster, only a single agent gets a clearance
If only 1 agent waiting to enter a cluster, clearance always green for that agent
Pointers change values at each timestep
"""
for positions in range(
len(self.actual_stopping_positions[cluster])): # Iterate over all stopping positions of a cluster
if self.actual_stopping_positions[cluster][self.permanent_pointer_position[cluster]] in others.keys():
return 0, # Check if the position pointed by permanent pointer is occupied by another agent
if self.temporary_pointer_position[cluster] == index:
self.temporary_pointer_position[cluster] = self.permanent_pointer_position[cluster]
return 1 # Check if temporary pointer index is the same as our agent's index, if yes return 1 (green)
else:
self.temporary_pointer_position[cluster] = (self.temporary_pointer_position[cluster] + 1) % len(
self.actual_stopping_positions[cluster]) # Increment temporary pointer to check at next position
if self.actual_stopping_positions[cluster][self.temporary_pointer_position[cluster]] in others.keys():
self.temporary_pointer_position[cluster] = self.permanent_pointer_position[cluster]
return 0 # If temporary pointer index is same as another agent's index , then traffic signal for our agent is red
def update_pointers(self):
"""
Updates pointer by 1 at each timestep
"""
for i in range(len(self.actual_stopping_positions)):
self.permanent_pointer_position[i] = (self.time) % len(self.actual_stopping_positions[i])
self.temporary_pointer_position[i] = (self.time) % len(self.actual_stopping_positions[i])
return
# def get_others_traffic_light(self, handle):
# """
# Compute positions of all agents except our agent
# Returns - Others
# """
# all_handles = [i for i in range(len(self.env.agents))]
# others = []
# others2 = []
# for id in all_handles:
# if id != handle:
# if self.env.agents[id].position is None:
# if self.env.dones[id] is True:
## otherspos = (-3, -3)
# othersdirections = 0
# else:
# otherspos = (-3, -3)
# othersdirections = self.env.agents[id].initial_direction
# else: # position not None
# otherspos = self.env.agents[id].position
# othersdirections = self.env.agents[id].direction
# others.append((otherspos, othersdirections))
# others2.append([otherspos, othersdirections])
# return others, others2
# def get_initial_positions(self):
# """
# Get initial position of all agents
# """
# all_handles = [i for i in range(len(self.env.agents))]
# for id in all_handles:
# self.agent_initial_positions.append(
# [id, self.env.agents[id].initial_position, self.env.agents[id].initial_direction,
# self.env.agents[id].target])
# return
    def get_initialization_queue(self):
        """
        Group agents by (initial_position, initial_direction, target).

        ``self.queues`` maps that triple to ``[count, agent_id, ...]``;
        ``self.queue`` becomes a list of ``[activated_count, [agent ids]]``
        sorted by group size, largest group first.
        """
        for id in range(len(self.env.agents)):
            info = (self.env.agents[id].initial_position, self.env.agents[id].initial_direction,
                    self.env.agents[id].target)
            if info in self.queues.keys():
                self.queues[info][0] += 1
                self.queues[info].append(id)
            else:
                self.queues[info] = [1]
                self.queues[info].append(id)
        # Sort groups by their leading count (element 0), descending.
        queue = sorted(self.queues.values(), key=itemgetter(0), reverse=True)
        self.queue = []
        for element in queue:
            self.queue.append([0, element[1:]])
        return
    def get_initialization(self):
        """
        Staged spawning of agents, queue (cluster of identical start/target
        triples) by queue: activate agents from not-yet-finished queues while
        keeping the number of simultaneously active agents under
        self.upper_bound, with a timeout fallback that force-tries the
        remaining queues once self.initialization_timestep exceeds
        self.max_timestep.
        """
        clusters_to_check = []
        queue_size = 0
        if (self.num_active_agents.count(1) > self.upper_bound) and (self.initialization_timestep < self.max_timestep):
            return
        if len(self.agents_activated) == len(self.env.agents):
            return
        # Collect enough pending queues to (at least) fill the active budget.
        for index_cluster in range(len(self.queue)):
            if index_cluster not in self.clusters_activated:
                queue_size += len(self.queue[index_cluster][1])
                clusters_to_check.append(index_cluster)
                if self.upper_bound < queue_size:
                    break
        for index in clusters_to_check:
            if self.num_active_agents.count(1) > self.upper_bound:
                break
            for agent in self.queue[index][1]:
                if agent not in self.agents_activated:
                    others = self.get_others(agent,3 )
                    allowed = self.initialize(agent, self.observations[agent], others)
                    if allowed:
                        self.agents_activated.append(agent)
                        self.queue[index][0] += 1
                        self.num_active_agents[agent] = 1
                        # Mark the "not yet activated" observation flag as active.
                        self.observations[agent][self.OBS_SIZE - self.ADDITIONAL_INPUT + 1] = 0
                    if self.queue[index][0] == len(self.queue[index][1]):
                        self.clusters_activated.append(index)
                        self.initialization_timestep = 0
                    break
        # Timeout fallback: try every remaining queue regardless of budget.
        if self.initialization_timestep > self.max_timestep:
            for index_cluster in range(len(self.queue)):
                if index_cluster not in self.clusters_activated :
                    for agent in self.queue[index_cluster][1]:
                        if agent not in self.agents_activated:
                            others = self.get_others(agent,3 )
                            allowed = self.initialize(agent, self.observations[agent], others)
                            if allowed:
                                self.num_active_agents[agent] = 1
                                self.agents_activated.append(agent)
                                self.queue[index_cluster][0] += 1
                                self.observations[agent][self.OBS_SIZE - self.ADDITIONAL_INPUT + 1] = 0
                                if self.queue[index_cluster][0] == len(self.queue[index_cluster][1]):
                                    self.clusters_activated.append(index_cluster)
                                    self.initialization_timestep = 0
                            break
                    # NOTE(review): 'allowed' may be unbound here if the inner
                    # loop examined no candidate agent -- confirm/guard.
                    if allowed:
                        break
        return
    def initialize(self, handle, obs, others):
        """
        Input - agent handle, that agent's observation, others ({position: id})
        Returns 0 if the agent is NOT allowed to initialize (spawn)
        Returns 1 if the agent IS allowed to initialize
        (NOTE: the previous docstring had these two return values swapped;
        the caller in get_initialization treats a truthy result as allowed.)
        """
        # An agent is only allowed to initialize if -
        # 1) No other activated-but-unborn agent shares the same initial position
        # 2) No currently alive agent is at the agent's position
        # 3) No currently alive agent is currently behind the agent , this would lead to a crash
        # 4) The agent won't be blocked if it initializes
        for agent in range(len(self.env.agents)):
            if (agent != handle) and (agent in self.agents_activated) and (self.env.agents[agent].position == None and \
                                                                           self.num_active_agents[agent] != 2) and (
                    self.env.agents[agent].initial_position == self.env.agents[handle].initial_position):
                return 0
        if (self.env.agents[handle].initial_position in others.keys()):
            return 0
        # Check the cells reachable when facing the opposite way, i.e. the
        # cells directly behind the spawn position.
        valid_act_pos_pair = self._get_next_valid_position(self.env.agents[handle].initial_position,
                                                           ((self.env.agents[handle].direction + 2) % 4))
        for action, pos in valid_act_pos_pair.items():
            if pos[1] is not None:
                if (pos[1] in others.keys()):
                    return 0
        # Blocking indicator taken from this agent's own observation vector.
        agentblocking = obs[self.ENTRYS_PER_COLUMN + 2]
        if agentblocking != 0:
            return 0
        # if (self.num_active_agents.count(1) < self.upper_bound) or (self.initialization_timestep>self.max_timestep) :
        # if (self.initialization_timestep>self.max_timestep) :
        # self.initialization_timestep = 0
        # self.num_active_agents[handle] = 1
        # return 0
        return 1
if __name__ == '__main__':
    # Manual smoke test: build a sparse flatland environment that uses this
    # observation builder, then repeatedly reset it and step the first eight
    # agents forward while rendering.
    from flatland.envs.rail_env import RailEnv
    from flatland.envs.schedule_generators import sparse_schedule_generator
    from flatland.envs.rail_generators import sparse_rail_generator
    from flatland.utils.rendertools import RenderTool
    num_agent = 20
    def Complex_params():
        # Returns the (currently hard-coded) generator parameters; the
        # commented expressions are the randomised variants that were tried.
        grid_width = 25  # min(int(np.random.uniform(ENVIRONMENT_SIZE[0], ENVIRONMENT_SIZE[1] )),
        # int(np.random.uniform(ENVIRONMENT_SIZE[0], ENVIRONMENT_SIZE[1] )))
        grid_height = 25  # min(int(np.random.uniform(ENVIRONMENT_SIZE[0], ENVIRONMENT_SIZE[1])),
        # nt(np.random.uniform(ENVIRONMENT_SIZE[0], ENVIRONMENT_SIZE[1] )))
        rnd_start_goal = 8 + np.random.randint(0,
                                               3)  # int(np.random.uniform(num_workers, num_workers+1+episode_difficulty ))
        # int(np.random.uniform( num_workers , min(grid_width,grid_height))),
        rnd_extra = 1  # int(np.random.uniform(0 , 1+2*episode_difficulty ))
        # int(np.random.uniform( 0 , min(grid_width,grid_height))))
        rnd_min_dist = int(
            0.6 * min(grid_height, grid_width))  # int(np.random.uniform( episode_difficulty , 4+2*episode_difficulty ))
        rnd_max_dist = 99999  # int(np.random.uniform(3+episode_difficulty, 6+2*episode_difficulty))
        rnd_seed = 3
        return grid_width, grid_height, rnd_start_goal, rnd_extra, rnd_min_dist, rnd_max_dist, rnd_seed
    grid_width, grid_height, rnd_start_goal, rnd_extra, rnd_min_dist, rnd_max_dist, rnd_seed = Complex_params()
    # NOTE(review): width/height below are hard-coded to 35x20 and ignore the
    # grid_width/grid_height returned by Complex_params -- confirm intended.
    gameEnv = RailEnv(width=35, height=20,
                      rail_generator=sparse_rail_generator(
                          max_num_cities=5,
                          max_rails_between_cities=2,
                          max_rails_in_city=3,
                          grid_mode=False,
                          seed=rnd_seed)
                      ,
                      schedule_generator=sparse_schedule_generator(),
                      obs_builder_object=StateMaskingObs(),
                      number_of_agents=num_agent)
    gameEnv.reset(regenerate_rail=True, regenerate_schedule=True)
    env_renderer = RenderTool(gameEnv)
    for t in range(500):
        gameEnv.reset(regenerate_rail=True, regenerate_schedule=True)
        print(t)
        for i in range(30):
            # Action 2 = move forward; NOTE(review): only handles 0-7 receive
            # actions although num_agent is 20 -- confirm intended.
            obs, _, _, _ = gameEnv.step({0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2})
            #print(obs)
            env_renderer.render_env(show=True, frames=True, show_observations=True)
            # print(obs)
            # print(len(obs[0]))
            #input()
| StarcoderdataPython |
4952737 | <reponame>admariner/jurigged<filename>tests/snippets/dandelion:main.py
class Flower:
    """A named flower that can be serenaded."""

    def __init__(self, name):
        # Store the flower's display name.
        self.name = name

    def sing(self):
        """Return a short ode to this flower."""
        return "O {}, how beautiful are thee!".format(self.name)

    def test(self):
        """Print a quick smoke-test message."""
        print("test 1 2 3")
def pluck(n):
    """Return *n* decremented by one."""
    remaining = n - 1
    return remaining
def plack():
    """Always report success."""
    return True
| StarcoderdataPython |
5114606 | <reponame>Matheus-Rangel/dash-carbon-components<gh_stars>1-10
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
# Auto-generated Dash wrapper class: do not hand-edit the property lists.
class NumberInput(Component):
    """A NumberInput component.
    NumberInput component
    Keyword arguments:
    - allowEmpty (boolean; default False): `true` to allow empty string.
    - style (dict; optional): style of the input
    - ariaLabel (string; default 'Numeric input field with increment and decrement buttons'): Provide a description that would be used to best describe the use case of the NumberInput component
    - className (string; optional): Specify an optional className to be applied to the wrapper node
    - defaultValue (number | string; optional): Optional starting value for uncontrolled state
    - disabled (boolean; default False): Specify if the control should be disabled, or not
    - helperText (string; default ''): Provide text that is used alongside the control label for additional help
    - hideLabel (boolean; default False): Specify whether you want the underlying label to be visually hidden
    - iconDescription (string; default 'choose a number'): Provide a description for up/down icons that can be read by screen readers
    - id (string; required): Specify a custom `id` for the input
    - invalid (boolean; default False): Specify if the currently value is invalid.
    - invalidText (string; default 'Provide invalidText'): Message which is displayed if the value is invalid.
    - label (string; optional): Generic `label` that will be used as the textual representation of what
    this field is for
    - light (boolean; default False): `true` to use the light version.
    - max (number; optional): The maximum value.
    - min (number; optional): The minimum value.
    - readOnly (boolean; optional): Specify if the component should be read-only
    - size (a value equal to: 'sm', 'xl'; optional): Specify the size of the Number Input. Currently supports either `sm` or `xl` as an option.
    - step (number; default 1): Specify how much the valus should increase/decrease upon clicking on up/down button
    - value (number | string; optional): Specify the value of the input
    - warn (boolean; default False): Specify whether the control is currently in warning state
    - warnText (string; default ''): Provide the text that is displayed when the control is in warning state"""
    @_explicitize_args
    def __init__(self, allowEmpty=Component.UNDEFINED, style=Component.UNDEFINED, ariaLabel=Component.UNDEFINED, className=Component.UNDEFINED, defaultValue=Component.UNDEFINED, disabled=Component.UNDEFINED, helperText=Component.UNDEFINED, hideLabel=Component.UNDEFINED, iconDescription=Component.UNDEFINED, id=Component.REQUIRED, invalid=Component.UNDEFINED, invalidText=Component.UNDEFINED, label=Component.UNDEFINED, light=Component.UNDEFINED, max=Component.UNDEFINED, min=Component.UNDEFINED, readOnly=Component.UNDEFINED, size=Component.UNDEFINED, step=Component.UNDEFINED, value=Component.UNDEFINED, warn=Component.UNDEFINED, warnText=Component.UNDEFINED, **kwargs):
        self._prop_names = ['allowEmpty', 'style', 'ariaLabel', 'className', 'defaultValue', 'disabled', 'helperText', 'hideLabel', 'iconDescription', 'id', 'invalid', 'invalidText', 'label', 'light', 'max', 'min', 'readOnly', 'size', 'step', 'value', 'warn', 'warnText']
        self._type = 'NumberInput'
        self._namespace = 'dash_carbon_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['allowEmpty', 'style', 'ariaLabel', 'className', 'defaultValue', 'disabled', 'helperText', 'hideLabel', 'iconDescription', 'id', 'invalid', 'invalidText', 'label', 'light', 'max', 'min', 'readOnly', 'size', 'step', 'value', 'warn', 'warnText']
        self.available_wildcard_properties = []
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        # Forward only the explicitly supplied props to the base Component.
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # Enforce the props marked as required in the generated docstring.
        for k in ['id']:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(NumberInput, self).__init__(**args)
| StarcoderdataPython |
8001744 | <filename>lucy/verbosity.py
import os
from contextlib import redirect_stdout

# Module-wide flag consulted by the @verbose decorator below.
VERBOSE = False


def set_verbosity(verbose):
    """Turn on/off verbose output."""
    global VERBOSE
    VERBOSE = verbose
def verbose(func):
    """Redirect output to /dev/null if VERBOSE is False.

    Decorator: when verbosity is disabled the wrapped call runs with
    stdout silenced; otherwise it runs normally.  ``functools.wraps``
    is applied so the wrapper keeps the wrapped function's name,
    docstring and signature metadata (the original decorator lost them).
    """
    from functools import wraps

    @wraps(func)
    def decorator(*args, **kwargs):
        if not VERBOSE:
            # Silence anything the wrapped function prints.
            with open(os.devnull, 'w') as void:
                with redirect_stdout(void):
                    return func(*args, **kwargs)
        return func(*args, **kwargs)
    return decorator
| StarcoderdataPython |
12852193 | from datetime import datetime
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, no_translations
from note.models import NoteIndexPage
from note.models import NotePage
class Command(BaseCommand):
    """Management command that creates a NotePage under a NoteIndexPage."""

    help = 'Create note page'

    def add_arguments(self, parser):
        """Register the required CLI options (--index-id/--title/--intro/--owner)."""
        parser.add_argument(
            '--index-id', action='store', required=True,
            help='set index page id')
        parser.add_argument(
            '--title', action='store', required=True,
            help='set title')
        parser.add_argument(
            '--intro', action='store', required=True,
            help='set intro')
        parser.add_argument(
            '--owner', action='store', required=True,
            help='set owner')

    @no_translations
    def handle(self, *args, **options):
        """Create the note page, attach it beneath the index page and report."""
        index = NoteIndexPage.objects.get(id=options['index_id'])
        User = get_user_model()
        owner = User.objects.get(username=options['owner'])
        note = NotePage(
            title=options['title'],
            intro=options['intro'],
            date=datetime.now(),
            owner=owner)
        # add_child() inserts the new page beneath the index page in the tree.
        index.add_child(instance=note)
        self.stdout.write(self.style.SUCCESS(f'created: {repr(note)}'))
| StarcoderdataPython |
from noise.backends.experimental.backend import ExperimentalNoiseBackend

# Module-level backend singleton (presumably looked up by name by the noise
# framework -- verify against the consumer).
noise_backend = ExperimentalNoiseBackend()
| StarcoderdataPython |
1960909 | <filename>5/evens_calculator.py<gh_stars>0
# Read an upper bound and report the sum of all even numbers below it.
range_length = int(input("Type range length: "))
even_sum = sum(range(0, range_length, 2))
print(f"Sum of evens in range from 0 to {range_length} is equal {even_sum}")
6488036 | ########################################################################
# LeetCode Problem Number : 102
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/binary-tree-level-order-traversal/
########################################################################
from binary_search_tree.tree_node import TreeNode
class BinaryTree:
    """ level-order traversal
    visit all nodes at a particular level
        3
       / \
      9  20
         / \
        15  7
    level - order -> [3, 9, 20, 15, 7]
    """

    # runtime -> 98.90%, memory -> 21.67%
    def levelOrder(self, root: TreeNode) -> [[int]]:
        # Iterative BFS: collect node values one level at a time.
        if not root:
            return []
        res = []
        current_level = [root]
        while current_level:
            cval = []        # values collected on this level
            next_level = []  # nodes to visit on the level below
            for node in current_level:
                cval.append(node.val)
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            res.append(cval)
            """ set next_level to the current_level for next iteration """
            # The swap advances current_level to next_level; the old level
            # list bound to next_level is simply discarded next iteration.
            next_level, current_level = current_level, next_level
        return res
| StarcoderdataPython |
96510 | '''Compute the SLR parsing table for a grammar read from stdin in extended
sytax, printing the original grammar, augmented grammar, first sets, follow
sets, and table to stdout in HTML.'''
import sys

from cfg.cfg_reader import *
from cfg.slr import *

# NOTE(review): this script uses Python 2 syntax ("except ValueError, e" and
# print statements) and will not run under Python 3.
try:
    # Parse a context-free grammar from stdin.
    G = parse_cfg(sys.stdin.read())
except ValueError, e:
    print e
    sys.exit(1)
# Build the SLR parsing table and emit each stage as HTML sections.
T = ParsingTable(G)
print '<h1>Original Grammar</h1>'
print T._grammar.html()
print '<h1>Augmented Grammar</h1>'
print T._automaton.augmented_grammar().html()
print '<h1>First Sets</h1>'
print T._first_sets.html()
print '<h1>Follow Sets</h1>'
print T._follow_sets.html()
print '<h1>Parsing Table</h1>'
print T.html()
| StarcoderdataPython |
6574956 | <reponame>OptimalDesignLab/pyStatReduce
# run_hadamard_eigen_accuracy
import os
import sys
import errno
import numpy as np
import chaospy as cp
from pystatreduce.stochastic_collocation import StochasticCollocation
from pystatreduce.quantity_of_interest import QuantityOfInterest
from pystatreduce.dimension_reduction import DimensionReduction
import pystatreduce.examples as examples
def run_hadamard(systemsize, eigen_decayrate, std_dev, n_sample):
    """Compare Arnoldi-estimated dominant eigenvalues against exact ones.

    Builds a Hadamard quadratic QoI, runs dimension reduction once with the
    exact Hessian and once with the Arnoldi sampling approximation, and
    returns the relative error norm over the 10 most dominant eigenvalues.
    """
    # n_collocation_pts = 2
    # Create Hadmard Quadratic object
    QoI = examples.HadamardQuadratic(systemsize, eigen_decayrate)
    # Create stochastic collocation object
    # collocation = StochasticCollocation(n_collocation_pts, "Normal")
    # Initialize chaospy distribution
    x = np.random.randn(QoI.systemsize)
    jdist = cp.MvNormal(x, np.diag(std_dev))
    threshold_factor = 0.5
    dominant_space_exact = DimensionReduction(threshold_factor=threshold_factor,
                                              exact_Hessian=True)
    dominant_space = DimensionReduction(threshold_factor=threshold_factor,
                                        exact_Hessian=False,
                                        n_arnoldi_sample=n_sample)
    dominant_space.getDominantDirections(QoI, jdist, max_eigenmodes=20)
    dominant_space_exact.getDominantDirections(QoI, jdist)
    # Sort the exact eigenvalues in descending order
    sort_ind = dominant_space_exact.iso_eigenvals.argsort()[::-1]
    # Compare the eigenvalues of the 10 most dominant spaces
    lambda_exact = dominant_space_exact.iso_eigenvals[sort_ind]
    error_arr = dominant_space.iso_eigenvals[0:10] - lambda_exact[0:10]
    # print 'error_arr = ', error_arr
    rel_error_norm = np.linalg.norm(error_arr) / np.linalg.norm(lambda_exact[0:10])
    return rel_error_norm
# Sweep problem sizes, eigenvalue decay rates and Arnoldi sample counts,
# averaging the relative eigenvalue error over random standard deviations,
# and dump the avg/max/min error curves to text files for plotting.
systemsize_arr = [64, 128, 256]
eigen_decayrate_arr = [2.0, 1.0, 0.5]
n_arnoldi_samples_arr = [11, 21, 31, 41, 51]
n_stddev_samples = 10
eigen_decayrate_arr_idx = 0
err_arr = np.zeros([len(n_arnoldi_samples_arr), n_stddev_samples])
avg_err = np.zeros(len(n_arnoldi_samples_arr))
max_err = np.zeros(len(n_arnoldi_samples_arr))
min_err = np.zeros(len(n_arnoldi_samples_arr))
for eigen_decayrate_arr_idx in range(0, len(eigen_decayrate_arr)):
    for i in systemsize_arr:
        for j in range(0, len(n_arnoldi_samples_arr)):
            print('decay rate = ', eigen_decayrate_arr[eigen_decayrate_arr_idx]
                  , ', systemsize = ', i, ', arnoldi samples = ', n_arnoldi_samples_arr[j])
            for k in range(0, n_stddev_samples):
                # Fresh random standard deviations for every repetition.
                std_dev = abs(np.random.randn(i))
                err_arr[j,k] = run_hadamard(i, eigen_decayrate_arr[eigen_decayrate_arr_idx],
                                            std_dev, n_arnoldi_samples_arr[j])
                # print 'error_norm = ', error_norm
                # sys.exit()
            avg_err[j] = np.mean(err_arr[j,:])
            max_err[j] = np.max(err_arr[j,:])
            min_err[j] = np.min(err_arr[j,:])
        dirname = ''.join(['./plot_data/eigen_accuracy/', str(i), '/'])
        # Create the directory if it doesn't exist
        if not os.path.isdir(dirname):
            try:
                os.makedirs(dirname)
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        fname1 = ''.join([dirname, 'avg_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])
        fname2 = ''.join([dirname, 'max_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])
        fname3 = ''.join([dirname, 'min_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])
        np.savetxt(fname1, avg_err, delimiter=',')
        np.savetxt(fname2, max_err, delimiter=',')
        np.savetxt(fname3, min_err, delimiter=',')
| StarcoderdataPython |
3245288 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
# from functools import lru_cache
# from itertools import combinations
from copy import deepcopy
import time
# import regex
from collections import defaultdict
def arguments():
    """Parse command-line arguments; -f/--file is mandatory."""
    arg_parser = argparse.ArgumentParser(description='Adventofcode.')
    arg_parser.add_argument('-f', '--file', required=True)
    return arg_parser.parse_args()
class AllergenAssessment():
    """Identify allergen-free ingredients from a list of recipes.

    Two alternative pipelines are kept side by side:
    ``check_recipies()`` (set-intersection approach) and
    ``build_recipies()`` / ``safe_ingredients()`` (per-recipe filtering).
    """

    def __init__(self):
        self.reset()
        self.all_ingredients_cnt = defaultdict(int)   # ingredient -> occurrence count
        self.allergens = defaultdict(list)            # allergen -> list of ingredient sets
        self.all_possible = set()                     # ingredients that may carry an allergen
        self.intersects = {}                          # allergen -> candidate ingredient set
        self.num_safe_ingredients = 0

    def reset(self):
        """Clear per-run state (counters created in __init__ are not reset)."""
        self.instructions = None
        self.recipes = []
        self.possible_allers = defaultdict(set)
        self.recipes_with = defaultdict(list)
        self.safe = []
        # self.num_safe_ingredients = 0
        self.exact_allers = defaultdict(set)

    def check_recipies(self):
        """Count ingredients that can never hold an allergen (intersection method)."""
        for line in self.instructions:
            ingredients = []
            if line[-1] == ')':
                toks = line.split(' (')
                ingredients = toks[0].split(' ')
                al = toks[1][9:-1].split(', ')  # strip "contains " prefix and trailing ')'
                for a in al:
                    self.allergens[a].append(set(toks[0].split(' ')))
            else:
                ingredients = line.split(' ')
            for i in ingredients:
                self.all_ingredients_cnt[i] += 1
        for k, v in self.allergens.items():
            # An allergen can only hide in ingredients common to all its recipes.
            self.intersects[k] = set.intersection(*v)
            self.all_possible.update(self.intersects[k])
        for k, v in self.all_ingredients_cnt.items():
            if k not in self.all_possible:
                self.num_safe_ingredients += 1

    def build_recipies(self):
        """Parse instructions into recipes plus allergen/ingredient indexes."""
        # self.instructions = [x.replace(')', '').split('(') for x in self.instructions]
        for idx, line in enumerate(self.instructions):
            ingredients, allergens = line.rstrip(')\n').split(' (contains ')
            ingredients = set(ingredients.split())
            allergens = set(allergens.split(', '))
            self.recipes.append(ingredients)
            for aller in allergens:
                self.recipes_with[aller].append(idx)
            for ingr in ingredients:
                self.possible_allers[ingr] |= allergens

    def safe_ingredients(self):
        """Drop impossible allergen candidates; count safe-ingredient occurrences."""
        for k, v in self.possible_allers.items():
            possible = deepcopy(self.possible_allers[k])
            impossible = set()
            for aller in v:
                # Allergen impossible if some recipe listing it lacks this ingredient.
                if any(k not in self.recipes[x] for x in self.recipes_with[aller]):
                    impossible.add(aller)
            possible -= impossible
            if not possible:
                self.safe.append(k)
        # self.exact_allers = deepcopy(self.possible_allers)
        for safes in self.safe:
            # del self.exact_allers[safes]
            self.num_safe_ingredients += (sum(safes in x for x in self.recipes))

    def find_exact_ingredient(self):
        """Greedily pair each key with its single remaining candidate and print the pairs."""
        self.exact_allers = sorted(self.possible_allers, key=lambda x: len(self.possible_allers[x]))
        solution = []
        while (len(self.exact_allers) > 0):
            allergen = self.exact_allers[0]
            ing = self.possible_allers[allergen].pop()
            solution.append((self.exact_allers[0], ing))
            # delete ingredient from all allergens
            for v in self.possible_allers.values():
                if ing in v:
                    v.remove(ing)
            del self.possible_allers[allergen]
            self.exact_allers = sorted(self.possible_allers, key=lambda x: len(self.possible_allers[x]))
        print(solution)
def main():
    """Entry point: read the input file, solve part 1 and report timing."""
    startTime = time.time()
    args = arguments()
    with open(args.file) as file:
        input_file = file.read().splitlines()
    allergens = AllergenAssessment()
    allergens.instructions = input_file
    allergens.check_recipies()
    print(f'Part1: {allergens.num_safe_ingredients}')
    print(f'Execution time in seconds: {(time.time() - startTime)}')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1705756 | <filename>kivygallery.py
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from kivy.properties import StringProperty, ListProperty
from kivy.clock import Clock
Builder.load_string("""
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:import Window kivy.core.window.Window
<Gallery>
img_sources:["image1.jpg", "image2.jpg", "image3.jpg", "image4.jpg"]
Carousel:
id:gallery
loop:True
anim_type:"in_out_circ"
on_current_slide:root.indicae_slide(*args)
pos_hint:{"center_x":.5, "center_y":.5}
GaleryButton:
text:'<'
size_hint:.05, .05
pos_hint:{"center_x":.1, "center_y":.5}
on_press:gallery.load_previous()
GaleryButton:
text:'>'
size_hint:.05, .05
pos_hint:{"center_x":.9, "center_y":.5}
on_press:gallery.load_next()
BoxLayout:
id:gallery_navs
spacing:10
size_hint_x: .20 if len(self.children) <= 5 else .50
size_hint_y: .05
pos_hint:{"center_x":.5, "center_y":.05}
<GaleryButton>
opacity:1 if self.collide_point(*Window.mouse_pos) else 0.5
canvas.before:
Color:
rgba:get_color_from_hex(self.background_color)
Ellipse:
size:self.size
pos:self.pos
<MyImage>:
canvas.before:
Rectangle:
source:self.source
size:self.size
pos:self.pos
""")
class GaleryButton(ButtonBehavior, Label):
    # Fill colour of the circular button, as a hex string (see kv rule).
    background_color = StringProperty('#808080')
class MyImage(Label):
    # Path of the image drawn by the <MyImage> kv rule.
    source = StringProperty()
class Gallery(FloatLayout):
    """Image carousel with prev/next buttons and numbered slide indicators."""

    # Image paths shown by the carousel (also populated from the kv rule).
    img_sources = ListProperty([])

    def __init__(self, **kwargs):
        super(Gallery, self).__init__(**kwargs)
        # Defer building slides until the kv rule has populated img_sources.
        Clock.schedule_once(self.accept_images, 0)

    def accept_images(self, *args):
        """Create one slide plus one nav button per image source.

        BUG FIX: Clock.schedule_once invokes its callback with a ``dt``
        argument, so this method must accept it; the original
        zero-argument signature raised a TypeError when the scheduled
        call fired.  ``*args`` keeps direct calls working too.
        """
        for index, img_src in enumerate(self.img_sources):
            async_img = MyImage(source=img_src)
            self.ids.gallery.add_widget(async_img)
            self.ids.gallery_navs.add_widget(GaleryButton(text=str(index),
                                                          on_press=self.load_currentslide,
                                                          background_color="#1E90FF"))
        # kivy children are stored in reverse add order, so [-1] is the
        # first-added (index 0) nav button.
        self.ids.gallery_navs.children[-1].disabled = True
        # Auto-advance to the next slide every 6 seconds.
        Clock.schedule_interval(lambda dt: self.ids.gallery.load_next(), 6)

    def load_currentslide(self, button):
        """Jump to the slide whose index matches the pressed nav button."""
        gallery = self.ids.gallery
        gallery.load_slide(gallery.slides[int(button.text)])

    def indicae_slide(self, *args):
        """Disable the nav button of the slide currently shown, enable the rest."""
        gallery = self.ids.gallery
        navbtns = self.ids.gallery_navs.children
        for btn in navbtns:
            if int(btn.text) == gallery.index:
                btn.disabled = True
            else:
                btn.disabled = False
class SimpleApp(App):
    """Minimal kivy app that displays the Gallery widget."""

    def build(self):
        # Root widget of the application window.
        return Gallery()


SimpleApp().run()
| StarcoderdataPython |
3522956 | <gh_stars>100-1000
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on February 22 2019
@author: talbpaul
Template interface for the test UQ template input.
"""
from __future__ import print_function, unicode_literals
import os
import configparser
from collections import OrderedDict
from UQTemplate.UQTemplate import UQTemplate
print('Loading template ...')
temp = UQTemplate()
temp.loadTemplate('uq_template.xml', os.path.dirname(__file__))
print(' ... template loaded')

# information needed by UQTemplate to make run file
print('Loading input file ...')
config = configparser.ConfigParser()
config.read('UQTemplate/uq_template_input.i')
print(' ... input file loaded')

model = {'file': config.get('model', 'file'),
         'output': list(x.strip() for x in config.get('model', 'output').split(','))
         }

variables = OrderedDict()
for var in config['variables'].keys():
    # BUG FIX: the original always read config option 'x', so every variable
    # silently received the mean/std of "x"; read the option named after the
    # variable being processed instead.
    mean, std = list(float(v) for v in config.get('variables', var).split(','))
    variables[var] = {'mean': mean,
                      'std': std}

case = config.get('settings', 'case')
numSamples = config.getint('settings', 'samples')
workflow = os.path.join('UQTemplate', config.get('settings', 'workflow'))

print('Writing RAVEN file ...')
template = temp.createWorkflow(model=model, variables=variables, samples=numSamples, case=case)
errors = temp.writeWorkflow(template, workflow, run=False)

# finish up
if errors == 0:
    print('\n\nSuccessfully wrote input "{}". Run it with RAVEN!\n'.format(workflow))
else:
    print('\n\nProblems occurred while running the code. See above.\n')
| StarcoderdataPython |
1719173 | import os
import time
import urllib.parse
import requests
from bs4 import BeautifulSoup
import argparse
# Mutable module-level state shared between main() and crawl_all_files().
base_url = ""                   # root URL being crawled
directory = ""                  # local directory files are saved under
is_directory_specified = False  # True when the user passed -d/--directory
def crawl_all_files(url):
    """Recursively download every file listed by the directory index at *url*.

    hrefs ending in '/' are treated as sub-directories and recursed into;
    everything else is streamed to disk under the configured target
    directory, mirroring the server's path structure.
    """
    global base_url
    global directory
    global is_directory_specified
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    links = soup.find_all('a')
    for link in links:
        file = link.attrs.get('href')
        # If the file is a directory
        if file.endswith('/'):
            print("\"" + file + "\", directory")
            crawl_all_files(url + file)
        # Else, the file is not a directory
        else:
            print("\"" + file + "\", file")
            print(url + file)
            parse_result = urllib.parse.urlparse(url + file)
            # If the directory to store the files is specified
            if is_directory_specified:
                file_path = directory + \
                    urllib.parse.unquote(parse_result.path, encoding='UTF-8') \
                    .replace('/', os.sep)
            # Else, use the default directory (host with ':' sanitised)
            else:
                file_path = parse_result.netloc.replace(':', '_') + \
                    urllib.parse.unquote(parse_result.path, encoding='UTF-8') \
                    .replace('/', os.sep)
            # Get the file in stream mode
            file_request = requests.get(url + file, stream=True)
            # Get the size of the file.
            # NOTE(review): assumes the server always sends Content-Length;
            # int(None) would raise otherwise -- confirm for the target server.
            file_size = int(file_request.headers.get('Content-Length'))
            parent_directory_path = os.path.abspath(os.path.join(file_path, os.pardir))
            # If the parent directory does not exist
            if not os.path.exists(parent_directory_path):
                os.makedirs(parent_directory_path)
            # Download the file
            chunk_size = 128
            start = time.time()
            with open(file_path, 'wb') as fd:
                downloaded_size = 0
                for chunk in file_request.iter_content(chunk_size=chunk_size):
                    # BUG FIX: count the bytes actually received; the final
                    # chunk is usually shorter than chunk_size, so adding
                    # chunk_size overstated progress (past 100%).
                    downloaded_size += len(chunk)
                    print(str(downloaded_size) + " bytes, " +
                          str(downloaded_size * 100 / file_size) + "%")
                    fd.write(chunk)
            end = time.time()
            print(str(file_size) + " bytes downloaded in {interval}s."
                  .format(interval=str(end - start)))
            print("File path: " + file_path)
            file_request.close()
    r.close()
    return
def main():
    """Parse CLI arguments, prepare the target directory and start crawling."""
    global base_url
    global directory
    global is_directory_specified
    # Parse the command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("url", help="The full url of the server.")
    parser.add_argument("-d", "--directory", type=str, help="The target directory.")
    args = parser.parse_args()
    # For the positional argument "url"
    print("Starting crawling files from " + args.url + "...")
    base_url = args.url
    # For the optional argument "directory"
    if args.directory is not None:
        is_directory_specified = True
        print("directory = \"" + args.directory + "\"")
        directory = args.directory
    else:
        parse_result = urllib.parse.urlparse(base_url)
        directory = parse_result.netloc.replace(':', '_')
        print("Directory is not specified. The files will be stored into ")
    # Make the root directory.
    # BUG FIX: exist_ok=True keeps a re-run from crashing with
    # FileExistsError when the target directory already exists.
    os.makedirs(directory, exist_ok=True)
    crawl_all_files(base_url)
    return


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6563953 | """
Controls the scene graph viewer.
@author <NAME>
"""
import json
from tools.envedit.edenv_component import EComponent
from tools.envedit.graph_node import GraphNode
from tools.envedit.gui.gui_component import GUIComponent
from tools.envedit.gui.gui_dock_layout import GUIDockLayout
from tools.envedit.gui.gui_frame import GUIFrame
from tools.envedit.gui.gui_label import GUILabel
from tools.envedit.gui.gui_list import GUIList
from tools.envedit.gui.gui_list_dropdown import GUIListDropdown
from tools.envedit.gui.gui_menu_item import GUIMenuItem
from tools.envedit.gui.gui_scroll_container import GUIScrollContainer
from tools.envedit.gui.gui_stack_layout import GUIStackLayout
from tools.envedit.gui.gui_system import GUISystem
from tools.envedit.gui.gui_text_box import GUITextBox
from tkinter import filedialog
class GraphViewer(GUIFrame):
    """Dockable editor panel that renders the scene graph as a selectable tree."""

    def __init__(self):
        GUIFrame.__init__(self)

        # Scene graph
        self.envedit_data = None

        # GUI settings
        self.bg_color = (0, 0, 0, 0.8)
        self.bbox.width = 300
        self.padding = 10
        master_layout = GUIDockLayout()
        self.set_child(master_layout)
        layout = GUIStackLayout()
        master_layout.set_child_dock(layout, GUIDockLayout.TOP)
        title = GUILabel()
        title.text_size = 30
        title.set_font(GUISystem.get_font("default_light"))
        title.set_text("Scene Graph")
        layout.add_child(title)
        spacer = GUIComponent()
        spacer.bbox.height = 20
        layout.add_child(spacer)
        scroll_container = GUIScrollContainer(scroll_v=True, scroll_h=True)
        master_layout.set_child_dock(scroll_container, GUIDockLayout.CENTER)
        self.scene_list = GUIList()
        scroll_container.set_child(self.scene_list)

    # Adds a node from the scene tree to the graph viewer.
    def setup_scene_tree(self, node, parent):
        """Mirror *node* (and recursively its subtree) into the list under *parent*."""
        # Create list item element
        list_item = None
        if type(parent) == GUIList:
            for item in parent.child.children:
                if item.data is node:
                    list_item = item
                    break
        else:
            for item in parent.sub_list:
                if item.data is node:
                    list_item = item
                    break

        # If list item was not found, insert the new item into the hierarchy
        if list_item is None:
            node.pressed_callback = self.node_pressed_handler
            list_item = self.create_new_item(node)

            # For the root node, just add it to the scene list
            if type(parent) == GUIList:
                self.scene_list.child.clear()
                parent.add_item(list_item)
            # Expand the parent if it's a collapsed dropdown, and add to the scene list
            else:
                if not parent.expanded:
                    parent.expand()
                parent.add_sub_item(list_item)
                self.scene_list.add_item(list_item, self.scene_list.child.children.index(parent) + 1)

        # Propagate to children
        for child in node.children:
            self.setup_scene_tree(child, list_item)

        # If there exist any sub items of list_item that aren't children of the node, remove them
        for child in list_item.sub_list:
            if child.data not in node.children:
                list_item.remove_sub_item(child)
                self.scene_list.remove_item(child)

    # Clears all nodes from viewer
    def clear_viewer(self):
        # Removing the root item removes all items in the list
        if len(self.scene_list.child.children) > 0:
            self.scene_list.remove_item(self.scene_list.child.children[0])

    # Creates a new list item based off the scene node
    def create_new_item(self, node):
        list_item = GUIListDropdown()
        list_item.label.text_size = 12
        list_item.label.set_text(node.name)
        list_item.data = node
        list_item.select_callback = self.list_item_clicked
        list_item.right_release_callback = self.list_item_right_released
        return list_item

    # Sets the data model
    def set_envedit_data(self, envedit_data):
        self.envedit_data = envedit_data
        self.setup_scene_tree(self.envedit_data.scene_root, self.scene_list)

    # Updates the viewer
    def update_viewer(self):
        if self.envedit_data is not None:
            self.setup_scene_tree(self.envedit_data.scene_root, self.scene_list)

            # If target_node is in list, select it
            if self.scene_list.selected_item is None or (self.envedit_data.target_node is not None and self.scene_list.selected_item.data is not self.envedit_data.target_node):
                for list_item in self.scene_list.child.children:
                    if list_item.data is self.envedit_data.target_node:
                        list_item.select()
                        break
            self.update()

    # Called when a list item is clicked
    def list_item_clicked(self, item):
        self.envedit_data.set_target_node(item.data)

    # Called when a list item is right clicked
    def list_item_right_released(self, item):
        # Create context menu
        menu = GUISystem.create_context_menu()
        add_node_button = GUIMenuItem()
        add_node_button.child.set_text("Create Child Node")
        add_node_button.on_release = self.add_node_handler
        add_node_button.data = item
        menu.child.add_child(add_node_button)
        # Deleting the root node is not offered.
        if item.data is not self.envedit_data.scene_root:
            del_node_button = GUIMenuItem()
            del_node_button.child.set_text("Delete Node")
            del_node_button.on_release = self.del_node_handler
            del_node_button.data = item
            menu.child.add_child(del_node_button)
        ren_node_button = GUIMenuItem()
        ren_node_button.child.set_text("Rename Node")
        ren_node_button.on_release = self.ren_node_handler
        ren_node_button.data = item
        menu.child.add_child(ren_node_button)
        import_node_button = GUIMenuItem()
        import_node_button.child.set_text("Import Node Tree")
        import_node_button.on_release = self.import_node_handler
        import_node_button.data = item
        menu.child.add_child(import_node_button)
        export_node_button = GUIMenuItem()
        export_node_button.child.set_text("Export Node Tree")
        export_node_button.on_release = self.export_node_handler
        export_node_button.data = item
        menu.child.add_child(export_node_button)

        # No clue why this works
        menu.update()
        menu.update()

    # Handles a node being selected
    def node_pressed_handler(self, node):
        self.envedit_data.target_node = node
        self.envedit_data.update()

    # Handles the "add node" option being selected
    def add_node_handler(self, item):
        # Create new node
        pos_comp = EComponent.from_script("components.position")
        pos_comp.property_vals["scale"][0] = "1"
        pos_comp.property_vals["scale"][1] = "1"
        pos_comp.property_vals["scale"][2] = "1"
        new_node = GraphNode(f"New Node ({len(item.data.sub_list)})", [pos_comp])
        item.data.data.add_child(new_node)
        new_node.component_property_changed()

        # Update model
        self.envedit_data.modify()
        self.envedit_data.update()

    # Handles the "delete node" option being selected
    def del_node_handler(self, item):
        # Delete node from graph
        item.data.data.parent.remove_child(item.data.data)

        # Update model
        self.envedit_data.modify()
        self.envedit_data.update()

    # Handles the "rename node" option being selected
    def ren_node_handler(self, item):
        # Replace list label with textbox
        node_name = item.data.data.name
        text_box = GUITextBox()
        text_box.padding = 0
        text_box.data = item.data
        text_box.set_text(node_name)
        # NOTE(review): index 3 appears to be the label slot in the dropdown
        # row layout -- confirm against GUIListDropdown.
        item.data.child.remove_child(item.data.child.children[3])
        item.data.child.add_child(text_box)
        item.data.update()
        text_box.handle_left_pressed()
        text_box.on_lost_focus = self.rename_lost_focus

    # Handles the "export node tree" option being selected
    def export_node_handler(self, item):
        # Open file dialog
        filetypes = [("JSON", "*.json")]
        file_path = filedialog.asksaveasfilename(filetypes=filetypes, defaultextension=filetypes)

        # Save graph node
        if file_path != "":
            file_dict = GraphNode.scene_graph_to_dict(item.data.data)
            with open(file_path, "w") as file:
                json.dump(file_dict, file)

    # Handles the "import node tree" option being selected
    def import_node_handler(self, item):
        # Open file dialog
        filetypes = [("JSON", "*.json")]
        file_path = filedialog.askopenfilename(filetypes=filetypes, defaultextension=filetypes)

        # Open graph node
        if file_path != "":
            with open(file_path, "r") as file:
                # Import sub tree
                file_json = json.load(file)
                imported_node = GraphNode.dict_to_scene_graph(file_json)

                # Replace NODE property to proper ones
                conv_dict = GraphNode.replace_id(imported_node)
                GraphNode.replace_node_props(imported_node, conv_dict)

                # Add sub tree to graph
                item.data.data.add_child(imported_node)
                self.envedit_data.update()

    # Handles losing focus of renaming text box
    def rename_lost_focus(self, item):
        # Check if name was changed
        if item.data.data.name != item.text:
            self.envedit_data.modify()

        # Replace textbox with list label
        item.data.data.name = item.text
        label = GUILabel()
        label.text_size = 12
        label.receive_events = False
        label.set_text(item.data.data.name)
        item.data.child.remove_child(item.data.child.children[3])
        item.data.child.add_child(label)
        item.data.update()
| StarcoderdataPython |
1904982 | from django.shortcuts import render
# Create your views here.
from lib.http import render_json
from vip.models import Vip
def show_vip_permissions(request):
    """Return a JSON mapping of each Vip name to its permission descriptions."""
    data = {}
    for vip in Vip.objects.all():
        per_list = []
        for per in vip.permissions:
            per_list.append(per.description)
        data[vip.name] = per_list
    # NOTE(review): debug print left in a request handler -- consider removing.
    print(data)
    return render_json(data)
81337 | from .pyomexmeta import PersonalInformation
from .pyomexmeta import EnergyDiff
from .pyomexmeta import PhysicalProcess
from .pyomexmeta import RDF, Editor, PhysicalEntity
from .pyomexmeta import SingularAnnotation
from .pyomexmeta import OmexMetaException
from .pyomexmeta_api import PyOmexMetaAPI, get_version, eUriType, eXmlType
# Package version string, resolved via the API layer once at import time.
__version__ = get_version()
def run_tests():
    """Discover and run this package's unittest suite."""
    import os
    import unittest

    here = os.path.abspath(os.path.dirname(__file__))
    suite = unittest.TestLoader().discover(here)
    unittest.TextTestRunner().run(suite)
| StarcoderdataPython |
9787965 | import asyncio
from asyncio import Event
from typing import Awaitable
class TestBind:
def __init__(self):
return
async def keyEVT(self):
def bind(self, event:Event, handler: Awaitable):
async def print_msg(text: str = None):
if text is not None:
print(text)
print('sleeping(5)')
for _ in range(0, 5):
await asyncio.sleep(1.)
print(f"slept {_}")
print("Woke up")
return
async def do_when(c):
task = asyncio.create_task(print_msg(c))
await asyncio.wait_for(task, 10.)
return
async def something(on_key=None, on_when=None, what=None, when: int = 51):
t = asyncio.create_task(on_key())
for _ in range(0, 50):
await asyncio.sleep(.1)
print(f"{_}")
if _ == when:
if on_when is None:
pass
else:
await on_when(what)
print(f"Interrupted by {on_when.__repr__}")
return
print("Normal exit...")
return
async def which_key_pressed():
while True:
if keyboard.Key is not None:
pass
# print(keyboard.Key)
await asyncio.sleep(0.01)
async def main():
    """Entry coroutine: run the polling demo with the key-watcher attached."""
    await something(on_key=which_key_pressed)
if __name__ == '__main__':
    # asyncio.run creates and manages its own event loop; the original
    # ``loopy = asyncio.get_event_loop()`` was unused (and that usage is
    # deprecated), so it has been removed.
    asyncio.run(main(), debug=True)
| StarcoderdataPython |
1911120 | <gh_stars>1-10
import capybara
from capybara.tests.suite import DriverSuite
# Register the factory with capybara under the driver name "webtest".
@capybara.register_driver("webtest")
def init_webtest_driver(app):
    """Build a capybara-webtest Driver wrapping *app*.

    The import is deferred so capybara_webtest is only required once the
    driver is actually instantiated.
    """
    from capybara_webtest.driver import Driver
    return Driver(app)
# Standard capybara driver test suite for the "webtest" driver.  Features a
# plain WSGI test client cannot support (JS, frames, screenshots, real
# windows, ...) are skipped.
WebTestDriverSuite = DriverSuite(
    "webtest",
    skip=["css", "frames", "hover", "js", "modals", "screenshot", "send_keys", "server", "windows"])
| StarcoderdataPython |
11242081 | import numpy
import pylab
from cmepy import model, solver, recorder
def main():
    """Solve the A->B conversion CME for 20 initial copies and plot the
    marginal reaction-count distribution at six time points."""
    initial_copies = 20
    conversion_model = model.create(
        propensities=[lambda x: initial_copies - x],
        transitions=[(1, )],
        shape=(initial_copies + 1, ),
        initial_state=(0, ),
    )
    cme_solver = solver.create(model=conversion_model, sink=False)
    count_recorder = recorder.create((('A->B', ), ))
    # Step the solver and record the distribution at each output time.
    for t in numpy.linspace(0.0, 3.0, 6):
        cme_solver.step(t)
        count_recorder.write(t, cme_solver.y)
    pylab.figure()
    measurement = count_recorder['A->B']
    for t, dist in zip(measurement.times, measurement.distributions):
        pylab.plot(dist.to_dense(conversion_model.shape), label='t = %.1f' % t)
    pylab.xlabel('Reaction count')
    pylab.ylabel('Probability')
    pylab.legend()
    pylab.savefig('simple_plot.png')
# Run the example and write simple_plot.png when invoked as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3400801 | # -*- coding: utf-8 -*-
"""Add the Reveal.js specific short codes"""
import yaml
from nikola.plugin_categories import ShortcodePlugin
from nikola.utils import LOGGER
# HTML fragments emitted for each reveal shortcode: "start"/"finish" open and
# close the whole deck, "startSub"/"finishSub" open and close a vertical
# stack, and "new" ends the current slide and begins the next one.
revealCodes = {
    "start" : "<div class=\"reveal\" style=\"width:100%;height:75vh\"><div class=\"slides\"><section>",
    "startSub" : "<section>",
    "new" : "</section> <section>",
    "finishSub" : "</section>",
    "finish" : "</section></div></div>"
}
class RevealShortCodes(ShortcodePlugin):
    """Shortcode plugin that expands reveal codes into Reveal.js markup."""

    name = "reveal"

    def handler(self, rCode, **_options):
        """Return the HTML fragment for *rCode*, or "" after logging a warning."""
        fragment = revealCodes.get(rCode)
        if fragment is not None:
            return fragment
        LOGGER.warning('Unknown reveal code: [{}]'.format(rCode))
        return ""
| StarcoderdataPython |
153279 | '''
<NAME> 10/2/2017
<EMAIL>
Switches the MMGIS environment to release or development
Switching to release sets up the default test mission
and can limit access to tools
Usage Examples:
prepare.py d # switch to Dev env
prepare.py r # switch to Release env with all tools
prepare.py r -t i all # Rel env and Include ALL tools
prepare.py r -t e all # Rel env and Exclude ALL tools
prepare.py r -t i all -t e draw # Rel Include ALL Exclude DRAW
prepare.py r -t i layers -t i draw # Rel with only LAYERS and DRAW Included
'''
import os
import sys
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Tool names as listed in baseConfigconfig.json, populated by
# getFullToolNames(); fullToolNamesL holds the lower-cased variants.
fullToolNames = []
fullToolNamesL = [] # lowercase
# Parsed contents of baseConfigconfig.json (cached by getFullToolNames).
configconfig = None
def parse_args():
    """Parse the command-line arguments for the environment switcher."""
    parser = ArgumentParser(
        description='Turns MMGIS into a development environment or into a release with the exclusion of specified tools.',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('environment', choices=['r', 'd'], help='release or development')
    parser.add_argument('-t', '--tools', nargs=2, metavar=('i/e', 'tool'),
                        action='append', help='A tool to include (tool name or all)')
    return parser.parse_args()
def getFullToolNames():
    """Populate the fullToolNames/fullToolNamesL globals from
    baseConfigconfig.json (read from the current directory) and cache the
    parsed JSON in the ``configconfig`` global."""
    # Get full tools
    global configconfig
    with open( 'baseConfigconfig.json' ) as json_data:
        configconfig = json.load(json_data)
    for t in configconfig['tools']:
        # NOTE(review): under Python 3, str.encode() returns bytes, so these
        # lists would hold bytes objects; this looks like Python 2 era code —
        # confirm the intended interpreter.
        fullToolNames.append( t['name'].encode( 'ascii', 'ignore' ) )
        fullToolNamesL.append( t['name'].encode( 'ascii', 'ignore' ).lower() )
# MAIN ENVIRONMENT SETTERS
def set_rel_environment( toolList ):
    """Switch this MMGIS checkout into its release state.

    Renames the ``*_rel`` copies of the Missions folder, configconfig.json
    and config.sqlite3 into place (stashing the dev versions as ``*_dev``),
    then hides every tool not requested on the command line from both
    .gitignore and configconfig.json.

    Args:
        toolList: list of ``[i/e, name-or-all]`` pairs from the -t flag,
            or None, which is treated as "include all tools".
    """
    if toolList is None:
        toolList = [['i', 'all']]
    # The repository root is assumed to be the parent of the current directory.
    cwd = os.path.dirname( os.getcwd() )
    # Check whether Missions folders are not in release state
    homeDirs = os.listdir( cwd )
    if( 'Missions_rel' in homeDirs and 'Missions' in homeDirs ):
        # Switch Missions to Missions_dev folder
        os.rename( cwd + '/Missions', cwd + '/Missions_dev' )
        # Switch Missions_rel to Missions folder
        os.rename( cwd + '/Missions_rel', cwd + '/Missions' )
    # Check whether configconfig.json is not in release state
    midPath = '/config'
    configDirs = os.listdir( cwd + midPath )
    if( 'configconfig_rel.json' in configDirs and 'configconfig.json' in configDirs ):
        # Switch configconfig.json to configconfig_dev.json
        os.rename( cwd + midPath + '/configconfig.json', cwd + midPath + '/configconfig_dev.json' )
        # Switch configconfig_rel.json to configconfig.json
        os.rename( cwd + midPath + '/configconfig_rel.json', cwd + midPath + '/configconfig.json' )
    # Change configconfig.json to reflect only the included tools
    # Check whether config.sqlite3 is not in release state
    midPath = '/config/db'
    configDbDirs = os.listdir( cwd + midPath )
    if( 'config_rel.sqlite3' in configDbDirs and 'config.sqlite3' in configDbDirs ):
        # Switch config.sqlite3 to config_dev.sqlite3
        os.rename( cwd + midPath + '/config.sqlite3', cwd + midPath + '/config_dev.sqlite3' )
        # Switch config_rel.sqlite3 to config.sqlite3
        os.rename( cwd + midPath + '/config_rel.sqlite3', cwd + midPath + '/config.sqlite3' )
    # Tell .gitignore to ignore include/exclude appropriate tool scripts
    includedTools = getIncludedTools( toolList )
    excludedTools = notTheseTools( includedTools )
    gitIgnoreTheseTools( excludedTools )
    configconfigIgnoreTheseTools( excludedTools )
def set_dev_environment():
    """Switch this MMGIS checkout back into its development state.

    Reverses set_rel_environment: the current Missions folder,
    configconfig.json and config.sqlite3 are stashed as ``*_rel`` and the
    ``*_dev`` copies are renamed back into place.
    """
    # The repository root is assumed to be the parent of the current directory.
    cwd = os.path.dirname( os.getcwd() )
    homeDirs = os.listdir( cwd )
    # Check whether Missions folders are not in development state
    if( 'Missions_dev' in homeDirs and 'Missions' in homeDirs ):
        # Switch Missions to Missions_rel folder
        os.rename( cwd + '/Missions', cwd + '/Missions_rel' )
        # Switch Missions_dev to Missions folder
        os.rename( cwd + '/Missions_dev', cwd + '/Missions' )
    # Check whether configconfig.json is not in development state
    midPath = '/config'
    configDirs = os.listdir( cwd + midPath )
    if( 'configconfig_dev.json' in configDirs and 'configconfig.json' in configDirs ):
        # Switch configconfig.json to configconfig_rel.json
        os.rename( cwd + midPath + '/configconfig.json', cwd + midPath + '/configconfig_rel.json' )
        # Switch configconfig_dev.json to configconfig.json
        os.rename( cwd + midPath + '/configconfig_dev.json', cwd + midPath + '/configconfig.json' )
    # Check whether config.sqlite3 is not in devlopment state
    midPath = '/config/db'
    configDbDirs = os.listdir( cwd + midPath )
    if( 'config_dev.sqlite3' in configDbDirs and 'config.sqlite3' in configDbDirs ):
        # Switch config.sqlite3 to config_rel.sqlite3
        os.rename( cwd + midPath + '/config.sqlite3', cwd + midPath + '/config_rel.sqlite3' )
        # Switch config_dev.sqlite3 to config.sqlite3
        os.rename( cwd + midPath + '/config_dev.sqlite3', cwd + midPath + '/config.sqlite3' )
# HELPER FUNCTIONS
# Parse command line -t arguments to return complete tool list requested
def getIncludedTools(toolList):
    """Resolve the -t include/exclude pairs into the list of included tools.

    Args:
        toolList: list of ``[action, tool]`` pairs where action is 'i'
            (include) or 'e' (exclude) and tool is a tool name or 'all'.
            Pairs are applied in order.

    Returns:
        list: lower-cased names of the tools that remain included.
    """
    includedTools = []
    for action, tool in toolList:
        tool_l = tool.lower()
        if action == 'i':
            if tool == 'all':
                includedTools = fullToolNamesL[:]
            elif tool_l in (tn.lower() for tn in fullToolNamesL):
                if tool_l not in (it.lower() for it in includedTools):
                    includedTools.append(tool_l)
        elif action == 'e':
            if tool == 'all':
                includedTools = []
            elif tool_l in (tn.lower() for tn in fullToolNamesL):
                if tool_l in (it.lower() for it in includedTools):
                    # Bug fix: remove the lower-cased name actually stored in
                    # the list; the original removed the raw argument, which
                    # raised ValueError for e.g. ``-t e Draw``.
                    includedTools.remove(tool_l)
    return includedTools
# Returns !( includedTools ) of the tools set
def notTheseTools(includedTools):
    """Return the complement of *includedTools* within fullToolNamesL,
    preserving the master list's order."""
    return [name for name in fullToolNamesL if name not in includedTools]
# Writes the .gitignore to ignore tool scripts
def gitIgnoreTheseTools(toolsToIgnore):
    """Write ../.gitignore from base.gitignore plus per-tool ignore entries.

    Args:
        toolsToIgnore: lower-cased tool names whose script directories
            should be ignored by git.
    """
    # Bug fix: use context managers so both files are reliably closed and
    # flushed (the original never closed either handle).
    with open('base.gitignore', 'r') as baseIgnore:
        base_contents = baseIgnore.read()
    with open('../.gitignore', 'w') as trueIgnore:
        # Place the base ignore in the file, then one entry per tool.
        trueIgnore.write(base_contents)
        for tti in toolsToIgnore:
            trueIgnore.write('\nscripts/essence/Tools/' + fullToolNames[fullToolNamesL.index(tti)])
# Writes the config/configconfig.json to ignore tools
def configconfigIgnoreTheseTools(toolsToIgnore):
    """Write ../config/configconfig.json without the ignored tools.

    Args:
        toolsToIgnore: lower-cased tool names to drop from the 'tools'
            array of baseConfigconfig.json.
    """
    # Bug fixes: the original deleted entries from the list while
    # enumerating it (skipping the element after each deletion) and never
    # closed either file handle.
    with open('baseConfigconfig.json', 'r') as baseConfigconfig:
        baseConfigconfigJ = json.load(baseConfigconfig)
    ignored_names = {fullToolNames[fullToolNamesL.index(tti)] for tti in toolsToIgnore}
    baseConfigconfigJ['tools'] = [
        t for t in baseConfigconfigJ['tools'] if t['name'] not in ignored_names
    ]
    with open('../config/configconfig.json', 'w') as trueConfigconfig:
        json.dump(baseConfigconfigJ, trueConfigconfig, indent=4, sort_keys=True)
if __name__ == '__main__':
    cli = parse_args()
    # Populate the global tool-name lists before switching environments.
    getFullToolNames()
    mode = cli.environment.lower()
    if mode == 'r':
        set_rel_environment(cli.tools)
    elif mode == 'd':
        set_dev_environment()
| StarcoderdataPython |
6503167 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __Author__ = angel
# _PlugName_ = gedior upload
import re
def curl3(
        url, post=None, raw=None, proxy=None, method=None,
        referer=None, cookie=None,
        user_agent='Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)',
        header=None, max_time=0, connect_timeout=10, retry=2,
        retry_delay=1, upfile=None):
    u"""Issue an HTTP request via ``curl.curl2`` with upload conveniences.

    Adds support for file uploads (multipart/form-data), dict-style
    headers and dict-style post data while staying compatible with the
    older ``curl2`` call signature.

    NOTE(review): ``curl`` is never imported in this module — it is
    presumably injected by the scanner framework (``from dummy import *``
    in the ``__main__`` block); confirm before using this standalone.
    NOTE(review): ``urllib.urlencode`` is a Python 2 API; under Python 3
    this would need ``urllib.parse.urlencode``.

    Examples:
        1. Dict headers:
            headers = {
                'User-Agent': 'Mozilla/4.0',
                'Content-Type': 'text/html'
            }
            code, head, res, errcode, _ = curl3(url, header=headers)
        2. Dict post data:
            post_data = {
                'name': 'Medici.Yan',
                'password': '<PASSWORD>'
            }
            code, head, res, errcode, _ = curl3(url, post=post_data)
        3. Uploading files.  Given a form whose file inputs are named
           ``image`` and ``image2``, pass the file parts and the other
           form fields separately:
            files = [
                ('image', ('1.gif', 'GIF89axxxx', 'image/gif')),
                ('image2', ('2.jpg', 'contents of 2.jpg', 'image/jpeg'))
            ]
            post_data = "token=348&work=upload&submit=Upload"
            code, head, res, errcode, _ = curl3(
                url, upfile=files, post=post_data)
           Uploaded content must be harmless to the target server;
           prefer self-deleting proof files.

    Returns:
        The 5-tuple produced by ``curl.curl2``:
        (status code, headers, body, error code, extra).
    """
    header_str = ""
    payload = ""
    """ support dict header"""
    if isinstance(header, dict):
        # Flatten a dict of headers into "Name: value\r\n" lines.
        for i in header.keys():
            header_str += "%s: %s\r\n" % (i, header.get(i))
    else:
        header_str = header
    """ support dict post"""
    if isinstance(post, dict):
        import urllib
        payload = urllib.urlencode(post)
    else:
        payload = post
    if upfile:
        # The upfile argument looks like:
        #
        # upfile = [
        #     ('uploadfile', (
        #         'filename.ext',
        #         "...file contents...",
        #         'image/gif')),
        #     ...]
        if isinstance(upfile, list):
            post = payload
            payload = ""  # for uploads the body is rebuilt as multipart below
            boundary = "--Oo0oOoo00"
            # One multipart section per uploaded file.
            for i in range(len(upfile)):
                payload += "--%s\r\n" % boundary
                payload += "Content-Disposition: form-data;"
                payload += " name=\"%s\"; " % upfile[i][0]
                payload += "filename=\"%s\"\r\n" % upfile[i][1][0]
                payload += "Content-Type: %s\r\n\r\n" % upfile[i][1][2]
                payload += """%s\r\n""" % upfile[i][1][1]
            if post:
                # Re-encode the ordinary form fields as multipart sections.
                postlist = post.split('&')
                for i in range(len(postlist)):
                    if postlist[i]:
                        key, val = postlist[i].split('=')
                        payload += "--%s\r\n" % boundary
                        payload += 'Content-Disposition: form-data; '
                        payload += 'name="%s"\r\n\r\n' % key
                        payload += "%s\r\n" % val
            payload += "--%s--\r\n" % boundary
            # Normalize the header block so it ends with exactly one CRLF
            # before the multipart Content-Type line is appended.
            if header_str is None:
                header_str = ""
            if header_str.endswith('\r\n\r\n'):
                header_str = header_str.replace('\r\n\r\n', '\r\n')
            elif header_str.endswith('\r\n'):
                pass
            elif header_str == "":
                pass
            else:
                header_str += '\r\n'
            header_str += 'Content-Type: multipart/form-data; '
            header_str += 'boundary=%s\r\n' % boundary
            # header_str += '\r\n'
    # NOTE(review): the retry/retry_delay parameters are ignored — the call
    # below hardcodes retry=2, retry_delay=1; confirm intent.
    return curl.curl2(
        url, post=payload, raw=raw, proxy=proxy, method=method,
        referer=referer, cookie=cookie, user_agent=user_agent,
        header=header_str, max_time=max_time,
        connect_timeout=connect_timeout,
        retry=2, retry_delay=1)
def assign(service, arg):
    """Scanner framework hook: accept jobs tagged with the 'geditor' service."""
    if service == 'geditor':
        return True, arg
def audit(arg):
    """Scanner framework hook: check *arg* for the geditor upload issue.

    Posts a small self-deleting PHP proof file to ``geditor/upload.php``
    and reports a finding (``security_hole``) only when the expected md5
    marker is echoed back.  ``security_hole`` and ``curl`` are supplied by
    the scanner framework (see ``from dummy import *`` below).
    """
    upfile = 'geditor/upload.php'
    # [(name of the form's file field, (filename, file contents, MIME type))]
    # Each list element is one file.
    f = [
        ('image', (
            '5e2e9b556d77c86ab48075a94740b6f6.php',
            "GIF89a\r\n<?php echo md5(0x2333333);unlink(__FILE__); ?>",
            'image/gif'))]
    #
    # post_data = "obj=geditor_wr_content&token=<PASSWORD>&work=upload"
    # Same form fields passed as a dict instead of a query string:
    post_data = {
        'obj': 'geditor_wr_content',
        'token': '<PASSWORD>',
        'work': 'upload',
        'submit': 'Upload'
    }
    code, head, res, errcode, _ = curl3(arg + upfile, post=post_data, upfile=f)
    # ########### equivalent upload using curl2 directly #######################
    # headers = {'Content-Type': 'multipart/form-data; boundary=----Oo0oOoo00'}
    # header_str = ""
    # for i in headers.keys():
    #     header_str += "%s: %s\r\n" % (i, headers.get(i))
    # payload = ""
    # payload += "------Oo0oOoo00\r\n"
    # payload += "Content-Disposition: form-data; name=\"image\"; filename=\"5e2e9b556d77c86ab48075a94740b6f6.php\"\r\n"
    # payload += "Content-Type: image/gif\r\n\r\n"
    # payload += "GIF89a\r\n<?php echo md5(0x2333333);unlink(__FILE__); ?>\r\n"
    # payload += "------Oo0oOoo00\r\n"
    # payload += 'Content-Disposition: form-data; name="obj"\r\n\r\n'
    # payload += "geditor_wr_content\r\n"
    # payload += "------Oo0oOoo00\r\n"
    # payload += 'Content-Disposition: form-data; name="token"\r\n\r\n'
    # payload += "348\r\n"
    # payload += "------Oo0oOoo00\r\n"
    # payload += 'Content-Disposition: form-data; name="work"\r\n\r\n'
    # payload += "upload\r\n"
    # payload += "------Oo0oOoo00--\r\n"
    # code, head, res, errcode, _ = curl.curl2(
    #     arg + upfile, post=payload, header=header_str)
    # ############ end of curl2 upload example #####################
    # ############# parse the returned page #################
    if res:
        pattern = re.search(r'insert_image_preview\(\"(.+)\"\)\;', res)
        if pattern:
            shell = arg + 'geditor' + pattern.group(1)
            # NOTE(review): res2 may be None if the second request fails,
            # which would make the 'in' test raise TypeError — consider
            # guarding before the membership check.
            code2, head2, res2, errcode2, _ = curl3(shell)
            if "5a8adb32edd60e0cfb459cfb38093755" in res2:
                security_hole(arg + upfile)
if __name__ == '__main__':
    # Standalone test run: 'dummy' supplies the framework globals
    # (curl, security_hole, ...) that this plugin otherwise inherits.
    from dummy import *
    audit(assign('geditor', 'http://msgr2.talknow.co.kr/')[1])
| StarcoderdataPython |
8182454 | import os
import re
import sys
import copy
import logging
import warnings
import subprocess
import shutil
import uuid
import tempfile
import asyncio
from collections import OrderedDict
from pprint import pformat
from yggdrasil import platform, tools, languages, multitasking, constants
from yggdrasil.components import import_component
from yggdrasil.drivers.Driver import Driver
from yggdrasil.metaschema.datatypes import is_default_typedef
from queue import Empty
logger = logging.getLogger(__name__)
# Maps source-file extensions to language information; starts empty and is
# presumably populated as language driver modules are imported
# (remove_product imports all drivers before reading it) — confirm.
_map_language_ext = OrderedDict()
def remove_product(product, check_for_source=False, **kwargs):
    r"""Delete a single product after checking that the product is not (or
    does not contain, in the case of directories), source files.

    Args:
        product (str): Full path to a file or directory that should be
            removed.
        check_for_source (bool, optional): If True, the specified product
            will be checked to ensure that no source files are present. If
            a source file is present, a RuntimeError will be raised.
            Defaults to False.
        **kwargs: Additional keyword arguments are passed to tools.remove_path.

    Raises:
        RuntimeError: If the specified product is a source file and
            check_for_source is True.
        RuntimeError: If the specified product is a directory that contains
            a source file and check_for_source is True.
        RuntimeError: If the product cannot be removed.

    """
    # Make sure every language driver has registered its extensions in
    # _map_language_ext before treating its keys as the source-extension set.
    tools.import_all_modules('yggdrasil.drivers')
    source_keys = list(_map_language_ext.keys())
    # .exe is never treated as source, so executables remain removable.
    if '.exe' in source_keys:  # pragma: windows
        source_keys.remove('.exe')
    if check_for_source:
        if os.path.isdir(product):
            ext_tuple = tuple(source_keys)
            for root, dirs, files in os.walk(product):
                for f in files:
                    if f.endswith(ext_tuple):
                        raise RuntimeError(("%s contains a source file "
                                            "(%s)") % (product, f))
        elif os.path.isfile(product):
            ext = os.path.splitext(product)[-1]
            if ext in source_keys:
                raise RuntimeError("%s is a source file." % product)
    tools.remove_path(product, **kwargs)
def remove_products(products, source_products):
    """Clean up files generated while running a model.

    Args:
        products (list): Paths removed only after verifying that they are
            not (and do not contain) source files.
        source_products (list): Paths removed unconditionally, including
            source files.
    """
    for path in source_products:
        remove_product(path)
    for path in products:
        remove_product(path, check_for_source=True)
class ModelDriver(Driver):
r"""Base class for Model drivers and for running executable based models.
Args:
name (str): Unique name used to identify the model. This will
be used to report errors associated with the model.
args (str or list): The path to the file containing the model
program that will be run by the driver for the model's language
and/or a list of arguments that should be passed as input to the
model program or language executable (e.g. source code or
configuration file for a domain specific language).
products (list, optional): Paths to files created by the model that
should be cleaned up when the model exits. Entries can be absolute
paths or paths relative to the working directory. Defaults to [].
function (str, optional): If provided, an integrated model is
created by wrapping the function named here. The function must be
located within the file specified by the source file listed in the
first argument. If not provided, the model must contain it's own
calls to the |yggdrasil| interface.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to an
empty array and is ignored.
source_products (list, optional): Files created by running the model
that are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted. Defaults to [].
is_server (bool, dict, optional): If `True`, the model is assumed to be a
server for one or more client models and an instance of
:class:`yggdrasil.drivers.ServerDriver` is started. The
corresponding channel that should be passed to the yggdrasil API
will be the name of the model. If is_server is a dictionary, it
should contain an 'input' key and an 'output' key. These are
required to be the names of existing input and output channels in
the model that will be co-opted by the server. (Note: This requires
that the co-opted output channel's send method is called once for
each time the co-opted input channel's recv method is called. If
used with the `function` parameter, `is_server` must be a dictionary.
Defaults to False.
client_of (str, list, optional): The names of one or more models that
this model will call as a server. If there are more than one, this
should be specified as a sequence collection (list). The
corresponding channel(s) that should be passed to the yggdrasil API
will be the name of the server model joined with the name of the
client model with an underscore `<server_model>_<client_model>`.
There will be one channel created for each server the model is a
client of. Defaults to empty list. Use of `client_of` with `function`
is not currently supported.
timesync (bool, str, optional): If set, the model is assumed to
call a send then receive of the state at each timestep
for syncronization with other models that are also
integrating in time. If a string is provided, it is assumed
to be the name of the server that will handle timestep
synchronization. If a boolean is provided, the name of the
server will be assumed to be 'timestep'. Defaults to False.
overwrite (bool, optional): If True, any existing model products
(compilation products, wrapper scripts, etc.) are removed prior to
the run. If False, the products are not removed. Defaults to True.
Setting this to False can improve the performance, particularly for
models that take a long time to compile, but this should only be
done once the model has been fully debugged to ensure that each run
is tested on a clean copy of the model. The value of this keyword
also determines whether or not products are removed after a run.
preserve_cache (bool, optional): If True model products will be kept
following the run, otherwise all products will be cleaned up.
Defaults to False. This keyword is superceeded by overwrite.
with_strace (bool, optional): If True, the command is run with strace (on
Linux) or dtrace (on MacOS). Defaults to False.
strace_flags (list, optional): Flags to pass to strace (or dtrace).
Defaults to [].
with_valgrind (bool, optional): If True, the command is run with valgrind.
Defaults to False.
valgrind_flags (list, optional): Flags to pass to valgrind. Defaults to [].
model_index (int, optional): Index of model in list of models being run.
Defaults to 0.
copy_index (int, optional): Index of model in set of copies. Defaults
to -1 indicating there is only one copy of the model.
outputs_in_inputs (bool, optional): If True, outputs from wrapped model
functions are passed by pointer as inputs for modification and the
return value will be a flag. If False, outputs are limited to
return values. Defaults to the value of the class attribute
outputs_in_inputs.
logging_level (str, optional): The level of logging messages that should
be displayed by the model. Defaults to the logging level as
determined by the configuration file and environment variables.
allow_threading (bool, optional): If True, comm connections will be set up
so that the model-side comms can be used by more than one thread.
Defaults to False.
copies (int, optional): The number of copies of the model that should be
created. Defaults to 1.
repository_url (str, optional): URL for the git repository containing
the model source code. If provided, relative paths in the model
YAML definition will be considered relative to the repository root
directory.
repository_commit (str, optional): Commit that should be checked out
in the model repository specified by repository_url. If not
provided, the most recent commit on the default branch will be used.
description (str, optional): Description of the model. This parameter
is only used in the model repository or when providing the model
as a service.
contact_email (str, optional): Email address that should be used to
contact the maintainer of the model. This parameter is only used
in the model repository.
validation_command (str, optional): Path to a validation command that
can be used to verify that the model ran as expected. A non-zero
return code is taken to indicate failure.
dependencies (list, optional): A list of packages required by the
model that are written in the same language as the model. If the
package requires dependencies outside the language of the model.
use the additional_dependencies parameter to provide them. If you
need a version of the package from a specific package manager,
a mapping with 'package' and 'package_manager' fields can be
provided instead of just the name of the package.
additional_dependencies (dict, optional): A mapping between languages
and lists of packages in those languages that are required by the
model.
**kwargs: Additional keyword arguments are passed to parent class.
Class Attributes:
language (str): Primary name for the programming language that this
compiler should be used for. [REQUIRED]
language_aliases (list): Additional/alternative names that the language
may be known by.
language_ext (list): Extensions for programs written in the target
language. [REQUIRED]
base_languages (list): Other programming languages that this driver
and the interpreter for the target language are dependent on (e.g.
Matlab models require Python).
executable_type (str): 'compiler' or 'interpreter' to indicate the type
of the executable for the language. [AUTOMATED]
interface_library (list): Name of the library containing the yggdrasil
interface for the target language. [REQUIRED]
interface_directories (list): Directories containing code in the
interface library for the target language.
interface_dependencies (list): List of names of libraries that are
required to use the interface on the current platform. This dosn't
include libraries required by specific communication types which are
described by supported_comm_options.
supported_comms (list): Name of comms supported in the target language.
[REQUIRED]
supported_comm_options (dict): Options for the supported comms like the
platforms they are available on and the external libraries required
to use them. [REQUIRED]
external_libraries (dict): Information on external libraries required
for running models in the target language using yggdrasil.
internal_libraries (dict): Information on internal libraries required
for running models in the target language using yggdrasil.
type_map (dict): Mapping of |yggdrasil| extended JSON types to
datatypes in the target programming language. [REQUIRED]
function_param (dict): Options specifying how different operations
would be encoded in the target language (e.g. if statements, for
loops, while loops). [REQUIRED]
version_flags (list): Flags that should be called with the language
executable to determine the version of the compiler/interpreter.
Defaults to ['--version'].
outputs_in_inputs (bool): If True, outputs are passed by pointer as
inputs for modification and the return value should be a flag.
Defaults to False.
include_arg_count (bool): If True, the number of arguments passed
to send/recv calls is prepended to the arguments to the function.
Defaults to False.
include_channel_obj (bool): If True, the channel object is passed as
input to the send/recv calls (after the argument count if it is
also present due to include_arg_count being True). Defaults to
False.
is_typed (bool): True if the language is typed, False otherwise.
brackets (tuple): A pair of opening and clossing characters that
are used by the language to mark blocks. Set to None and
ignored by default.
no_executable (bool): True if there is not an executable associated
with the language driver. Defaults to False.
comms_implicit (bool): True if the comms installed for this driver
are not explicitly defined (depend on input parameters). Defaults
to False.
Attributes:
args (list): Argument(s) for running the model on the command line.
model_file (str): Full path to the model executable or interpretable
script.
model_args (list): Runtime arguments for running the model on the
command line.
model_src (str): Full path to the model source code. For interpreted
languages, this will be the same as model_file.
model_function_info (dict): Parameters recovered by parsing the
provided model function definition.
overwrite (bool): If True, any existing compilation products will be
overwritten by compilation and cleaned up following the run.
Otherwise, existing products will be used and will remain after
the run.
products (list): Files created by running the model. This includes
compilation products such as executables and/or object files.
source_products (list): Files created by running the model that
are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted.
wrapper_products (list): Files created in order to wrap the model.
process (:class:`yggdrasil.tools.YggPopen`): Process used to run
the model.
function (str): The name of the model function that should be wrapped.
iter_function_over (array): Variable(s) that should be received or
sent as an array, but iterated over.
is_server (bool, dict): If True, the model is assumed to be a server
and an instance of :class:`yggdrasil.drivers.ServerDriver` is
started. If a dict, the input/output channels with the specified
names in the dict will be replaced with a server.
client_of (list): The names of server models that this model is a
client of.
timesync (str): If set, the name of the server performing
timestep synchronization for the model.
with_strace (bool): If True, the command is run with strace or dtrace.
strace_flags (list): Flags to pass to strace/dtrace.
with_valgrind (bool): If True, the command is run with valgrind.
valgrind_flags (list): Flags to pass to valgrind.
model_index (int): Index of model in list of models being run.
copy_index (int): Index of model in set of copies.
modified_files (list): List of pairs of originals and copies of files
that should be restored during cleanup.
allow_threading (bool): If True, comm connections will be set up so that
the model-side comms can be used by more than one thread.
copies (int): The number of copies of the model that should be created.
repository_url (str): URL for the git repository containing the model
source code. If provided, relative paths in the model YAML
definition will be considered relative to the repository root
directory.
repository_commit (str): Commit that should be checked out in the
model repository specified by repository_url.
description (str): Description of the model. This parameter is only
used in the model repository or when providing the model as a
service.
contact_email (str): Email address that should be used to contact the
maintainer of the model. This parameter is only used in the model
repository.
validation_command (str): Path to a validation command that can be
used to verify that the model ran as expected. A non-zero return
code is taken to indicate failure.
dependencies (list): A list of packages required by the model that are
written in the same language as the model. If the package requires
dependencies outside the language of the model, use the
additional_dependencies parameter to provide them. If you need a
version of the package from a specific package manager, a mapping
with 'package' and 'package_manager' fields can be provided
instead of just the name of the package.
additional_dependencies (dict): A mapping between languages and lists
of packages in those languages that are required by the model.
Raises:
RuntimeError: If both with_strace and with_valgrind are True.
"""
_schema_type = 'model'
_schema_subtype_key = 'language'
_schema_required = ['name', 'language', 'args', 'working_dir']
_schema_properties = {
'name': {'type': 'string'},
'language': {'type': 'string', 'default': 'executable',
'description': (
'The programming language that the model '
'is written in. A list of available '
'languages can be found :ref:`here <'
'schema_table_model_subtype_rst>`.')},
'args': {'type': 'array',
'items': {'type': 'string', 'minLength': 1}},
'inputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying input to the model. '
'A full description of channel entries and the '
'options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'outputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying output from the '
'model. A full description of channel entries and '
'the options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'env': {'type': 'object', 'default': {},
'additional_properties': {'type': 'string'}},
'products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'source_products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'working_dir': {'type': 'string'},
'overwrite': {'type': 'boolean'},
'preserve_cache': {'type': 'boolean', 'default': False},
'function': {'type': 'string'},
'iter_function_over': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'is_server': {'anyOf': [{'type': 'boolean'},
{'type': 'object',
'properties': {'input': {'type': 'string'},
'output': {'type': 'string'}},
'additionalProperties': False}],
'default': False},
'client_of': {'type': 'array', 'items': {'type': 'string'},
'default': []},
'timesync': {
'anyOf': [
{'type': 'boolean'}, {'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string', 'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}},
{'type': 'array',
'items': {
'anyOf': [
{'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string',
'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}}]}}],
'default': False},
'with_strace': {'type': 'boolean', 'default': False},
'strace_flags': {'type': 'array',
'default': ['-e', 'trace=memory'],
'items': {'type': 'string'}},
'with_valgrind': {'type': 'boolean', 'default': False},
'valgrind_flags': {'type': 'array',
'default': ['--leak-check=full',
'--show-leak-kinds=all'], # '-v'
'items': {'type': 'string'}},
'outputs_in_inputs': {'type': 'boolean'},
'logging_level': {'type': 'string', 'default': ''},
'allow_threading': {'type': 'boolean'},
'copies': {'type': 'integer', 'default': 1, 'minimum': 1},
'repository_url': {'type': 'string'},
'repository_commit': {'type': 'string'},
'description': {'type': 'string'},
'contact_email': {'type': 'string'},
'validation_command': {'type': 'string'},
'dependencies': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}},
'additional_dependencies': {
'type': 'object',
'additionalProperties': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}}}}
_schema_excluded_from_class = ['name', 'language', 'args', 'working_dir']
_schema_excluded_from_class_validation = ['inputs', 'outputs']
language = None
language_ext = None
language_aliases = []
base_languages = []
executable_type = None
interface_library = None
interface_directories = []
interface_dependencies = []
supported_comms = []
supported_comm_options = {}
external_libraries = {}
internal_libraries = {}
type_map = None
inverse_type_map = None
function_param = None
version_flags = ['--version']
full_language = True
outputs_in_inputs = False
include_arg_count = False
include_channel_obj = False
is_typed = False
types_in_funcdef = True
interface_inside_exec = False
dont_declare_channel = False
is_dsl = False
brackets = None
zero_based = True
max_line_width = None
no_executable = False
comms_implicit = False
python_interface = {'table_input': 'YggAsciiTableInput',
'table_output': 'YggAsciiTableOutput',
'array_input': 'YggArrayInput',
'array_output': 'YggArrayOutput',
'pandas_input': 'YggPandasInput',
'pandas_output': 'YggPandasOutput'}
_library_cache = {}
_config_keys = []
_config_attr_map = []
_executable_search_dirs = None
_disconnect_attr = (Driver._disconnect_attr
+ ['queue', 'queue_thread',
'event_process_kill_called',
'event_process_kill_complete',
'model_process'])
_mpi_tags = {'ENV': 1,
'START': 2,
'STOP_RANK0': 3, # Stopped by partner
'STOP_RANKX': 4, # Stopped by root
'BUILDFILE': 5,
'LOCK_BUILDFILE': 6,
'UNLOCK_BUILDFILE': 7}
def __init__(self, name, args, model_index=0, copy_index=-1, clients=[],
preparsed_function=None, outputs_in_inputs=None,
mpi_rank=0, mpi_tag_start=None, **kwargs):
self._inv_mpi_tags = {v: k for k, v in self._mpi_tags.items()}
self.model_outputs_in_inputs = outputs_in_inputs
self.preparsed_function = preparsed_function
super(ModelDriver, self).__init__(name, **kwargs)
if self.overwrite is None:
self.overwrite = (not self.preserve_cache)
# Setup process things
self.model_process = None
self.queue = multitasking.Queue()
self.queue_thread = None
self.event_process_kill_called = multitasking.Event()
self.event_process_kill_complete = multitasking.Event()
# Strace/valgrind
if self.with_strace and self.with_valgrind:
raise RuntimeError("Trying to run with strace and valgrind.")
if (((self.with_strace or self.with_valgrind)
and platform._is_win)): # pragma: windows
raise RuntimeError("strace/valgrind options invalid on windows.")
self.model_index = model_index
self.copy_index = copy_index
self.clients = clients
self.env_copy = ['LANG', 'PATH', 'USER']
self._exit_line = b'EXIT'
for k in self.env_copy:
if k in os.environ:
self.env[k] = os.environ[k]
if not self.is_installed():
raise RuntimeError("%s is not installed" % self.language)
self.raw_model_file = None
self.model_function_file = None
self.model_function_info = None
self.model_function_inputs = None
self.model_function_outputs = None
self.model_file = None
self.model_args = []
self.model_dir = None
self.model_src = None
self.args = args
self.modified_files = []
self.wrapper_products = []
self._mpi_comm = False
self._mpi_rank = 0
self._mpi_size = 1
self._mpi_requests = {}
self._mpi_tag = (len(self._mpi_tags) * self.model_index)
if mpi_tag_start is not None:
self._mpi_tag += mpi_tag_start
if multitasking._on_mpi:
self._mpi_comm = multitasking.MPI.COMM_WORLD
self._mpi_rank = self._mpi_comm.Get_rank()
self._mpi_size = self._mpi_comm.Get_size()
self._mpi_partner_rank = mpi_rank
# Update for function
if self.function:
args = [self.init_from_function(args)]
# Parse arguments
self.debug(str(args))
self.parse_arguments(args)
assert(self.model_file is not None)
# Remove products
if self.overwrite:
self.remove_products()
# Write wrapper
if self.function:
self.wrapper_products.append(args[0])
self.wrapper_products += self.write_wrappers()
# Install dependencies
if self.dependencies:
self.install_model_dependencies(self.dependencies)
if self.additional_dependencies:
for language, v in self.additional_dependencies.items():
drv = import_component('model', language)
drv.install_model_dependencies(v)
@staticmethod
def before_registration(cls):
r"""Operations that should be performed to modify class attributes prior
to registration including things like platform dependent properties and
checking environment variables for default settings.
"""
Driver.before_registration(cls)
cls.inverse_type_map = None
cls._language = cls.language
cls._language_aliases = cls.language_aliases
if (((cls.language_ext is not None)
and (not isinstance(cls.language_ext, (list, tuple))))):
cls.language_ext = [cls.language_ext]
    @staticmethod
    def after_registration(cls, cfg=None, second_pass=False):
        r"""Operations that should be performed to modify class attributes after
        registration. For compiled languages this includes selecting the
        default compiler. The order of precedence is the config file 'compiler'
        option for the language, followed by the environment variable set by
        _compiler_env, followed by the existing class attribute.

        Args:
            cfg (YggConfigParser, optional): Config class that should
                be used to set options for the driver. Defaults to
                None and yggdrasil.config.ygg_cfg is used.
            second_pass (bool, optional): If True, the class has already
                been registered. Defaults to False.
        """
        if cfg is None:
            # Imported locally to avoid a circular import at module load time
            from yggdrasil.config import ygg_cfg
            cfg = ygg_cfg
            # Reload in case the config file changed since it was first read
            cfg.reload()
        Driver.after_registration(cls)
        cls.cfg = cfg
        # Transfer config file options onto the class attributes they map to,
        # keeping the existing attribute value as the fallback default
        for x in cls._config_attr_map:
            ka = x['attr']
            k0 = x.get('key', ka)
            setattr(cls, ka, cls.cfg.get(cls.language, k0,
                                         getattr(cls, ka)))
@staticmethod
def finalize_registration(cls):
r"""Operations that should be performed after a class has been fully
initialized and registered."""
global _map_language_ext
for x in cls.get_language_ext():
if x not in _map_language_ext:
_map_language_ext[x] = []
_map_language_ext[x].append(cls.language)
    @classmethod
    def mpi_partner_init(cls, self):
        r"""Actions initializing an MPIPartnerModel.

        Args:
            self (ModelDriver): Driver instance being initialized.
        """
        # Hook for subclasses; the base class has nothing to set up.
        pass
    @classmethod
    def mpi_partner_cleanup(cls, self):
        r"""Actions cleaning up an MPIPartnerModel.

        Args:
            self (ModelDriver): Driver instance being cleaned up.
        """
        # Hook for subclasses; the base class has nothing to tear down.
        pass
@classmethod
def get_inverse_type_map(cls):
r"""Get the inverse type map.
Returns:
dict: Mapping from native type to JSON type.
"""
if cls.inverse_type_map is None:
cls.inverse_type_map = {}
for k, v in cls.type_map.items():
if k != 'flag':
cls.inverse_type_map[v] = k
return cls.inverse_type_map
@classmethod
def get_language_for_source(cls, fname, languages=None, early_exit=False,
**kwargs):
r"""Determine the language that can be used with the provided source
file(s). If more than one language applies to a set of multiple files,
the language that applies to the most files is returned.
Args:
fname (str, list): The full path to one or more files. If more than
one
languages (list, optional): The list of languages that are acceptable.
Defaults to None and any language will be acceptable.
early_exit (bool, optional): If True, the first language identified
will be returned if fname is a list of files. Defaults to False.
**kwargs: Additional keyword arguments are passed to recursive calls.
Returns:
str: The language that can operate on the specified file.
"""
if isinstance(fname, list):
lang_dict = {}
for f in fname:
try:
ilang = cls.get_language_for_source(f, languages=languages,
**kwargs)
if early_exit:
return ilang
except ValueError:
continue
lang_dict.setdefault(ilang, 0)
lang_dict[ilang] += 1
if lang_dict:
return max(lang_dict, key=lang_dict.get)
else:
ext = os.path.splitext(fname)[-1]
for ilang in cls.get_map_language_ext().get(ext, []):
if (languages is None) or (ilang in languages):
return ilang
raise ValueError("Cannot determine language for file(s): '%s'" % fname)
    @classmethod
    def get_map_language_ext(cls):
        r"""Return the mapping of all language extensions."""
        # Module-level registry populated by finalize_registration
        return _map_language_ext
@classmethod
def get_all_language_ext(cls):
r"""Return the list of all language extensions."""
return list(_map_language_ext.keys())
    @classmethod
    def get_language_dir(cls):
        r"""Return the language directory."""
        return languages.get_language_dir(cls.language)
@classmethod
def get_language_ext(cls):
r"""Return the language extension, including from the base classes."""
out = cls.language_ext
if out is None:
out = []
for x in cls.base_languages:
out += import_component('model', x).get_language_ext()
return out
def parse_arguments(self, args, default_model_dir=None):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
default_model_dir (str, optional): Path to directory that should be
used to normalize the model file path if it is not absolute.
Defaults to None and is set to the working_dir.
"""
if isinstance(args, (str, bytes)):
args = args.split()
for i in range(len(args)):
args[i] = str(args[i])
assert(isinstance(args, list))
if default_model_dir is None:
default_model_dir = self.working_dir
self.raw_model_file = args[0]
self.model_file = self.raw_model_file
self.model_args = args[1:]
if (self.language != 'executable') and (not os.path.isabs(self.model_file)):
model_file = os.path.normpath(os.path.join(default_model_dir,
self.model_file))
self.model_file = model_file
self.model_dir = os.path.dirname(self.model_file)
self.debug("model_file = '%s', model_dir = '%s', model_args = '%s'",
self.model_file, self.model_dir, self.model_args)
    def init_from_function(self, args):
        r"""Initialize model parameters based on the wrapped function.

        Args:
            args (list): Model arguments; args[0] is expected to point to the
                source containing the wrapped function.

        Returns:
            str: Full path to the generated wrapper source file that calls
                the wrapped function.
        """
        if not self.preparsed_function:
            # Build a mock YAML entry so preparse_function receives the same
            # structure it would get from the YAML specification
            yml_mock = dict(self.yml,
                            name=self.name,
                            args=self.args,
                            function=self.function,
                            is_server=self.is_server,
                            client_of=self.client_of,
                            inputs=self.inputs,
                            outputs=self.outputs,
                            iter_function_over=self.iter_function_over,
                            copies=self.copies)
            self.preparsed_function = self.preparse_function(yml_mock)
        self.model_function_info = self.preparsed_function['model_file']
        self.model_function_file = self.model_function_info['model_file']
        self.model_function_inputs = self.preparsed_function['inputs']
        self.model_function_outputs = self.preparsed_function['outputs']
        self.model_outputs_in_inputs = self.preparsed_function['outputs_in_inputs']
        model_dir, model_base = os.path.split(self.model_function_file)
        model_base = os.path.splitext(model_base)[0]
        # The wrapper lives beside the model source: ygg_<base>_<name>.<ext>
        wrapper_fname = os.path.join(model_dir,
                                     'ygg_%s_%s%s' % (model_base, self.name,
                                                      self.language_ext[0]))
        lines = self.write_model_wrapper(model_name=self.name,
                                         **self.preparsed_function)
        # Write file (skipped when a wrapper exists and overwrite is False)
        if (not os.path.isfile(wrapper_fname)) or self.overwrite:
            with open(wrapper_fname, 'w') as fd:
                fd.write('\n'.join(lines))
        return wrapper_fname
@property
def numeric_logging_level(self):
r"""int: Logging level for the model."""
out = self.logger.getEffectiveLevel()
if self.logging_level:
out = logging.getLevelName(self.logging_level)
return out
    @property
    def n_sent_messages(self):
        r"""dict: Number of messages sent by the model via each connection."""
        if (self._mpi_rank > 0) and self.check_mpi_request('stopped'):
            # Non-root MPI ranks receive the tally from the root rank
            out = self._mpi_requests['stopped'].result
            return out
        out = {}
        # Count messages received by each output connection driver
        for x in self.yml.get('output_drivers', []):
            x_inst = x.get('instance', None)
            if x_inst:
                out[x_inst.name] = x_inst.models_recvd.get(self.name, 0)
        if self.is_server:
            # Server models also send replies via RPC input drivers
            for x in self.yml.get('input_drivers', []):
                x_inst = x.get('instance', None)
                if x_inst and (x_inst._connection_type == 'rpc_request'):
                    out[x_inst.name] = x_inst.servers_recvd.get(self.name, 0)
        return out
@property
def has_sent_messages(self):
r"""bool: True if output has been received from the model."""
n_msg = self.n_sent_messages
if not n_msg:
return True
return bool(sum(n_msg.values()))
    def write_wrappers(self, **kwargs):
        r"""Write any wrappers needed to compile and/or run a model.

        Args:
            **kwargs: Keyword arguments are ignored (only included to
                allow cascade from child classes).

        Returns:
            list: Full paths to any created wrappers.
        """
        # The base driver needs no wrapper files; language subclasses
        # override this when compilation/interpreter shims are required.
        return []
@classmethod
def install_model_dependencies(cls, dependencies, always_yes=False):
r"""Install any dependencies required by the model.
Args:
dependencies (list): Dependencies that should be installed.
always_yes (bool, optional): If True, the package manager will
not ask users for input during installation. Defaults to
False.
"""
packages = {}
for x in dependencies:
if isinstance(x, str):
x = {'package': x}
if x.get('arguments', None):
cls.install_dependency(always_yes=always_yes, **x)
else:
packages.setdefault(x.get('package_manager', None), [])
packages[x.get('package_manager', None)].append(
x['package'])
for k, v in packages.items():
cls.install_dependency(v, package_manager=k,
always_yes=always_yes)
    @classmethod
    def install_dependency(cls, package=None, package_manager=None,
                           arguments=None, command=None, always_yes=False):
        r"""Install a dependency.

        Args:
            package (str): Name of the package that should be installed. If
                the package manager supports it, this can include version
                requirements.
            package_manager (str, optional): Package manager that should be
                used to install the package.
            arguments (str, optional): Additional arguments that should be
                passed to the package manager.
            command (list, optional): Command that should be used to
                install the package.
            always_yes (bool, optional): If True, the package manager will
                not ask users for input during installation. Defaults to
                False.

        Raises:
            NotImplementedError: If the package manager is not supported.
        """
        assert(package)
        if isinstance(package, str):
            package = package.split()
        # Pick a platform-appropriate default package manager
        if package_manager is None:
            if tools.get_conda_prefix():
                package_manager = 'conda'
            elif platform._is_mac:
                package_manager = 'brew'
            elif platform._is_linux:
                package_manager = 'apt'
            elif platform._is_win:
                package_manager = 'choco'
        yes_cmd = []
        cmd_kwargs = {}
        if command:
            cmd = copy.copy(command)
        elif package_manager == 'conda':
            cmd = ['conda', 'install'] + package
            if platform._is_win:  # pragma: windows
                # Conda commands must be run on the shell on windows as it
                # is implemented as a batch script
                cmd.insert(0, 'call')
                cmd_kwargs['shell'] = True
            yes_cmd = ['-y']
        elif package_manager == 'brew':
            cmd = ['brew', 'install'] + package
        elif package_manager == 'apt':
            cmd = ['apt-get', 'install'] + package
            if bool(os.environ.get('GITHUB_ACTIONS', False)):
                # Only enable sudo for testing, otherwise allow the user to
                # decide if they want to run yggdrasil with sudo, or just
                # install the dependencies themselves
                cmd.insert(0, 'sudo')
            yes_cmd = ['-y']
        elif package_manager == 'choco':
            cmd = ['choco', 'install'] + package
        elif package_manager == 'vcpkg':
            cmd = ['vcpkg.exe', 'install', '--triplet', 'x64-windows']
            cmd += package
        else:
            # Language-specific package managers (pip, cran) are delegated
            # to the matching language driver
            package_managers = {'pip': 'python',
                                'cran': 'r'}
            if package_manager in package_managers:
                drv = import_component(
                    'model', package_managers[package_manager])
                return drv.install_dependency(
                    package=package, package_manager=package_manager,
                    arguments=arguments, always_yes=always_yes)
            raise NotImplementedError(f"Unsupported package manager: "
                                      f"{package_manager}")
        if arguments:
            cmd += arguments.split()
        if always_yes:
            cmd += yes_cmd
        # NOTE(review): joining into a single shell string assumes package
        # names contain no shell metacharacters — confirm inputs are trusted
        # before extending the shell path.
        if cmd_kwargs.get('shell', False):
            cmd = ' '.join(cmd)
        subprocess.check_call(cmd, **cmd_kwargs)
def model_command(self):
r"""Return the command that should be used to run the model.
Returns:
list: Any commands/arguments needed to run the model from the
command line.
"""
return [self.model_file] + self.model_args
    @classmethod
    def language_executable(cls, **kwargs):
        r"""Command required to compile/run a model written in this language
        from the command line.

        Args:
            **kwargs: Additional keyword arguments are ignored.

        Returns:
            str: Name of (or path to) compiler/interpreter executable required
                to run the compiler/interpreter from the command line.

        Raises:
            NotImplementedError: If the language subclass does not override
                this method and an executable is expected.
        """
        # Languages flagged no_executable (e.g. pure DSLs) have no command
        # line program associated with them.
        if cls.no_executable:
            return ''
        raise NotImplementedError("language_executable not implemented for '%s'"
                                  % cls.language)
    @classmethod
    def executable_command(cls, args, unused_kwargs=None, **kwargs):
        r"""Compose a command for running a program using the executable for
        this language (compiler/interpreter) with the provided arguments.

        Args:
            args (list): The program that returned command should run and any
                arguments that should be provided to it.
            unused_kwargs (dict, optional): Existing dictionary that unused
                keyword arguments should be added to. Defaults to None and is
                ignored.
            **kwargs: Additional keyword arguments are ignored.

        Returns:
            list: Arguments composing the command required to run the program
                from the command line using the executable for this language.

        Raises:
            NotImplementedError: If not overridden by the language subclass.
        """
        raise NotImplementedError("executable_command not implemented for '%s'"
                                  % cls.language)
@classmethod
def run_executable(cls, args, return_process=False, debug_flags=None,
**kwargs):
r"""Run a program using the executable for this language and the
provided arguments.
Args:
args (list): The program that should be run and any arguments
that should be provided to it.
return_process (bool, optional): If True, the process class is
returned without checking the process output. If False,
communicate is called on the process and the output is parsed
for errors. Defaults to False.
debug_flags (list, optional): Debug executable and flags that should
be prepended to the executable command. Defaults to None and
is ignored.
**kwargs: Additional keyword arguments are passed to
cls.executable_command and tools.popen_nobuffer.
Returns:
str: Output to stdout from the run command if return_process is
False, the process if return_process is True.
Raises:
RuntimeError: If the language is not installed.
RuntimeError: If there is an error when running the command.
"""
unused_kwargs = {}
cmd = cls.executable_command(args, unused_kwargs=unused_kwargs, **kwargs)
if isinstance(debug_flags, list):
cmd = debug_flags + cmd
try:
# Add default keyword arguments
if 'working_dir' in unused_kwargs:
unused_kwargs.setdefault('cwd', unused_kwargs.pop('working_dir'))
unused_kwargs.setdefault('shell', platform._is_win)
# Call command
logger.debug("Running '%s' from %s"
% (' '.join(cmd), unused_kwargs.get('cwd', os.getcwd())))
logger.debug("Process keyword arguments:\n%s\n",
' ' + pformat(unused_kwargs).replace('\n', '\n '))
print(' '.join(cmd))
proc = tools.popen_nobuffer(cmd, **unused_kwargs)
if return_process:
return proc
out, err = proc.communicate()
if proc.returncode != 0:
if out:
logger.info('\n%s' % out.decode('utf-8'))
if err: # pragma: debug
logger.info('\n%s' % err.decode('utf-8'))
raise RuntimeError("Command '%s' failed with code %d."
% (' '.join(cmd), proc.returncode))
out = out.decode("utf-8")
logger.debug('%s\n%s' % (' '.join(cmd), out))
return out
except (subprocess.CalledProcessError, OSError) as e: # pragma: debug
raise RuntimeError("Could not call command '%s': %s"
% (' '.join(cmd), e))
def run_validation(self):
r"""Run the validation script for the model."""
if not self.validation_command:
return
subprocess.check_call(self.validation_command.split(),
cwd=self.working_dir)
def run_model(self, return_process=True, **kwargs):
r"""Run the model. Unless overridden, the model will be run using
run_executable.
Args:
return_process (bool, optional): If True, the process running
the model is returned. If False, the process will block until
the model finishes running. Defaults to True.
**kwargs: Keyword arguments are passed to run_executable.
"""
env = self.set_env()
command = self.model_command()
if self.with_strace or self.with_valgrind:
kwargs.setdefault('debug_flags', self.debug_flags)
self.debug('Working directory: %s', self.working_dir)
self.debug('Command: %s', ' '.join(command))
self.debug('Environment Variables:\n%s', self.pprint(env, block_indent=1))
# Update keywords
# NOTE: Setting forward_signals to False allows faster debugging
# but should not be used in deployment for cases where models are not
# running locally.
default_kwargs = dict(env=env, working_dir=self.working_dir,
forward_signals=False,
shell=platform._is_win)
for k, v in default_kwargs.items():
kwargs.setdefault(k, v)
return self.run_executable(command, return_process=return_process, **kwargs)
@property
def debug_flags(self):
r"""list: Flags that should be prepended to an executable command to
enable debugging."""
pre_args = []
if self.with_strace:
if platform._is_linux:
pre_args += ['strace'] + self.strace_flags
else: # pragma: debug
raise RuntimeError("strace not supported on this OS.")
# TODO: dtruss cannot be run without sudo, sudo cannot be
# added to the model process command if it is not in the original
# yggdrasil CLI call, and must be tested with an executable that
# is not "signed with restricted entitlements" (which most built-in
# utilities (e.g. sleep) are).
# elif platform._is_mac:
# if 'sudo' in sys.argv:
# pre_args += ['sudo']
# pre_args += ['dtruss']
elif self.with_valgrind:
pre_args += ['valgrind'] + self.valgrind_flags
return pre_args
@classmethod
def language_version(cls, version_flags=None, **kwargs):
r"""Determine the version of this language.
Args:
**kwargs: Keyword arguments are passed to cls.run_executable.
Returns:
str: Version of compiler/interpreter for this language.
"""
if version_flags is None:
version_flags = cls.version_flags
return cls.run_executable(version_flags, **kwargs).splitlines()[0].strip()
@classmethod
def is_installed(cls):
r"""Determine if this model driver is installed on the current
machine.
Returns:
bool: Truth of if this model driver can be run on the current
machine.
"""
return (cls.is_language_installed()
and cls.are_base_languages_installed()
and cls.are_dependencies_installed()
and cls.is_interface_installed() and cls.is_comm_installed()
and cls.is_configured() and (not cls.is_disabled()))
@classmethod
def are_base_languages_installed(cls, missing=None):
r"""Determine if the base languages are installed.
Args:
missing (list, optional): A pre-existing list that
missing base languages should be appended to.
Returns:
bool: True if the base langauges are installed. False otherwise.
"""
out = True
for x in cls.base_languages:
if (not out) and (not isinstance(missing, list)): # pragma: no cover
break
out = import_component('model', x).is_installed()
if isinstance(missing, list) and (not out):
missing.append(x)
if missing:
out = False
return out
@classmethod
def are_dependencies_installed(cls):
r"""Determine if the dependencies are installed for the interface (not
including dependencies needed by a particular communication type).
Returns:
bool: True if the dependencies are installed. False otherwise.
"""
out = (cls.language is not None)
for x in cls.interface_dependencies:
if not out: # pragma: no cover
break
out = cls.is_library_installed(x)
return out
@classmethod
def is_interface_installed(cls):
r"""Determine if the interface library for the associated programming
language is installed.
Returns:
bool: True if the interface library is installed.
"""
out = (cls.language is not None)
if out and (cls.interface_library is not None):
out = cls.is_library_installed(cls.interface_library)
return out
@classmethod
def is_language_installed(cls):
r"""Determine if the interpreter/compiler for the associated programming
language is installed.
Returns:
bool: True if the language interpreter/compiler is installed.
"""
out = False
if cls.language is not None:
try:
out = (shutil.which(cls.language_executable()) is not None)
except NotImplementedError: # pragma: debug
out = False
return out
@classmethod
def identify_source_files(cls, args=None, working_dir=None, **kwargs):
r"""Determine the source file based on model arguments.
Args:
args (list, optional): Arguments provided.
working_dir (str, optional): Working directory.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Source files.
"""
out = []
if args:
src = args[0]
if (((not cls.is_source_file(src))
and (cls.language_ext is not None)
and (os.path.splitext(src)[-1]
not in cls.get_all_language_ext()))):
src = os.path.splitext(src)[0] + cls.language_ext[0]
if working_dir and (not os.path.isabs(src)):
src = os.path.normpath(os.path.join(working_dir, src))
if os.path.isfile(src):
out.append(src)
return out
@classmethod
def is_source_file(cls, fname):
r"""Determine if the provided file name points to a source files for
the associated programming language by checking the extension.
Args:
fname (str): Path to file.
Returns:
bool: True if the provided file is a source file, False otherwise.
"""
out = False
model_ext = os.path.splitext(fname)[-1]
if len(model_ext) > 0:
out = (model_ext in cls.get_language_ext())
return out
    @classmethod
    def is_library_installed(cls, lib, **kwargs):
        r"""Determine if a dependency is installed.

        Args:
            lib (str): Name of the library that should be checked.
            **kwargs: Additional keyword arguments are ignored.

        Returns:
            bool: True if the library is installed, False otherwise.

        Raises:
            NotImplementedError: If not overridden by the language subclass.
        """
        raise NotImplementedError("Method is_library_installed missing for '%s'"
                                  % cls.language)
    @classmethod
    def is_disabled(cls):
        r"""Determine if the language was disabled in the configuration file.

        Returns:
            bool: True if the 'disable' option is set to 'true' (in any
                case) in this language's config section, False otherwise.
        """
        return (cls.cfg.get(cls.language, 'disable', 'false').lower() == 'true')
@classmethod
def is_configured(cls):
r"""Determine if the appropriate configuration has been performed (e.g.
installation of supporting libraries etc.)
Returns:
bool: True if the language has been configured.
"""
# Check for section & diable
disable_flag = cls.is_disabled()
out = (cls.cfg.has_section(cls.language) and (not disable_flag))
# Check for commtypes
if out and (len(cls.base_languages) == 0):
out = (cls.cfg.get(cls.language, 'commtypes', None) is not None)
# Check for config keys
for k in cls._config_keys:
if not out: # pragma: no cover
break
out = (cls.cfg.get(cls.language, k, None) is not None)
return out
    @classmethod
    def is_comm_installed(cls, commtype=None, skip_config=False, **kwargs):
        r"""Determine if a comm is installed for the associated programming
        language.
        Args:
            commtype (str, optional): If provided, this method will only test
                for installation of the specified communication type. Defaults
                to None and will check for any installed comm.
            skip_config (bool, optional): If True, the config list of comms
                installed for this language will not be used to determine if
                the comm is installed and the class attribute
                supported_comm_options will be processed. Defaults to False and
                config options are used in order to improve performance after
                initial configuration.
            platforms (list, optional): Platforms on which the comm can be
                installed. Defaults to None and is ignored unless there is a
                value for the commtype in supported_comm_options. This
                keyword argument is ignored if skip_config is False.
            libraries (list, optional): External libraries that are required
                by the specified commtype. Defaults to None and is ignored
                unless there is a value for the commtype in supported_comm_options.
                This keyword argument is ignored if skip_config is False.
            **kwargs: Additional keyword arguments are passed to either
                is_comm_installed for the base languages, supported languages,
                or is_library_installed as appropriate.
        Returns:
            bool: True if a comm is installed for this language.
        """
        # If there are base_languages for this language, use that language's
        # driver to check for comm installation.
        if len(cls.base_languages) > 0:
            # All base languages must have the comm installed
            out = True
            for x in cls.base_languages:
                if not out:  # pragma: no cover
                    break
                out = import_component('model', x).is_comm_installed(
                    commtype=commtype, skip_config=skip_config, **kwargs)
            return out
        if cls.comms_implicit:
            # Languages with implicit comms support whatever yggdrasil supports
            if commtype is None:
                return True
            return (commtype in tools.get_supported_comm())
        # Check for installation based on config option
        if not skip_config:
            installed_comms = cls.cfg.get(cls.language, 'commtypes', [])
            if commtype is None:
                return (len(installed_comms) > 0)
            else:
                return (commtype in installed_comms)
        # Check for any comm
        if commtype is None:
            for c in cls.supported_comms:
                if cls.is_comm_installed(commtype=c, skip_config=skip_config,
                                         **kwargs):
                    return True
        # Check that comm is explicitly supported
        # (also reached when commtype is None and no supported comm matched
        # above; None is never in supported_comms so False is returned)
        if commtype not in cls.supported_comms:
            return False
        # Set & pop keywords
        for k, v in cls.supported_comm_options.get(commtype, {}).items():
            if kwargs.get(k, None) is None:
                kwargs[k] = v
        platforms = kwargs.pop('platforms', None)
        libraries = kwargs.pop('libraries', [])
        # Check platforms
        if (platforms is not None) and (platform._platform not in platforms):
            return False  # pragma: windows
        # Check libraries
        if (libraries is not None):
            for lib in libraries:
                if not cls.is_library_installed(lib, **kwargs):
                    return False
        # Check for server on RabbitMQ
        if commtype in ['rmq', 'rmq_async']:
            from yggdrasil.communication.RMQComm import check_rmq_server
            if not check_rmq_server():
                return False
        return True
    @classmethod
    def configure(cls, cfg):
        r"""Add configuration options for this language.
        Args:
            cfg (CisConfigParser): Config class that options should be set for.
        Returns:
            list: Section, option, description tuples for options that could not
                be set.
        """
        out = []
        # Section and executable
        if (cls.language is not None) and (not cfg.has_section(cls.language)):
            cfg.add_section(cls.language)
        # Executable type configuration
        out += cls.configure_executable_type(cfg)
        # Locate executable
        if (((not cls.is_language_installed())
             and (cls.executable_type is not None))):  # pragma: debug
            try:
                exec_file = cls.language_executable()
                if exec_file is not None:
                    # Search the class-specific directories for the
                    # language's interpreter/compiler and record its path
                    fpath = tools.locate_file(
                        exec_file, directory_list=cls._executable_search_dirs)
                    if fpath:
                        cfg.set(cls.language, cls.executable_type, fpath)
            except NotImplementedError:
                pass
        # Configure libraries
        out += cls.configure_libraries(cfg)
        # Only do additional configuration if no base languages
        if not cls.base_languages:
            # Installed comms
            comms = []
            for c in cls.supported_comms:
                if cls.is_comm_installed(commtype=c, cfg=cfg, skip_config=True):
                    comms.append(c)
            cfg.set(cls.language, 'commtypes', comms)
        # NOTE(review): cls is passed explicitly in addition to being the
        # bound argument — presumably after_registration is stored unbound;
        # confirm against the class registration machinery
        cls.after_registration(cls, cfg=cfg, second_pass=True)
        return out
    @classmethod
    def configure_executable_type(cls, cfg):
        r"""Add configuration options specific in the executable type
        before the libraries are configured.
        Args:
            cfg (YggConfigParser): Config class that options should be set for.
        Returns:
            list: Section, option, description tuples for options that could not
                be set.
        """
        # Hook for subclasses; the base implementation has nothing to add
        return []
    @classmethod
    def configure_libraries(cls, cfg):
        r"""Add configuration options for external libraries in this language.
        Args:
            cfg (YggConfigParser): Config class that options should be set for.
        Returns:
            list: Section, option, description tuples for options that could not
                be set.
        """
        # Hook for subclasses; the base implementation has nothing to add
        return []
    def get_io_env(self, input_drivers=None, output_drivers=None):
        r"""Get environment variables set by the input/output drivers.
        Args:
            input_drivers (list, optional): Input drivers. Defaults to the
                yaml entry if not provided.
            output_drivers (list, optional): Output drivers. Defaults to the
                yaml entry if not provided.
        Returns:
            dict: Environment variables.
        """
        if input_drivers is None:
            input_drivers = self.yml.get('input_drivers', [])
        if output_drivers is None:
            output_drivers = self.yml.get('output_drivers', [])
        out = {}
        if self.copies > 1:
            # Duplicated models share env vars registered under the
            # base (un-suffixed) model name
            from yggdrasil.drivers.DuplicatedModelDriver import (
                DuplicatedModelDriver)
            base_name = DuplicatedModelDriver.get_base_name(self.name)
        else:
            base_name = self.name
        for x in input_drivers + output_drivers:
            if 'instance' in x:
                model_env = x['instance'].model_env
                # Prefer the exact model name over the base name
                if self.name in model_env:
                    out.update(model_env[self.name])
                elif base_name in model_env:
                    out.update(model_env[base_name])
        return out
@classmethod
def set_env_class(cls, existing=None, **kwargs):
r"""Set environment variables that are instance independent.
Args:
existing (dict, optional): Existing dictionary of environment
variables that new variables should be added to. Defaults
to a copy of os.environ.
**kwargs: Additional keyword arguments are ignored.
Returns:
dict: Environment variables for the model process.
"""
if existing is None: # pragma: no cover
existing = {}
existing.update(os.environ)
return existing
    def set_env(self, existing=None, **kwargs):
        r"""Get environment variables that should be set for the model process.
        Args:
            existing (dict, optional): Existing dictionary of environment
                variables that new variables should be added to. Defaults
                to a copy of os.environ.
            **kwargs: Additional keyword arguments are passed to set_env_class.
        Returns:
            dict: Environment variables for the model process.
        """
        from yggdrasil.config import ygg_cfg
        if existing is None:
            existing = {}
        # Layer the sources: yaml-specified env, then driver env, then the
        # class-level defaults
        existing.update(copy.deepcopy(self.env))
        existing.update(self.get_io_env())
        env = self.set_env_class(existing=existing, **kwargs)
        env.update(YGG_SUBPROCESS="True",
                   YGG_MODEL_INDEX=str(self.model_index),
                   YGG_MODEL_LANGUAGE=self.language,
                   YGG_MODEL_COPIES=str(self.copies),
                   # YGG_PYTHON_EXEC=sys.executable,
                   YGG_DEFAULT_COMM=tools.get_default_comm(),
                   YGG_NCLIENTS=str(len(self.clients)))
        if multitasking._on_mpi:
            env['YGG_MPI_RANK'] = str(multitasking._mpi_rank)
        if self.copies > 1:
            from yggdrasil.drivers.DuplicatedModelDriver import (
                DuplicatedModelDriver)
            env['YGG_MODEL_COPY'] = str(self.copy_index)
            env['YGG_MODEL_NAME'] = DuplicatedModelDriver.get_base_name(
                self.name)
        else:
            env['YGG_MODEL_NAME'] = self.name
        if self.allow_threading or (self.copies > 1):
            env['YGG_THREADING'] = '1'
        if isinstance(self.is_server, dict):
            env['YGG_SERVER_INPUT'] = self.is_server['input']
            env['YGG_SERVER_OUTPUT'] = self.is_server['output']
        if self.logging_level:
            env['YGG_MODEL_DEBUG'] = self.logging_level
        # Environment variable names cannot contain ':' on some platforms;
        # encode it so the child process can decode the original key
        replace = [k for k in env.keys() if ':' in k]
        for k in replace:
            env[k.replace(':', '__COLON__')] = env.pop(k)
        if ygg_cfg.get('general', 'allow_multiple_omp', False):
            env['KMP_DUPLICATE_LIB_OK'] = 'True'
        return env
    def before_start(self, no_queue_thread=False, **kwargs):
        r"""Actions to perform before the run starts.
        Args:
            no_queue_thread (bool, optional): If True, the queue_thread is not
                created/started. Defaults to False.
            **kwargs: Keyword arguments are passed to run_model.
        """
        # if multitasking._on_mpi:
        #     self.init_mpi_env()
        self.model_process = self.run_model(**kwargs)
        # Start thread to queue output
        if not no_queue_thread:
            self.queue_thread = multitasking.YggTaskLoop(
                target=self.enqueue_output_loop,
                name=self.name + '.EnqueueLoop')
            self.queue_thread.start()
        if multitasking._on_mpi:
            self.init_mpi()
    def queue_close(self):
        r"""Close the queue for messages from the model process."""
        # Closing stdout causes queue_recv/readline in the enqueue loop to
        # return an empty line, which terminates that loop
        self.model_process.stdout.close()
    def queue_recv(self):
        r"""Receive a message from the model process.
        Returns:
            bytes: One line of output from the model process's stdout
                (empty once the stream is closed/exhausted).
        """
        return self.model_process.stdout.readline()
    def enqueue_output_loop(self):
        r"""Keep passing lines to queue.
        Runs as the body of the queue_thread task loop: reads one line of
        model output per iteration and forwards it to self.queue. An empty
        line signals end of output and stops the loop.
        """
        try:
            line = self.queue_recv()
        except BaseException as e:  # pragma: debug
            print(e)
            line = ""
        if (len(line) == 0):
            # Empty read means stdout was closed/exhausted; signal consumers
            # via the sentinel exit line and stop this loop
            self.queue_thread.set_break_flag()
            try:
                self.queue.put(self._exit_line)
            except multitasking.AliasDisconnectError:  # pragma: debug
                self.error("Queue disconnected")
            self.debug("End of model output")
            try:
                self.queue_close()
            except BaseException:  # pragma: debug
                pass
        else:
            try:
                # Lines come from the process pipe as bytes
                self.queue.put(line.decode('utf-8'))
            except BaseException as e:  # pragma: debug
                warnings.warn("Error in printing output: %s" % e)
    def before_loop(self):
        r"""Actions before loop: log the command, directories and environment
        that the model will be run with."""
        self.debug('Running %s from %s with cwd %s and env %s',
                   self.model_command(), os.getcwd(), self.working_dir,
                   pformat(self.env))
    # def init_mpi_env(self):
    #     r"""Receive env information to the partner model."""
    #     self.env = self.recv_mpi(tag=self._mpi_tags['ENV'])
    def init_mpi(self):
        r"""Initialize MPI communicator.
        Rank 0 drives the run directly and does not need a communicator;
        other ranks block until the START message arrives and then post a
        non-blocking receive for a stop request from the partner.
        """
        if self._mpi_rank == 0:
            self._mpi_comm = None
        else:
            self.recv_mpi(tag=self._mpi_tags['START'])
            self._mpi_requests['stopped'] = multitasking.MPIRequestWrapper(
                self.recv_mpi(tag=self._mpi_tags['STOP_RANKX'],
                              dont_block=True))
    def send_mpi(self, msg, tag=0, dont_block=False):
        r"""Send an MPI message.
        Args:
            msg (object): Message to send to the partner rank.
            tag (int, optional): Offset added to this driver's base MPI tag.
                Defaults to 0.
            dont_block (bool, optional): If True, a non-blocking send would
                be used; currently not supported and raises. Defaults to
                False.
        Raises:
            NotImplementedError: If dont_block is True.
        """
        self.debug("send %d (%d) [%s]: %s (blocking=%s)", tag,
                   self._mpi_tag + tag, self._inv_mpi_tags[tag],
                   msg, not dont_block)
        kws = {'dest': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
        if dont_block:  # pragma: debug
            # return self._mpi_comm.isend(msg, **kws)
            raise NotImplementedError("Non-blocking MPI send not tested.")
        else:
            return self._mpi_comm.send(msg, **kws)
    def recv_mpi(self, tag=0, dont_block=False):
        r"""Receive an MPI message.
        Args:
            tag (int, optional): Offset added to this driver's base MPI tag.
                Defaults to 0.
            dont_block (bool, optional): If True, return a request object
                from a non-blocking receive instead of the message itself.
                Defaults to False.
        Returns:
            object: Received message (blocking) or MPI request object
                (non-blocking).
        """
        self.debug('recv %d (%d) [%s] (blocking=%s)', tag,
                   self._mpi_tag + tag, self._inv_mpi_tags[tag],
                   not dont_block)
        kws = {'source': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
        if dont_block:
            return self._mpi_comm.irecv(**kws)
        else:
            return self._mpi_comm.recv(**kws)
    def stop_mpi_partner(self, msg=None, dest=0, tag=None):
        r"""Send a message to stop the MPI partner model on the main process.
        Args:
            msg (str, optional): Message to send. Defaults to 'ERROR' when
                errors have been recorded (or the process exited non-zero)
                and 'STOPPING' otherwise.
            dest (int, optional): Destination rank. Defaults to 0. Currently
                unused; the destination is taken from _mpi_partner_rank.
            tag (int, optional): Tag offset for the message. Defaults to the
                STOP_RANK0 tag.
        """
        # Only send once; a completed 'stopping' request means a stop
        # message was already dispatched
        if self._mpi_comm and (not self.check_mpi_request('stopping')):
            if tag is None:
                tag = self._mpi_tags['STOP_RANK0']
            if msg is None:
                if self.errors or self.model_process_returncode:
                    msg = 'ERROR'
                else:
                    msg = 'STOPPING'
            self.debug("stop_mpi_partner: %d, %s", tag, msg)
            # Don't call test()
            self._mpi_requests['stopping'] = multitasking.MPIRequestWrapper(
                self.send_mpi(msg, tag=tag), completed=True)
    def wait_on_mpi_request(self, name, timeout=False):
        r"""Wait for a request to be completed.
        Args:
            name (str): Name that request was registered under.
            timeout (float, optional): Time (in seconds) to wait before
                giving up. Defaults to False (use the wrapper's default).
        Returns:
            bool, str: Received message or False if the request does not
                exist or is not complete. None is returned if the wait
                times out.
        """
        self.debug("Waiting on '%s' (timeout=%s)", name, timeout)
        try:
            out = self._mpi_requests[name].wait(timeout=timeout)
            # An 'ERROR' message from the partner is recorded so the driver
            # reports a failure
            if out == 'ERROR':  # pragma: debug
                self.errors.append(out)
            return out
        except asyncio.TimeoutError:  # pragma: debug
            self.info("Timeout for MPI '%s' request", name)
def check_mpi_request(self, name):
r"""Check if a request has been completed.
Args:
name (str): Name that request was registered under.
Returns:
bool, str: Received message or False if the request does not
exist or is not complete.
"""
if self._mpi_comm and (name in self._mpi_requests):
out, msg = self._mpi_requests[name].test()
if out and (msg == 'ERROR'): # pragma: debug
self.errors.append(msg)
return out
return False
    def set_break_flag(self, *args, **kwargs):
        r"""Stop the model loop.
        Also notifies the MPI partner (if any) before deferring to the
        base class implementation.
        """
        self.stop_mpi_partner()
        super(ModelDriver, self).set_break_flag(*args, **kwargs)
    def run_loop(self):
        r"""Loop to check if model is still running and forward output."""
        # Continue reading until there is not any output
        # A non-zero return code is treated as an error even while draining
        # remaining output
        if self.model_process_returncode:
            self.errors.append(self.model_process_returncode)
        if self.check_mpi_request('stopped'):
            self.debug("Stop requested by MPI partner.")
            self.set_break_flag()
        try:
            line = self.queue.get_nowait()
        except Empty:
            # This sleep is necessary to allow changes in queue without lock
            self.sleep()
            return
        except multitasking.AliasDisconnectError:  # pragma: debug
            self.error("Queue disconnected")
            self.set_break_flag()
        else:
            # The sentinel exit line marks the end of model output
            if (line == self._exit_line) or self.check_mpi_request('stopped'):
                self.debug("No more output")
                self.set_break_flag()
            else:
                self.print_encoded(line, end="")
                sys.stdout.flush()
    def run_finally(self):
        r"""Actions to perform in finally clause of try/except wrapping
        run."""
        # Ensure the MPI partner gets cleaned up following an error
        self.stop_mpi_partner()
        super(ModelDriver, self).run_finally()
    def after_loop(self):
        r"""Actions to perform after run_loop has finished. Mainly checking
        if there was an error and then handling it."""
        self.debug('')
        self.stop_mpi_partner()
        if self.queue_thread is not None:
            self.queue_thread.join(self.sleeptime)
            if self.queue_thread.is_alive():
                self.debug("Queue thread still alive")
                # Loop was broken from outside, kill the queueing thread
                self.kill_process()
                return
        self.wait_process(self.timeout, key_suffix='.after_loop')
        self.kill_process()
        self.debug(("Closing input/output drivers:\n"
                    "\tinput: %s\n\toutput: %s")
                   % ([drv['name'] for drv in
                       self.yml.get('input_drivers', [])],
                      [drv['name'] for drv in
                       self.yml.get('output_drivers', [])]))
        # Notify connected drivers that this model exited so they can close
        # the corresponding side of each connection
        for drv in self.yml.get('input_drivers', []):
            if 'instance' in drv:
                if self.language == 'mpi':
                    drv['instance'].wait(self.timeout)
                drv['instance'].on_model_exit('output', self.name,
                                              errors=self.errors)
        for drv in self.yml.get('output_drivers', []):
            if 'instance' in drv:
                if self.language == 'mpi':
                    drv['instance'].wait(self.timeout)
                drv['instance'].on_model_exit('input', self.name,
                                              errors=self.errors)
@property
def io_errors(self):
r"""list: Errors produced by input/output drivers to this model."""
errors = []
for drv in self.yml.get('input_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
for drv in self.yml.get('output_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
return errors
@property
def model_process_complete(self):
r"""bool: Has the process finished or not. Returns True if the process
has not started."""
if self.model_process is None: # pragma: debug
return True
return (self.model_process.poll() is not None)
@property
def model_process_returncode(self):
r"""int: Return code for the model process where non-zero values
indicate that there was an error."""
if self.model_process_complete and (self.model_process is not None):
return self.model_process.returncode
return 0
    def wait_process(self, timeout=None, key=None, key_suffix=None):
        r"""Wait for some amount of time for the process to finish.
        Args:
            timeout (float, optional): Time (in seconds) that should be waited.
                Defaults to None and is infinite.
            key (str, optional): Key that should be used to register the timeout.
                Defaults to None and set based on the stack trace.
            key_suffix (str, optional): Suffix appended to the timeout key
                to distinguish callers. Defaults to None.
        Returns:
            bool: True if the process completed. False otherwise.
        """
        if not self.was_started:  # pragma: debug
            # Nothing to wait for; treat an unstarted process as complete
            return True
        return self.wait_on_function(lambda: self.model_process_complete,
                                     timeout=timeout, key_level=1, key=key,
                                     key_suffix=key_suffix)
    def kill_process(self):
        r"""Kill the process running the model, checking return code."""
        if not self.was_started:  # pragma: debug
            self.debug('Process was never started.')
            self.set_break_flag()
            # Mark both kill events so waiters are released even though
            # there is no process to kill
            self.event_process_kill_called.set()
            self.event_process_kill_complete.set()
        if self.event_process_kill_called.is_set():  # pragma: debug
            self.debug('Process has already been killed.')
            return
        self.event_process_kill_called.set()
        with self.lock:
            self.debug('')
            ignore_error_code = False
            if not self.model_process_complete:  # pragma: debug
                self.debug("Process is still running. Killing it.")
                try:
                    self.model_process.kill()
                    self.debug("Waiting %f s for process to be killed",
                               self.timeout)
                    self.wait_process(self.timeout, key_suffix='.kill_process')
                except BaseException:  # pragma: debug
                    self.exception("Error killing model process")
                # A forcibly-killed model that never sent anything is not
                # treated as an error
                if not self.has_sent_messages:
                    ignore_error_code = True
            assert(self.model_process_complete)
            if (((self.model_process_returncode != 0)
                 and (not ignore_error_code))):
                self.error(("return code of %s indicates model error. "
                            "(sent messages: %s)"),
                           str(self.model_process_returncode),
                           self.n_sent_messages)
        self.event_process_kill_complete.set()
        if self.queue_thread is not None:
            if not self.was_break:  # pragma: debug
                # Wait for messages to be printed
                self.debug("Waiting for queue_thread to finish up.")
                self.queue_thread.wait(self.timeout)
            if self.queue_thread.is_alive():  # pragma: debug
                self.debug("Setting break flag for queue_thread to finish up.")
                self.queue_thread.set_break_flag()
                self.queue_thread.wait(self.timeout)
                try:
                    self.queue_close()
                    self.queue_thread.wait(self.timeout)
                except BaseException:  # pragma: debug
                    self.exception("Closed during concurrent action")
                if self.queue_thread.is_alive():  # pragma: debug
                    self.error("Queue thread was not terminated.")
    def graceful_stop(self):
        r"""Gracefully stop the driver.
        Waits for the model process to finish (only if it produced output)
        before deferring to the base class.
        """
        self.debug('')
        if self.has_sent_messages:
            self.wait_process(self.timeout, key_suffix='.graceful_stop')
        super(ModelDriver, self).graceful_stop()
    def cleanup_products(self):
        r"""Remove products created in order to run the model."""
        # Products are only removed when this driver owns them (overwrite)
        # and caching was not requested
        if self.overwrite and (not self.preserve_cache):
            self.remove_products()
        self.restore_files()
    def cleanup(self):
        r"""Remove compile executable."""
        self.cleanup_products()
        super(ModelDriver, self).cleanup()
    def restore_files(self):
        r"""Restore modified files to their original form."""
        # NOTE(review): each entry appears to be (backup_path, active_path);
        # the active file is removed and the backup moved into its place —
        # confirm against where modified_files entries are appended
        for (original, modified) in self.modified_files:
            if os.path.isfile(original):
                os.remove(modified)
                shutil.move(original, modified)
    def remove_products(self):
        r"""Delete products produced during the process of running the model."""
        products = self.products
        # Source products (including wrapper files) may need special
        # handling, so they are passed separately
        source_products = self.source_products + self.wrapper_products
        remove_products(products, source_products)
@classmethod
def cleanup_dependencies(cls, products=[], verbose=False):
r"""Cleanup dependencies."""
for x in products:
if os.path.isfile(x):
if verbose: # pragma: debug
print("Removing %s" % x)
os.remove(x)
# Methods for automated model wrapping
@classmethod
def run_code(cls, lines, process_kwargs={}, **kwargs):
r"""Run code by first writing it as an executable and then calling
the driver.
Args:
lines (list): Lines of code to be wrapped as an executable.
process_kwargs (dict, optional): Keyword arguments that should
be passed to run_model. Defaults to {}.
**kwargs: Additional keyword arguments are passed to the
write_executable method.
"""
name = 'test_code_%s' % str(uuid.uuid4())[:13].replace('-', '_')
working_dir = os.getcwd()
code_dir = tempfile.gettempdir()
# code_dir = working_dir
fname = os.path.join(code_dir, name + cls.get_language_ext()[0])
lines = cls.write_executable(lines, **kwargs)
with open(fname, 'w') as fd:
fd.write('\n'.join(lines))
inst = None
try:
assert(os.path.isfile(fname))
inst = cls(name, [fname], working_dir=working_dir)
inst.run_model(return_process=False, **process_kwargs)
except BaseException: # pragma: debug
logger.error('Failed generated code:\n%s' % '\n'.join(lines))
raise
finally:
if os.path.isfile(fname):
os.remove(fname)
if inst is not None:
inst.cleanup()
@classmethod
def format_function_param(cls, key, default=None, replacement=None,
ignore_method=False, **kwargs):
r"""Return the formatted version of the specified key.
Args:
key (str): Key in cls.function_param mapping that should be
formatted.
default (str, optional): Format that should be returned if key
is not in cls.function_param. Defaults to None.
replacement (str, optional): Format that should be used instead
of the one in cls.function_param. Defaults to None.
**kwargs: Additional keyword arguments are used in formatting the
request function parameter.
Returns:
str: Formatted string.
Raises:
NotImplementedError: If key is not in cls.function_param and default
is not set.
"""
if replacement is not None:
fmt = replacement
elif (not ignore_method) and hasattr(cls, 'format_function_param_%s' % key):
return getattr(cls, 'format_function_param_%s' % key)(**kwargs)
else:
if (key not in cls.function_param) and (default is None):
raise NotImplementedError(("Language %s dosn't have an entry in "
"function_param for key '%s'")
% (cls.language, key))
fmt = cls.function_param.get(key, default)
return fmt.format(**kwargs)
    @classmethod
    def parse_var_definition(cls, io, value, outputs_in_inputs=None):
        r"""Extract information about input/output variables from a
        string definition.
        Args:
            io (str): Description of variables contained in the provided
                string. Must be 'inputs' or 'outputs'.
            value (str): String containing one or more variable definitions.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to False.
        Returns:
            list: List of information about the variables contained in
                the provided string.
        Raises:
            AssertionError: If io is not 'inputs' or 'outputs'.
            NotImplementedError: If the def_regex for the specified
                io is not defined.
        """
        if outputs_in_inputs is None:
            outputs_in_inputs = cls.outputs_in_inputs
        assert(io in ['inputs', 'outputs'])
        if ('%s_def_regex' % io) not in cls.function_param:  # pragma: debug
            raise NotImplementedError(
                ("'%s_def_regex' not defined for "
                 "language %s.") % (io, cls.language))
        if 'multiple_outputs' in cls.function_param:
            # Strip the language's multiple-output wrapper (e.g. brackets)
            # by turning its template into a capturing regex
            multi_re = cls.function_param['multiple_outputs']
            for x in '[]()':
                multi_re = multi_re.replace(x, '\\' + x)
            multi_re = multi_re.format(outputs='(.*?)')
            match = re.search(multi_re, value)
            if match is not None:
                value = match.group(1)
        new_val = []
        io_re = cls.format_function_param('%s_def_regex' % io)
        for i, ivar in enumerate(cls.split_variables(value)):
            # Fall back to treating the raw token as the variable name if
            # the definition regex does not match
            igrp = {'name': ivar}
            x = re.search(io_re, ivar)
            if x is not None:
                igrp = x.groupdict()
                for k in list(igrp.keys()):
                    if igrp[k] is None:
                        del igrp[k]
                if 'native_type' in igrp:
                    igrp['native_type'] = igrp['native_type'].replace(' ', '')
                    igrp['datatype'] = cls.get_json_type(igrp['native_type'])
                igrp['position'] = i
            if (io == 'outputs') and outputs_in_inputs:
                igrp = cls.input2output(igrp)
            new_val.append(igrp)
        return new_val
    @classmethod
    def parse_function_definition(cls, model_file, model_function,
                                  contents=None, match=None,
                                  expected_outputs=[], outputs_in_inputs=None):
        r"""Get information about the inputs & outputs to a model from its
        defintition if possible.
        Args:
            model_file (str): Full path to the file containing the model
                function's declaration.
            model_function (str): Name of the model function.
            contents (str, optional): String containing the function definition.
                If not provided, the function definition is read from model_file.
            match (re.Match, optional): Match object for the function regex. If
                not provided, a search is performed using function_def_regex.
            expected_outputs (list, optional): List of names or variable
                information dictionaries for outputs that are expected
                to be extracted from the function's definition. This
                variable is only used if outputs_in_inputs is True and
                outputs are not extracted from the function's defintion
                using the regex for this language. Defaults to [].
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to False.
        Returns:
            dict: Parameters extracted from the function definitions.
        """
        if outputs_in_inputs is None:
            outputs_in_inputs = cls.outputs_in_inputs
        out = {}
        if match or ('function_def_regex' in cls.function_param):
            if not match:
                function_regex = cls.format_function_param(
                    'function_def_regex', function_name=model_function)
                if contents is None:
                    with open(model_file, 'r') as fd:
                        contents = fd.read()
                match = re.search(function_regex, contents)
                if not match:  # pragma: debug
                    raise RuntimeError(("Could not find function match in file:\n"
                                        "%s\nfor regex:\nr'%s'")
                                       % (pformat(contents), function_regex))
            # Match brackets to determine where the function definition is
            if isinstance(cls.brackets, tuple):
                assert(len(cls.brackets) == 2)
                contents = match.group(0)
                counts = {k: 0 for k in cls.brackets}
                first_zero = 0
                re_brackets = r'[\%s\%s]' % cls.brackets
                # Scan brackets until open/close counts balance; that point
                # marks the end of the first complete definition
                for x in re.finditer(re_brackets, contents):
                    counts[x.group(0)] += 1
                    if (((counts[cls.brackets[0]] > 0)
                         and (counts[cls.brackets[0]]
                              == counts[cls.brackets[1]]))):
                        first_zero = x.span(0)[1]
                        break
                assert((first_zero == 0) or (first_zero == len(contents)))
                # This is currently commented as regex's are
                # sufficient so far, but this may be needed in the
                # future to isolate single definitions.
                # if (first_zero != 0) and first_zero != len(contents):
                #     contents = contents[:first_zero]
                #     match = re.search(function_regex, contents)
                #     assert(match)
            out = match.groupdict()
            # Drop regex groups that did not participate in the match
            for k in list(out.keys()):
                if out[k] is None:
                    del out[k]
            for io in ['inputs', 'outputs']:
                if io in out:
                    out[io] = cls.parse_var_definition(
                        io, out[io], outputs_in_inputs=outputs_in_inputs)
        out['model_file'] = model_file
        if outputs_in_inputs and expected_outputs and (not out.get('outputs', False)):
            # Outputs were not matched by the regex; recover them from the
            # parsed inputs using the caller-provided expected names
            missing_expected_outputs = []
            for o in expected_outputs:
                if isinstance(o, dict):
                    o = o['name']
                missing_expected_outputs.append(o)
            out['outputs'] = []
            for x in out['inputs']:
                if x['name'] not in missing_expected_outputs:
                    continue
                missing_expected_outputs.remove(x['name'])
                out['outputs'].append(cls.input2output(x))
            if missing_expected_outputs:  # pragma: debug
                raise ValueError(("Could not locate %d output "
                                  "variable(s) in input: %s")
                                 % (len(missing_expected_outputs),
                                    missing_expected_outputs))
            for x in out['outputs']:
                out['inputs'].remove(x)
        if out.get('flag_var', None):
            # Normalize the matched flag into a variable dictionary
            flag_var = {'name': out.pop('flag_var'),
                        'datatype': {'type': 'flag'}}
            if out.get('flag_type', None):
                flag_var['native_type'] = out.pop('flag_type').replace(' ', '')
                flag_var['datatype'] = cls.get_json_type(flag_var['native_type'])
            out['flag_var'] = flag_var
        cls.check_flag_var(out, outputs_in_inputs=outputs_in_inputs)
        return out
@classmethod
def check_flag_var(cls, info, outputs_in_inputs=None):
r"""Check if the flag variable should be treated as an output.
Args:
info (dict): Information about the function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
"""
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = cls.outputs_in_inputs
flag_t = cls.type_map['flag']
if (((info.get('flag_var', {}).get('native_type', flag_t) != flag_t)
or (not outputs_in_inputs))):
if info.get('outputs', []): # pragma: debug
logger.warn("Support for returning outputs via parameter(s) "
"and return value is not yet support. The return "
"value will be assumed to be a flag indicating "
"the success of the model.")
info['outputs_in_inputs'] = True
else:
info['outputs'] = [info.pop('flag_var')]
info['outputs_in_inputs'] = False
@classmethod
def channels2vars(cls, channels):
r"""Convert a list of channels to a list of variables.
Args:
channels (list): List of channel dictionaries.
Returns:
list: List of variables.
"""
if not isinstance(channels, list):
channels = [channels]
variables = []
for x in channels:
variables += x['vars']
def get_pos(x):
return x.get('position', 0)
variables = sorted(variables, key=get_pos)
return variables
@classmethod
def expand_server_io(cls, inputs, outputs, client_comms=[]):
r"""Update inputs/outputs w/ information about server that will be
using them.
Args:
inputs (list): List of model inputs including types.
outputs (list): List of model outputs including types.
client_comms (list, optional): List of the names of client comms
that should be removed from the list of outputs. Defaults to [].
"""
if client_comms:
warnings.warn("When wrapping a model function, client comms "
"must either be initialized outside the function, "
"pass a 'global_scope' parameter to the "
"comm initialization (e.g. Python, R, Matlab), "
"or use a 'WITH_GLOBAL_SCOPE' macro "
"(e.g. C, C++, Fortran) around the initialization "
"so that they are persistent "
"across calls and the call or recv/send methods "
"must be called explicitly (as opposed to the "
"function inputs/outputs which will be handled "
"by the wrapper). This model's client comms are:\n"
"\t%s" % client_comms)
# Replace server input w/ split input/output and remove client
# connections from inputs
for i, x in enumerate(inputs):
if x.get('server_replaces', False):
inputs[x['server_replaces']['input_index']] = (
x['server_replaces']['input'])
outputs.insert(x['server_replaces']['output_index'],
x['server_replaces']['output'])
rm_outputs = [i for i, x in enumerate(outputs)
if x['name'] in client_comms]
for i in rm_outputs[::-1]:
outputs.pop(i)
    @classmethod
    def preparse_function(cls, yml):
        r"""Extract information about inputs and outputs based on the
        function being wrapped.
        Args:
            yml (dict): Options that will be used to initialize the model.
                Updated in place with a 'preparsed_function' entry.
        Returns:
            dict: Information about the parsed function. None is returned
                if the yaml does not specify a function.
        """
        if 'function' not in yml:
            return
        if yml.get('is_server', False):
            assert(isinstance(yml['is_server'], dict))
        if cls.function_param is None:
            raise ValueError(("Language %s is not parameterized "
                              "and so functions cannot be automatically "
                              "wrapped as a model.") % cls.language)
        source_files = cls.identify_source_files(**yml)
        if not source_files:  # pragma: debug
            raise ValueError("Could not identify any source files.")
        # The first identified source file is assumed to contain the
        # function definition
        model_function_file = source_files[0]
        if not os.path.isfile(model_function_file):  # pragma: debug
            raise ValueError("Source file does not exist: '%s'"
                             % model_function_file)
        # Update input/outputs based on parsed source code
        client_comms = ['%s:%s_%s' % (yml['name'], x, yml['name'])
                        for x in yml.get('client_of', [])]
        model_function_inputs = copy.copy(yml.get('inputs', []))
        model_function_outputs = copy.copy(yml.get('outputs', []))
        cls.expand_server_io(
            model_function_inputs, model_function_outputs,
            client_comms=client_comms)
        expected_outputs = []
        for x in model_function_outputs:
            expected_outputs += x.get('vars', [])
        model_outputs_in_inputs = yml.get('outputs_in_inputs', None)
        model_function_info = cls.parse_function_definition(
            model_function_file, yml['function'],
            expected_outputs=expected_outputs,
            outputs_in_inputs=model_outputs_in_inputs)
        if model_outputs_in_inputs is None:
            model_outputs_in_inputs = model_function_info.get(
                'outputs_in_inputs', None)
        model_flag = cls.update_io_from_function(
            model_function_info, yml['function'],
            inputs=model_function_inputs,
            outputs=model_function_outputs,
            iter_function_over=yml.get('iter_function_over', []))
        # Cache everything needed to wrap the function so parsing does not
        # have to be repeated when the driver is initialized
        yml['preparsed_function'] = {
            'model_file': model_function_info,
            'model_function': yml['function'],
            'inputs': model_function_inputs,
            'outputs': model_function_outputs,
            'model_flag': model_flag,
            'outputs_in_inputs': model_outputs_in_inputs,
            'copies': yml.get('copies', 1),
            'iter_function_over': yml.get('iter_function_over', []),
            'skip_update_io': True}
        return yml['preparsed_function']
    @classmethod
    def update_io_from_function(cls, model_file, model_function,
                                inputs=[], outputs=[], contents=None,
                                outputs_in_inputs=None, iter_function_over=[]):
        r"""Update inputs/outputs from the function definition.
        Args:
            model_file (str): Full path to the file containing the model
                function's declaration.
            model_function (str): Name of the model function.
            inputs (list, optional): List of model inputs including types.
                Defaults to []. Entries are updated in place.
            outputs (list, optional): List of model outputs including types.
                Defaults to []. Entries are updated in place.
            contents (str, optional): Contents of file to parse rather than
                re-reading the file. Defaults to None and is ignored.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to False.
            iter_function_over (array, optional): Variable(s) that should be
                received or sent as an array, but iterated over. Defaults to
                an empty array and is ignored.
        Returns:
            dict, None: Flag variable used by the model. If None, the
                model does not use a flag variable.
        """
        # Read info from the source code
        if (((isinstance(model_file, str) and os.path.isfile(model_file))
             or (contents is not None))): # pragma: debug
            expected_outputs = []
            for x in outputs:
                expected_outputs += x.get('vars', [])
            info = cls.parse_function_definition(model_file, model_function,
                                                 contents=contents,
                                                 expected_outputs=expected_outputs)
            # NOTE(review): logger.warn is deprecated in favor of
            # logger.warning
            logger.warn("The new execution pattern reuses the parsed "
                        "source code parameters. Double check results:\n%s."
                        % pformat(info))
        elif isinstance(model_file, dict):
            # Source was already parsed; reuse the parsed parameters
            info = model_file
        else:
            info = {"inputs": [], "outputs": []}
        if outputs_in_inputs is None: # pragma: debug
            outputs_in_inputs = info.get('outputs_in_inputs',
                                         cls.outputs_in_inputs)
        # Map variable name -> parsed variable info for quick lookup
        info_map = {io: OrderedDict([(x['name'], x) for x in info.get(io, [])])
                    for io in ['inputs', 'outputs']}
        # Determine flag variable
        flag_var = None
        if info.get('flag_var', None):
            flag_var = dict(info['flag_var'], name='model_flag')
        # Check for vars matching names of input/output channels
        for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
            if (io == 'outputs') and outputs_in_inputs:
                # Output variables appear as inputs in the function signature
                io_map = info_map['inputs']
            else:
                io_map = info_map[io]
            for x in io_var:
                if x.get('vars', []):
                    continue
                var_name = x['name'].split(':')[-1]
                if var_name in io_map:
                    x['vars'] = [var_name]
                    # Include any associated size variables
                    for k in ['length', 'shape', 'ndim']:
                        kvar = '%s_var' % k
                        if kvar in io_map[var_name]:
                            x['vars'].append(io_map[var_name][kvar])
        # Move variables if outputs in inputs
        if outputs_in_inputs:
            if ((((len(inputs) + len(outputs)) == len(info.get('inputs', [])))
                 and (len(info.get('outputs', [])) == 0))):
                # Positional assignment: the first len(inputs) parsed inputs
                # map to channel inputs, the remainder to channel outputs
                for i, vdict in enumerate(info['inputs'][:len(inputs)]):
                    inputs[i].setdefault('vars', [vdict['name']])
                    assert(inputs[i]['vars'] == [vdict['name']])
                for i, vdict in enumerate(info['inputs'][len(inputs):]):
                    outputs[i].setdefault('vars', [vdict['name']])
                    assert(outputs[i]['vars'] == [vdict['name']])
            for x in outputs:
                for i, v in enumerate(x.get('vars', [])):
                    if v in info_map['inputs']:
                        info_map['outputs'][v] = cls.input2output(
                            info_map['inputs'].pop(v))
        for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
            for x in io_var:
                x['channel_name'] = x['name']
                x['channel'] = (x['name'].split(':', 1)[-1]
                                + '_%s_channel' % io[:-1])
                # Replace variable names with full parsed info dicts
                for i, v in enumerate(x.get('vars', [])):
                    if v in info_map[io]:
                        x['vars'][i] = info_map[io][v]
            if (len(io_var) == 1) and info_map.get(io, False):
                # A single channel absorbs all parsed variables
                io_var[0].setdefault('vars', list(info_map[io].values()))
            for x in io_var:
                if 'vars' not in x:
                    x['vars'] = [copy.deepcopy(x)]
                    x['vars'][0]['name'] = x['name'].split(':', 1)[-1]
                # Normalize string datatypes to schema dicts
                for v in x['vars']:
                    if isinstance(v.get('datatype', None), str):
                        v['datatype'] = {'type': v['datatype']}
                if isinstance(x.get('datatype', None), str):
                    x['datatype'] = {'type': x['datatype']}
            # Check for user defined length variables and add flag to
            # length variables
            for x in io_var:
                for k in ['length', 'shape', 'ndim']:
                    for v in x['vars']:
                        if k + '_var' in v:
                            v[k + '_var'] = info_map[io][v[k + '_var']]
                            # v[k + '_var']['is_' + k + '_var'] = True
                            v[k + '_var']['is_length_var'] = True
                        else:
                            v[k + '_var'] = False
            # Update datatypes
            if cls.is_typed:
                for x in io_var:
                    non_length = []
                    for v in x['vars']:
                        if not v.get('is_length_var', False):
                            non_length.append(v)
                    if ((x.get('datatype', None)
                         and (not is_default_typedef(x['datatype'])))):
                        # Channel type takes precedence; push it down to vars
                        if (len(non_length) == 1):
                            non_length[0]['datatype'] = x['datatype']
                        else:
                            # TODO: Remove types associated with length?
                            assert(x['datatype']['type'] == 'array')
                            assert(len(x['datatype']['items'])
                                   == len(non_length))
                            for v, t in zip(non_length, x['datatype']['items']):
                                v['datatype'] = t
                    else:
                        # No explicit channel type; build it from the vars
                        if (len(non_length) == 1):
                            x['datatype'] = non_length[0]['datatype']
                        else:
                            x['datatype'] = {
                                'type': 'array',
                                'items': [v['datatype'] for v in non_length]}
                        x['datatype']['from_function'] = True
                    for v in x['vars']:
                        if 'native_type' not in v:
                            v['native_type'] = cls.get_native_type(**v)
        # Update types based on iteration
        # NOTE(review): io_var here is the leftover loop variable from the
        # loop above (i.e. the outputs list only); confirm inputs should
        # not also receive the iteration datatype update.
        for x in io_var:
            for v in x.get('vars', [x]):
                if v['name'] in iter_function_over:
                    v['iter_datatype'] = copy.deepcopy(v.get('datatype', {}))
                    if v.get('datatype', {}):
                        # Iterated scalars are transferred as 1darrays
                        assert(v['datatype']['type'] == 'scalar')
                        v['datatype']['type'] = '1darray'
                    v.pop('native_type', None)
                    v['native_type'] = cls.get_native_type(**v)
        # Finalize io variables
        for x in inputs:
            cls.finalize_function_io('input', x)
        for x in outputs:
            cls.finalize_function_io('output', x)
        return flag_var
@classmethod
def finalize_function_io(cls, direction, x):
r"""Finalize info for an input/output channel following function
parsing.
Args:
direction (str): Direction of channel ('input' or 'output')
"""
assert(direction in ['input', 'output'])
    @classmethod
    def write_model_wrapper(cls, model_file, model_function,
                            inputs=[], outputs=[], model_flag=None,
                            outputs_in_inputs=None, verbose=False, copies=1,
                            iter_function_over=[], verbose_model=False,
                            skip_update_io=False, model_name=None):
        r"""Return the lines required to wrap a model function as an integrated
        model.
        Args:
            model_file (str): Full path to the file containing the model
                function's declaration.
            model_function (str): Name of the model function.
            inputs (list, optional): List of model inputs including types.
                Defaults to [].
            outputs (list, optional): List of model outputs including types.
                Defaults to [].
            model_flag (dict, optional): Information about the flag that
                should be used to track the success of yggdrasil send/recv
                calls. This should only be provided if update_io_from_function
                has already been called. Defaults to None and is determined
                by update_io_from_function.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to the class attribute outputs_in_inputs.
            verbose (bool, optional): If True, the contents of the created file
                are displayed. Defaults to False.
            copies (int, optional): Number of times the model driver is
                duplicated. If more than one, no error will be raised in the
                event there is never a call to the function. Defaults to 1.
            iter_function_over (array, optional): Variable(s) that should be
                received or sent as an array, but iterated over. Defaults to
                an empty array and is ignored.
            skip_update_io (bool, optional): If True, update_io_from_function
                will not be called. Defaults to False.
            verbose_model (bool, optional): If True, print statements will
                be added after every line in the model. Defaults to False.
            model_name (str, optional): Name given to the model. Defaults to
                None.
        Returns:
            list: Lines of code wrapping the provided model with the necessary
                code to run it as part of an integration.
        """
        if outputs_in_inputs is None:
            outputs_in_inputs = cls.outputs_in_inputs
        # TODO: Determine how to encode dependencies on external variables in models
        if cls.function_param is None:
            # NOTE(review): implicit concatenation here yields
            # "...not set forlanguage..." (missing space between literals)
            raise NotImplementedError("function_param attribute not set for"
                                      "language '%s'" % cls.language)
        # Update types based on the function definition for typed languages
        if not skip_update_io:
            model_flag = cls.update_io_from_function(
                model_file, model_function,
                inputs=inputs, outputs=outputs,
                outputs_in_inputs=outputs_in_inputs,
                iter_function_over=iter_function_over)
        if isinstance(model_file, dict):
            model_file = model_file['model_file']
        # Update types based on iteration
        iter_function_idx = None
        iter_ivars = []
        iter_ovars = []
        if iter_function_over:
            # Loop index variable used to step through iterated arrays
            iter_function_idx = {'name': 'idx_func_iter',
                                 'datatype': {'type': 'int'}}
            if cls.zero_based:
                iter_function_idx['begin'] = int(0)
            else:
                iter_function_idx['begin'] = int(1)
            for x in inputs:
                iter_ivars += [v for v in x.get('vars', [x])
                               if v['name'] in iter_function_over]
            if not iter_ivars: # pragma: debug
                raise RuntimeError("The iter_function_over model "
                                   "parameter must include an input to "
                                   "iterate over. To expand output arrays "
                                   "into component elements, use the "
                                   "'iterate' transformation.")
            for x in outputs:
                iter_ovars += [v for v in x.get('vars', [x])
                               if v['name'] in iter_function_over]
            # Determine the iteration bound from the first iterated input
            if iter_ivars[0].get('length_var', False):
                iter_function_idx['end'] = iter_ivars[0]['length_var']
                for v in iter_ovars:
                    v['length_var'] = iter_ivars[0]['length_var']['name']
                if isinstance(iter_function_idx['end'], dict):
                    iter_function_idx['end'] = iter_function_idx['end']['name']
            else:
                iter_function_idx['end'] = cls.format_function_param(
                    'len', variable=iter_ivars[0]['name'],
                    extra=iter_ivars[0])
            for v in iter_ivars + iter_ovars:
                v['iter_var'] = iter_function_idx
        # Declare variables and flag, then define flag
        lines = []
        flag_var = {'name': 'flag', 'datatype': {'type': 'flag'}}
        iter_var = {'name': 'first_iter', 'datatype': {'type': 'flag'}}
        free_vars = []
        definitions = []
        if 'declare' in cls.function_param:
            for x in inputs + outputs:
                lines += cls.write_channel_decl(
                    x, definitions=definitions,
                    requires_freeing=free_vars)
            lines += cls.write_declaration(flag_var,
                                           definitions=definitions,
                                           requires_freeing=free_vars)
            lines += cls.write_declaration(iter_var,
                                           definitions=definitions,
                                           requires_freeing=free_vars)
            if model_flag:
                lines += cls.write_declaration(
                    model_flag, definitions=definitions,
                    requires_freeing=free_vars)
            if iter_function_idx:
                lines += cls.write_declaration(
                    iter_function_idx, definitions=definitions,
                    requires_freeing=free_vars)
            for x in inputs + outputs:
                for v in x.get('vars', [x]):
                    lines += cls.write_declaration(
                        v, definitions=definitions,
                        requires_freeing=free_vars)
            lines += definitions
        # Remember where the preamble ends so verbose prints skip it
        nline_preamble = len(lines)
        lines.append(cls.format_function_param(
            'assign', name=flag_var['name'],
            value=cls.function_param.get(
                'true_flag', cls.function_param['true'])))
        lines.append(cls.format_function_param(
            'assign', name=iter_var['name'],
            value=cls.function_param.get(
                'true_flag', cls.function_param['true'])))
        # Declare/define input and output channels
        for x in inputs:
            lines += cls.write_channel_def('input',
                                           requires_freeing=free_vars, **x)
        for x in outputs:
            lines += cls.write_channel_def('output',
                                           requires_freeing=free_vars, **x)
        # Receive inputs before loop
        for x in inputs:
            if x.get('outside_loop', False):
                lines += cls.write_model_recv(x['channel'], x,
                                              flag_var=flag_var)
        # Loop
        loop_lines = []
        # Receive inputs
        any_loop_inputs = False
        loop_iter_var = iter_var
        if copies > 1:
            # With duplicated drivers a model may legitimately never
            # receive input, so no first-iteration error is generated
            loop_iter_var = None
        for x in inputs:
            if not x.get('outside_loop', False):
                any_loop_inputs = True
                loop_lines += cls.write_model_recv(x['channel'], x,
                                                   flag_var=flag_var,
                                                   iter_var=loop_iter_var,
                                                   allow_failure=True)
        # Prepare output array
        if iter_function_over:
            for v in iter_ivars:
                if v['name'] in iter_function_over:
                    loop_lines += cls.write_finalize_iiter(v)
            for v in iter_ovars:
                if v['name'] in iter_function_over:
                    loop_lines += cls.write_initialize_oiter(v)
        # Call model
        loop_lines += cls.write_model_function_call(
            model_function, model_flag, inputs, outputs,
            outputs_in_inputs=outputs_in_inputs,
            iter_function_idx=iter_function_idx)
        # Finalize output array
        if iter_function_over:
            for v in iter_ovars:
                if v['name'] in iter_function_over:
                    loop_lines += cls.write_finalize_oiter(v)
        # Send outputs
        for x in outputs:
            if not x.get('outside_loop', False):
                loop_lines += cls.write_model_send(x['channel'], x,
                                                   flag_var=flag_var)
        loop_lines.append(cls.format_function_param(
            'assign', name=iter_var['name'],
            value=cls.function_param.get('false_flag',
                                         cls.function_param['false'])))
        # Add break if there are not any inputs inside the loop
        if not any_loop_inputs:
            loop_lines.append(cls.format_function_param(
                'assign', name=flag_var['name'],
                value=cls.function_param.get(
                    'false_flag', cls.function_param['false'])))
        # Add loop in while block
        flag_cond = cls.format_function_param('flag_cond',
                                              default='{flag_var}',
                                              flag_var=flag_var['name'])
        lines += cls.write_while_loop(flag_cond, loop_lines)
        # Send outputs after loop
        for x in outputs:
            if x.get('outside_loop', False):
                lines += cls.write_model_send(x['channel'], x,
                                              flag_var=flag_var)
        # Free variables
        for x in free_vars:
            lines += cls.write_free(x)
        # Add prints
        if verbose_model: # pragma: debug
            # Insert a trace print before each generated line (skipping
            # 'else' lines, where an insertion would break the block)
            idx = len(lines) - 1
            while (idx > nline_preamble):
                if 'else' not in lines[idx]:
                    indent = ' ' * (len(lines[idx])
                                    - len(lines[idx].lstrip()))
                    lines.insert(idx, indent + cls.format_function_param(
                        'print', message=("%s: line %d" % (model_file, idx))))
                idx -= 1
        # Wrap as executable with interface & model import
        prefix = None
        if 'interface' in cls.function_param:
            ygglib = cls.interface_library
            if ygglib in cls.internal_libraries:
                ygglib = cls.internal_libraries[ygglib]['source']
            if cls.interface_inside_exec:
                lines.insert(0, cls.format_function_param(
                    'interface', interface_library=ygglib))
            else:
                prefix = [cls.format_function_param(
                    'interface', interface_library=ygglib)]
        out = cls.write_executable(lines, prefix=prefix,
                                   model_name=model_name,
                                   imports={'filename': model_file,
                                            'function': model_function})
        if verbose: # pragma: debug
            logger.info('\n' + '\n'.join(out))
        else:
            logger.debug('\n' + '\n'.join(out))
        return out
@classmethod
def write_channel_decl(cls, var, **kwargs):
r"""Write a channel declaration.
Args:
var (dict): Information dictionary for the channel.
being declared.
**kwargs: Additional keyword arguments are passed to class's
write_declaration.
Returns:
list: The lines declaring the variable.
"""
out = []
if not cls.dont_declare_channel:
out = cls.write_declaration(
{'name': var['channel'], 'type': 'comm'}, **kwargs)
if (((var.get('datatype', None) is not None)
and ('{channel_type}' in cls.function_param['input']))):
var['channel_type'] = '%s_type' % var['channel']
out += cls.write_type_decl(
var['channel_type'], var['datatype'],
definitions=kwargs.get('definitions', None),
requires_freeing=kwargs.get('requires_freeing', None))
return out
    @classmethod
    def write_type_decl(cls, name, datatype, name_base=None,
                        requires_freeing=None, definitions=None,
                        no_decl=False):
        r"""Get lines declaring the datatype within the language.
        Args:
            name (str): Name of variable that should be declared.
            datatype (dict): Type definition.
            name_base (str, optional): Base name used to derive the names
                of supporting variables (items/keys/vals/shape). Defaults
                to None and name is used.
            requires_freeing (list, optional): List that variables requiring
                freeing should be appended to. Defaults to None.
            definitions (list, optional): Existing list that variable
                definitions should be added to. Defaults to None if not
                provided and definitions will be included in the returned
                lines.
            no_decl (bool, optional): If True, the variable is not
                declared, but supporting variables will be. Defaults
                to False.
        Returns:
            list: Lines required to define a type declaration.
        """
        out = []
        if name_base is None:
            name_base = name
        if datatype['type'] == 'array':
            if 'items' in datatype:
                assert(isinstance(datatype['items'], list))
                # Declare the array of item dtypes
                out += cls.write_declaration(
                    {'name': '%s_items' % name_base,
                     'datatype': {
                         'type': '1darray', 'subtype': 'dtype',
                         'length': len(datatype['items'])}},
                    definitions=definitions,
                    requires_freeing=requires_freeing)
                for i, x in enumerate(datatype['items']):
                    # Prevent recursion
                    x_copy = copy.deepcopy(x)
                    x_copy.pop('items', None)
                    x_copy.pop('properties', None)
                    out += cls.write_type_decl(
                        None, x_copy,
                        name_base=('%s_item%d' % (name_base, i)),
                        definitions=definitions,
                        requires_freeing=requires_freeing,
                        no_decl=True)
        elif datatype['type'] == 'object':
            if 'properties' in datatype:
                assert(isinstance(datatype['properties'], dict))
                # Size the key buffer to the longest key, with a floor of 80
                precision = 0
                if datatype['properties']:
                    precision = max([len(k) for k in
                                     datatype['properties'].keys()])
                precision = max(80, precision)
                out += cls.write_declaration(
                    {'name': '%s_keys' % name_base,
                     'datatype': {
                         'type': '1darray', 'subtype': 'bytes',
                         'length': len(datatype['properties']),
                         'precision': precision}},
                    definitions=definitions,
                    requires_freeing=requires_freeing)
                out += cls.write_declaration(
                    {'name': '%s_vals' % name_base,
                     'datatype': {
                         'type': '1darray', 'subtype': 'dtype',
                         'length': len(datatype['properties'])}},
                    definitions=definitions,
                    requires_freeing=requires_freeing)
                for i, (k, v) in enumerate(datatype['properties'].items()):
                    # Prevent recursion
                    v_copy = copy.deepcopy(v)
                    v_copy.pop('items', None)
                    v_copy.pop('properties', None)
                    out += cls.write_type_decl(
                        None, v_copy,
                        name_base=('%s_prop%d' % (name_base, i)),
                        requires_freeing=requires_freeing,
                        definitions=definitions,
                        no_decl=True)
        elif datatype['type'] == 'ndarray':
            if 'shape' in datatype:
                # Declare an int64 array holding the fixed shape
                out += cls.write_declaration(
                    {'name': '%s_shape' % name_base,
                     'datatype': {
                         'type': '1darray', 'subtype': 'int',
                         'precision': 64, 'length': len(datatype['shape'])}},
                    definitions=definitions,
                    requires_freeing=requires_freeing)
        elif datatype['type'] in (['ply', 'obj', '1darray', 'scalar',
                                   'boolean', 'null', 'number', 'integer',
                                   'string', 'class', 'function', 'instance',
                                   'schema', 'any']
                                  + list(constants.VALID_TYPES.keys())):
            # These types need no supporting variables
            pass
        else: # pragma: debug
            raise ValueError(("Cannot create %s version of type "
                              "'%s'") % (cls.language, datatype['type']))
        if not no_decl:
            out += cls.write_declaration(
                {'name': name, 'type': 'dtype'})
        return out
    @classmethod
    def write_type_def(cls, name, datatype, name_base=None,
                       use_generic=False):
        r"""Get lines declaring the data type within the language.
        Args:
            name (str): Name of variable that definition should be stored in.
            datatype (dict): Type definition.
            name_base (str, optional): Base name used to derive the names
                of supporting variables (items/keys/vals/shape) declared
                by write_type_decl. Defaults to None and name is used.
            use_generic (bool, optional): If True variables serialized
                and/or deserialized by the type will be assumed to be
                generic objects. Defaults to False.
        Returns:
            list: Lines required to define a type definition.
        """
        out = []
        fmt = None
        keys = {}
        if use_generic:
            keys['use_generic'] = cls.function_param['true']
        else:
            keys['use_generic'] = cls.function_param['false']
        typename = datatype['type']
        if name_base is None:
            name_base = name
        if datatype['type'] == 'array':
            if 'items' in datatype:
                assert(isinstance(datatype['items'], list))
                keys['nitems'] = len(datatype['items'])
                keys['items'] = '%s_items' % name_base
                # Language indexing may be 0- or 1-based
                if cls.zero_based:
                    idx_offset = 0
                else:
                    idx_offset = 1
                for i, x in enumerate(datatype['items']):
                    # Prevent recursion
                    x_copy = copy.deepcopy(x)
                    x_copy.pop('items', None)
                    x_copy.pop('properties', None)
                    out += cls.write_type_def(
                        cls.format_function_param(
                            'index', variable=keys['items'],
                            index=(i + idx_offset)), x_copy,
                        name_base=('%s_item%d' % (name_base, i)),
                        use_generic=use_generic)
            else:
                # Untyped arrays fall back to generic items
                keys['nitems'] = 0
                keys['items'] = cls.function_param['null']
                keys['use_generic'] = cls.function_param['true']
        elif datatype['type'] == 'object':
            keys['use_generic'] = cls.function_param['true']
            if 'properties' in datatype:
                assert(isinstance(datatype['properties'], dict))
                keys['nitems'] = len(datatype['properties'])
                keys['keys'] = '%s_keys' % name_base
                keys['values'] = '%s_vals' % name_base
                if cls.zero_based:
                    idx_offset = 0
                else:
                    idx_offset = 1
                for i, (k, v) in enumerate(datatype['properties'].items()):
                    # Prevent recursion
                    v_copy = copy.deepcopy(v)
                    v_copy.pop('items', None)
                    v_copy.pop('properties', None)
                    # Assign the property name into the keys array
                    out.append(cls.format_function_param(
                        'assign', value='\"%s\"' % k,
                        name=cls.format_function_param(
                            'index', variable=keys['keys'],
                            index=(i + idx_offset))))
                    out += cls.write_type_def(
                        cls.format_function_param(
                            'index', variable=keys['values'],
                            index=(i + idx_offset)), v_copy,
                        name_base=('%s_prop%d' % (name_base, i)),
                        use_generic=use_generic)
            else:
                keys['nitems'] = 0
                keys['keys'] = cls.function_param['null']
                keys['values'] = cls.function_param['null']
        elif datatype['type'] in ['ply', 'obj']:
            pass
        elif datatype['type'] == '1darray':
            for k in ['subtype', 'precision']:
                keys[k] = datatype[k]
            keys['precision'] = int(keys['precision'])
            keys['length'] = datatype.get('length', '0')
            keys['units'] = datatype.get('units', '')
        elif datatype['type'] == 'ndarray':
            for k in ['subtype', 'precision']:
                keys[k] = datatype[k]
            keys['precision'] = int(keys['precision'])
            if 'shape' in datatype:
                # Fill the pre-declared shape array element by element
                shape_var = '%s_shape' % name_base
                if cls.zero_based:
                    idx_offset = 0
                else:
                    idx_offset = 1
                for i, x in enumerate(datatype['shape']):
                    out.append(cls.format_function_param(
                        'assign', value=x,
                        name=cls.format_function_param(
                            'index', variable=shape_var,
                            index=(i + idx_offset))))
                keys['ndim'] = len(datatype['shape'])
                keys['shape'] = shape_var
                typename = 'ndarray_arr'
            else:
                keys['ndim'] = 0
                keys['shape'] = cls.function_param['null']
            keys['units'] = datatype.get('units', '')
        elif (typename == 'scalar') or (typename in constants.VALID_TYPES):
            keys['subtype'] = datatype.get('subtype', datatype['type'])
            keys['units'] = datatype.get('units', '')
            # Variable-width (string-like) subtypes may omit precision
            if keys['subtype'] in ['bytes', 'string', 'unicode']:
                keys['precision'] = int(datatype.get('precision', 0))
            else:
                keys['precision'] = int(datatype['precision'])
            typename = 'scalar'
        elif datatype['type'] in ['boolean', 'null', 'number',
                                  'integer', 'string']:
            keys['type'] = datatype['type']
            typename = 'default'
        elif (typename in ['class', 'function']):
            keys['type'] = typename
            typename = 'pyobj'
        elif typename in ['instance', 'any']:
            keys['use_generic'] = cls.function_param['true']
            typename = 'empty'
        elif typename in ['schema']:
            keys['use_generic'] = cls.function_param['true']
        else: # pragma: debug
            raise ValueError("Cannot create %s version of type '%s'"
                             % (cls.language, typename))
        # Build the initializer expression and assign it to name
        fmt = cls.format_function_param('init_type_%s' % typename, **keys)
        out.append(cls.format_function_param('assign', name=name,
                                             value=fmt))
        return out
    @classmethod
    def write_channel_def(cls, key, datatype=None, **kwargs):
        r"""Write an channel definition.
        Args:
            key (str): Entry in cls.function_param that should be used.
            datatype (dict, optional): Data type associated with the channel.
                Defaults to None and is ignored.
            **kwargs: Additional keyword arguments are passed as parameters
                to format_function_param.
        Returns:
            list: Lines required to declare and define an output channel.
        """
        out = []
        if (datatype is not None) and ('{channel_type}' in cls.function_param[key]):
            # Define the companion type variable used by the channel
            kwargs['channel_type'] = '%s_type' % kwargs['channel']
            out += cls.write_type_def(
                kwargs['channel_type'], datatype,
                use_generic=kwargs.get('use_generic', False))
        dir_map = {'input': 'recv', 'output': 'send'}
        try_keys = [dir_map[key] + '_converter', 'transform']
        try_vals = []
        if all([bool(kwargs.get(k, False)) for k in try_keys]): # pragma: debug
            # TODO: Handling merger of the transforms in yaml or
            # remove the *_converter options entirely
            raise RuntimeError(("Transforms are specified in multiple "
                                "locations for this input: %s")
                               % str(try_keys))
        # Collect transforms from either specification style
        for k in try_keys:
            if k in kwargs:
                v = kwargs[k]
                if not isinstance(v, list):
                    v = [v]
                try_vals += v
        # This last transform is used because the others are assumed
        # to be applied by the connection driver
        if try_vals and isinstance(try_vals[-1], str):
            try_key = '%s_%s' % (try_vals[-1], key)
            if ((('python_interface' in cls.function_param)
                 and (try_key in cls.python_interface))):
                # Switch to the transform-specific Python interface call;
                # key is reassigned so the final format call below uses it
                kwargs['python_interface'] = cls.python_interface[try_key]
                if ((('format_str' in kwargs)
                     and ('python_interface_format' in cls.function_param))):
                    key = 'python_interface_format'
                    kwargs['format_str'] = kwargs['format_str'].encode(
                        "unicode_escape").decode('utf-8')
                else:
                    key = 'python_interface'
        out += [cls.format_function_param(key, **kwargs)]
        return out
    @classmethod
    def write_model_function_call(cls, model_function, flag_var, inputs, outputs,
                                  outputs_in_inputs=None, on_failure=None,
                                  format_not_flag_cond=None, format_flag_cond=None,
                                  iter_function_idx=None):
        r"""Write lines necessary to call the model function.
        Args:
            model_function (str): Handle of the model function that should be
                called.
            flag_var (str): Name of variable that should be used as a flag.
            inputs (list): List of dictionaries describing inputs to the model.
            outputs (list): List of dictionaries describing outputs from the model.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to the class attribute outputs_in_inputs.
            on_failure (list, optional): Lines to be executed if the model
                call fails. Defaults to an error message. This variable
                is only used if flag_var is not None and outputs_in_inputs
                is True.
            format_not_flag_cond (str, optional): Format string that produces
                a conditional expression that evaluates to False when the
                model flag indicates a failure. Defaults to None and the
                class's value for 'not_flag_cond' in function_param is used
                if it exists. If it does not exist, format_flag_cond is used.
            format_flag_cond (str, optional): Format string that produces
                a conditional expression that evaluates to True when the
                model flag indicates a success. Defaults to None and the
                defaults class's value for 'flag_cond' in function_param is
                used if it exists. If it does not exist, the flag is
                directly evaluated as if it were a boolean.
            iter_function_idx (dict, optional): Variable that serves as an
                index to iterate over variables. Defaults to None.
        Returns:
            list: Lines required to carry out a call to a model function in
                this language.
        """
        if outputs_in_inputs is None: # pragma: debug
            outputs_in_inputs = cls.outputs_in_inputs
        func_inputs = cls.channels2vars(inputs)
        func_outputs = cls.channels2vars(outputs)
        if iter_function_idx:
            # Replace iterated array variables with indexed element access
            for src in [func_inputs, func_outputs]:
                for i, x in enumerate(src):
                    if 'iter_datatype' in x:
                        src[i] = dict(
                            x, datatype=x['iter_datatype'],
                            name=cls.format_function_param(
                                'index', variable=x['name'],
                                index=iter_function_idx['name'],
                                extra=x),
                            length_var=False)
        if isinstance(flag_var, dict):
            flag_var = flag_var['name']
        out = cls.write_function_call(
            model_function, inputs=func_inputs, outputs=func_outputs,
            flag_var=flag_var, outputs_in_inputs=outputs_in_inputs)
        if flag_var and outputs_in_inputs:
            # Guard the call with a check of the model's own flag
            if (not format_flag_cond) and ('not_flag_cond' in cls.function_param):
                flag_cond = cls.format_function_param(
                    'not_flag_cond', flag_var=flag_var,
                    replacement=format_not_flag_cond)
            else: # pragma: debug
                # flag_cond = '%s (%s)' % (
                #     cls.function_param['not'],
                #     cls.format_function_param(
                #         'flag_cond', default='{flag_var}', flag_var=flag_var,
                #         replacement=format_flag_cond))
                raise RuntimeError("Untested code below. Uncomment "
                                   "at your own risk if you find "
                                   "use case for it.")
            if on_failure is None:
                on_failure = [cls.format_function_param(
                    'error', error_msg="Model call failed.")]
            out += cls.write_if_block(flag_cond, on_failure)
        if iter_function_idx:
            # Wrap call (and failure check) in the iteration loop
            out = cls.write_for_loop(iter_function_idx['name'],
                                     iter_function_idx['begin'],
                                     iter_function_idx['end'],
                                     out)
        return out
    @classmethod
    def write_model_recv(cls, channel, recv_var, flag_var='flag',
                         iter_var=None, allow_failure=False,
                         alt_recv_function=None):
        r"""Write a model receive call including checking the return flag.
        Args:
            channel (str): Name of variable that the channel being received from
                was stored in.
            recv_var (dict, list): Information of one or more variables that
                receieved information should be stored in.
            flag_var (str, optional): Name of flag variable that the flag should
                be stored in. Defaults to 'flag',
            iter_var (str, optional): Name of flag signifying when the
                model is in it's first iteration. If allow_failure is
                True and iter_var is provided, an error will be raised
                if iter_var is True. Defaults to None.
            allow_failure (bool, optional): If True, the returned lines will
                call a break if the flag is False. Otherwise, the returned
                lines will issue an error. Defaults to False.
            alt_recv_function (str, optional): Alternate receive function
                format string. Defaults to None and is ignored.
        Returns:
            list: Lines required to carry out a receive call in this language.
        """
        if cls.function_param is None:
            # NOTE(review): implicit concatenation here yields
            # "...not set forlanguage..." (missing space between literals)
            raise NotImplementedError("function_param attribute not set for"
                                      "language '%s'" % cls.language)
        recv_var_str = recv_var
        if not isinstance(recv_var, str):
            recv_var_par = cls.channels2vars(recv_var)
            recv_var_str = cls.prepare_output_variables(
                recv_var_par, in_inputs=cls.outputs_in_inputs,
                for_yggdrasil=True)
        else:
            recv_var_par = cls.split_variables(recv_var_str)
        # Receive into a temporary when multiple outputs must be expanded
        expanded_recv_var = None
        if (len(recv_var_par) > 1) and ('multiple_outputs' in cls.function_param):
            expanded_recv_var = recv_var_str
            recv_var_str = 'temp_%s' % recv_var_par[0]['name']
        if isinstance(flag_var, dict):
            flag_var = flag_var['name']
        if isinstance(iter_var, dict):
            iter_var = iter_var['name']
        if cls.outputs_in_inputs:
            inputs = [recv_var_str]
            outputs = [flag_var]
        else:
            inputs = []
            outputs = [flag_var, recv_var_str]
        if cls.include_channel_obj:
            inputs.insert(0, channel)
        lines = cls.write_function_call(
            cls.format_function_param('recv_function', channel=channel,
                                      replacement=alt_recv_function),
            inputs=inputs, outputs=outputs, include_arg_count=cls.include_arg_count)
        # Condition evaluating to True when the receive failed
        if 'not_flag_cond' in cls.function_param:
            flag_cond = cls.format_function_param('not_flag_cond',
                                                  flag_var=flag_var)
        else:
            flag_cond = '%s (%s)' % (
                cls.function_param['not'],
                cls.format_function_param('flag_cond', default='{flag_var}',
                                          flag_var=flag_var))
        fail_message = cls.escape_quotes(
            "Could not receive %s." % recv_var_str)
        if allow_failure:
            # A failed receive just exits the loop, but an error is raised
            # if nothing was ever received (first iteration)
            fail_message = cls.escape_quotes(
                'End of input from %s.' % recv_var_str)
            if_block = [cls.format_function_param('print', message=fail_message),
                        cls.function_param.get('break', 'break')]
            if iter_var is not None:
                if_block = cls.write_if_block(
                    iter_var,
                    [cls.format_function_param(
                        'error', error_msg=cls.escape_quotes(
                            'No input from %s.' % recv_var_str))],
                    if_block)
        else:
            if_block = [cls.format_function_param('error', error_msg=fail_message)]
        lines += cls.write_if_block(flag_cond, if_block)
        # Check if single element should be expanded
        if expanded_recv_var:
            # lines.append(cls.format_function_param(
            #     'print_generic', object=recv_var_str))
            if 'expand_mult' in cls.function_param: # pragma: matlab
                lines.append(cls.format_function_param(
                    'expand_mult', name=expanded_recv_var, value=recv_var_str))
            elif 'assign_mult' in cls.function_param:
                lines.append(cls.format_function_param(
                    'assign_mult', name=expanded_recv_var, value=recv_var_str))
            else:
                lines.append(cls.format_function_param(
                    'assign', name=expanded_recv_var, value=recv_var_str))
        elif len(recv_var_par) == 1:
            lines += cls.write_expand_single_element(recv_var_str)
        return lines
@classmethod
def write_model_send(cls, channel, send_var, flag_var='flag',
allow_failure=False):
r"""Write a model send call include checking the return flag.
Args:
channel (str): Name of variable that the channel being sent to
was stored in.
send_var (dict, list): Information on one or more variables
containing information that will be sent.
flag_var (str, optional): Name of flag variable that the flag should
be stored in. Defaults to 'flag',
allow_failure (bool, optional): If True, the returned lines will
call a break if the flag is False. Otherwise, the returned
lines will issue an error. Defaults to False.
Returns:
list: Lines required to carry out a send call in this language.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
send_var_str = send_var
if not isinstance(send_var_str, str):
send_var_par = cls.channels2vars(send_var)
send_var_str = cls.prepare_input_variables(
send_var_par, for_yggdrasil=True)
if isinstance(flag_var, dict):
flag_var = flag_var['name']
if cls.include_channel_obj:
send_var_str = [channel, send_var_str]
lines = cls.write_function_call(
cls.format_function_param('send_function', channel=channel),
inputs=send_var_str,
outputs=flag_var, include_arg_count=cls.include_arg_count)
flag_cond = '%s (%s)' % (
cls.function_param['not'],
cls.format_function_param('flag_cond', default='{flag_var}',
flag_var=flag_var))
fail_message = cls.escape_quotes(
"Could not send %s." % send_var_str)
if allow_failure: # pragma: no cover
# This is not particularly useful, but is included for completion
if_block = [cls.format_function_param('print', message=fail_message),
cls.function_param.get('break', 'break')]
else:
if_block = [cls.format_function_param('error', error_msg=fail_message)]
lines += cls.write_if_block(flag_cond, if_block)
return lines
@classmethod
def write_print_var(cls, var, prefix_msg=None):
r"""Get the lines necessary to print a variable in this language.
Args:
var (dict): Variable information.
prefix_msg (str, optional): Message that should be printed
before the variable. Defaults to None and is ignored.
Returns:
list: Lines printing the specified variable.
"""
out = []
print_key = None
varname = var
if isinstance(var, dict):
varname = var['name']
typename = var.get(
'datatype',
{'type': var.get('type', None)}).get('type', None)
if ('print_%s' % typename) in cls.function_param:
print_key = ('print_%s' % typename)
elif 'print_generic' in cls.function_param:
print_key = 'print_generic'
elif 'print_generic' in cls.function_param:
print_key = 'print_generic'
if print_key:
if prefix_msg is not None:
out.append(cls.format_function_param(
'print', message=prefix_msg))
out += [cls.format_function_param(
print_key, object=varname)]
return out
@classmethod
def write_print_input_var(cls, var, **kwargs):
r"""Get the lines necessary to print an input variable in this
language.
Args:
var (dict): Variable information.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
@classmethod
def write_print_output_var(cls, var, in_inputs=False, **kwargs):
r"""Get the lines necessary to print an output variable in this
language.
Args:
var (dict): Variable information.
in_inputs (bool, optional): If True, the output variable
is passed in as an input variable to be populated.
Defaults to False.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
    @classmethod
    def write_function_def(cls, function_name, inputs=[], outputs=[],
                           input_var=None, output_var=None,
                           function_contents=[],
                           outputs_in_inputs=False,
                           opening_msg=None, closing_msg=None,
                           print_inputs=False, print_outputs=False,
                           skip_interface=False, function_keys=None,
                           verbose=False, **kwargs):
        r"""Write a function definition.

        Args:
            function_name (str): Name of the function being defined.
            inputs (list, optional): List of inputs to the function.
                Defaults to []. Ignored if input_var provided.
            outputs (list, optional): List of outputs from the function.
                Defaults to []. If not provided, no return call is
                added to the function body. Ignored if output_var
                provided.
            input_var (str, optional): Full string specifying input in
                the function definition. If not provided, this will be
                created based on the contents of the inputs variable.
            output_var (str, optional): Full string specifying output in
                the function definition. If not provided, this will be
                created based on the contents of the outputs variable.
            function_contents (list, optional): List of lines comprising
                the body of the function. Defaults to [].
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to False.
            opening_msg (str, optional): String that should be printed
                before the function contents (and inputs if print_inputs
                is True). Defaults to None and is ignored.
            closing_msg (str, optional): String that should be printed
                after the function contents (and outputs if print_outputs
                is True). Defaults to None and is ignored.
            print_inputs (bool, optional): If True, the input variables
                will be printed before the function contents. Defaults
                to False.
            print_outputs (bool, optional): If True, the output variables
                will be printed after the function contents. Defaults to
                False.
            skip_interface (bool, optional): If True, the line including
                the interface will be skipped. Defaults to False.
            function_keys (tuple, optional): 2 element tuple that
                specifies the keys for the function_param entries that
                should be used to begin & end a function definition.
                Defaults to ('function_def_begin', 'function_def_end').
            verbose (bool, optional): If True, the contents of the created file
                are displayed. Defaults to False.
            **kwargs: Additional keyword arguments are passed to
                cls.format_function_param.
        Returns:
            list: Lines completing the function call.
        Raises:
            NotImplementedError: If the function_param attribute for the
                class is not defined.
        """
        if cls.function_param is None:
            raise NotImplementedError("function_param attribute not set for"
                                      "language '%s'" % cls.language)
        if function_keys is None:
            function_keys = ('function_def_begin', 'function_def_end')
        out = []
        # Lines importing/including the language's interface library.
        interface_lines = []
        if ('interface' in cls.function_param) and (not skip_interface):
            ygglib = cls.interface_library
            if ygglib in cls.internal_libraries:
                ygglib = cls.internal_libraries[ygglib]['source']
            interface_lines.append(cls.format_function_param(
                'interface', interface_library=ygglib))
        if not cls.interface_inside_exec:
            out += interface_lines
        flag_var = {}
        # Construct the input/output strings for the definition when
        # they were not provided explicitly.
        if input_var is None:
            input_var = cls.prepare_input_variables(
                inputs, in_definition=True)
        if output_var is None:
            output_var = cls.prepare_output_variables(
                outputs, in_inputs=outputs_in_inputs, in_definition=True)
        # Optional debug lines echoing the inputs/outputs.
        print_input_lines = []
        if print_inputs and inputs:
            for x in inputs:
                print_input_lines += cls.write_print_input_var(
                    x, prefix_msg=('INPUT[%s]:' % x['name']))
        print_output_lines = []
        if print_outputs and outputs:
            for x in outputs:
                print_output_lines += cls.write_print_output_var(
                    x, prefix_msg=('OUTPUT[%s]:' % x['name']),
                    in_inputs=outputs_in_inputs)
        old_outputs = []
        if outputs_in_inputs:
            # Outputs are appended to the inputs and replaced by a
            # single success flag output.
            if output_var:
                input_var = cls.prepare_input_variables(
                    [input_var, output_var])
            flag_var = kwargs.get('flag_var', 'flag')
            if isinstance(flag_var, str):
                flag_var = {'name': flag_var}
            flag_var.setdefault('datatype', 'flag')
            flag_var.setdefault('value', cls.function_param.get(
                'true_flag', cls.function_param['true']))
            old_outputs = outputs
            outputs = [flag_var]
            output_var = cls.prepare_output_variables(outputs)
        # Opening line of the function definition.
        out.append(cls.format_function_param(
            function_keys[0], function_name=function_name,
            input_var=input_var, output_var=output_var, **kwargs))
        if cls.interface_inside_exec:
            out += [cls.function_param['indent'] + x
                    for x in interface_lines]
        free_vars = []
        if 'declare' in cls.function_param:
            # Declare arguments and outputs at the top of the function
            # body for languages requiring explicit declarations.
            definitions = []
            if not cls.types_in_funcdef:
                for o in (inputs + old_outputs):
                    out += [cls.function_param['indent'] + x for
                            x in cls.write_declaration(
                                o, definitions=definitions,
                                requires_freeing=free_vars,
                                is_argument=True)]
            for o in outputs:
                out += [cls.function_param['indent'] + x for
                        x in cls.write_declaration(
                            o, definitions=definitions,
                            requires_freeing=free_vars)]
            out += [cls.function_param['indent'] + x
                    for x in definitions]
        if outputs_in_inputs:
            # Initialize the flag variable to the 'true' value.
            out.append(cls.function_param['indent']
                       + cls.format_function_param(
                           'assign', **flag_var))
        if opening_msg:
            out.append(cls.function_param['indent']
                       + cls.format_function_param(
                           'print', message=opening_msg))
        if print_inputs:
            for x in print_input_lines:
                out.append(cls.function_param['indent'] + x)
        # Indented body of the function.
        for x in function_contents:
            out.append(cls.function_param['indent'] + x)
        if print_outputs:
            for x in print_output_lines:
                out.append(cls.function_param['indent'] + x)
        if closing_msg:
            out.append(cls.function_param['indent']
                       + cls.format_function_param(
                           'print', message=closing_msg))
        # This is not currently used by the tests, but may be
        # needed in the future
        assert(not free_vars)
        # for x in free_vars:
        #     out += [cls.function_param['indent'] + line
        #             for line in cls.write_free(x)]
        if output_var and ('return' in cls.function_param):
            out.append(cls.function_param['indent']
                       + cls.format_function_param(
                           'return', output_var=output_var))
        # Closing line of the function definition.
        if function_keys[1] in cls.function_param:
            out.append(cls.format_function_param(
                function_keys[1], function_name=function_name))
        else:
            out.append(cls.function_param.get('block_end', ''))
        if verbose: # pragma: debug
            logger.info('\n' + '\n'.join(out))
        else:
            logger.debug('\n' + '\n'.join(out))
        return out
    @classmethod
    def write_function_call(cls, function_name, inputs=[], outputs=[],
                            include_arg_count=False,
                            outputs_in_inputs=False, **kwargs):
        r"""Write a function call.

        Args:
            function_name (str): Name of the function being called.
            inputs (list, optional): List of inputs to the function.
                Defaults to [].
            outputs (list, optional): List of outputs from the function.
                Defaults to [].
            include_arg_count (bool, optional): If True, the count of input
                arguments is included as the first argument. Defaults to
                False.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to False.
            **kwargs: Additional keyword arguments are passed to
                cls.format_function_param. 'flag_var' and any provided
                'input_var'/'output_var' strings are honored.
        Returns:
            list: Lines completing the function call.
        """
        if outputs_in_inputs:
            # Outputs are passed as input arguments and replaced by a
            # flag output (unless the language supports calls without
            # any output at all).
            inputs = inputs + [cls.prepare_output_variables(
                outputs, in_inputs=outputs_in_inputs)]
            flag_var = kwargs.get('flag_var', None)
            if (flag_var is None) and ('function_call_noout' not in cls.function_param):
                flag_var = 'flag'
            outputs = []
            if flag_var:
                outputs.append(flag_var)
        kwargs.setdefault('input_var', cls.prepare_input_variables(inputs))
        kwargs.setdefault('output_var', cls.prepare_output_variables(outputs))
        # Number of outputs determines the assignment form used below.
        nout = len(cls.split_variables(kwargs['output_var']))
        if include_arg_count:
            # Prepend the argument count (e.g. for variadic interface
            # functions).
            narg = len(cls.split_variables(kwargs['input_var']))
            kwargs['input_var'] = cls.prepare_input_variables(
                [str(narg), kwargs['input_var']])
        if (nout == 0) and ('function_call_noout' in cls.function_param):
            call_str = cls.format_function_param(
                'function_call_noout', function_name=function_name, **kwargs)
        else:
            call_str = cls.format_function_param(
                'function_call', default='{function_name}({input_var})',
                function_name=function_name, **kwargs)
        if nout == 0:
            out = [call_str + cls.function_param.get('line_end', '')]
        elif (nout > 1) and ('assign_mult' in cls.function_param):
            # Languages with a dedicated multiple-assignment form.
            out = [cls.format_function_param(
                'assign_mult', name=kwargs['output_var'], value=call_str)]
        else:
            out = [cls.format_function_param(
                'assign', name=kwargs['output_var'], value=call_str)]
        return out
@classmethod
def write_executable_import(cls, model_name=None, **kwargs):
r"""Add import statements to executable lines.
Args:
**kwargs: Keyword arguments for import statement.
Returns:
list: Lines required to complete the import.
"""
# This code is currently unused, but may be needed in the
# future to import a dependency directly
# if ('filename' not in kwargs) and ('import_nofile' in cls.function_param):
# key = 'import_nofile'
# else:
# key = 'import'
# return [cls.format_function_param(key, **kwargs)]
out = []
if 'import' in cls.function_param:
out = [cls.format_function_param('import', **kwargs)]
return out
    @classmethod
    def write_executable(cls, lines, prefix=None, suffix=None,
                         function_definitions=None, imports=None,
                         model_name=None):
        r"""Return the lines required to complete a program that will run
        the provided lines.

        Args:
            lines (list): Lines of code to be wrapped as an executable.
            prefix (list, optional): Lines of code that should proceed the
                wrapped code. Defaults to None and is ignored. (e.g. C/C++
                include statements).
            suffix (list, optional): Lines of code that should follow the
                wrapped code. Defaults to None and is ignored.
            function_definitions (list, optional): Lines of code defining
                functions that will be used by the code contained in lines.
                Defaults to None and is ignored.
            imports (list, optional): Kwargs for packages that should
                be imported for use by the executable. Defaults to
                None and is ignored.
            model_name (str, optional): Name given to the model. Defaults to
                None.
        Returns:
            lines: Lines of code wrapping the provided lines with the
                necessary code to run it as an executable (e.g. C/C++'s main).
        """
        if cls.function_param is None:
            raise NotImplementedError("function_param attribute not set for"
                                      "language '%s'" % cls.language)
        out = []
        # Add imports
        if imports is not None:
            if not isinstance(imports, list):
                imports = [imports]
            import_lines = []
            for kws in imports:
                import_lines += cls.write_executable_import(**kws)
            # Import lines are folded into the prefix so they appear
            # before the wrapped code.
            if prefix is None:
                prefix = []
            prefix += import_lines
        # Add standard & user defined prefixes
        if ((('exec_prefix' in cls.function_param)
             and (cls.function_param['exec_prefix'] not in lines))):
            out.append(cls.function_param['exec_prefix'])
            out.append('')
        if prefix is not None:
            if not isinstance(prefix, (list, tuple)):
                prefix = [prefix]
            out += prefix
            out.append('')
        # Function definitions come before the body unless the language
        # requires them at the end (e.g. MATLAB).
        if (((not cls.function_param.get('functions_defined_last', False))
             and (function_definitions is not None))):
            out += function_definitions
            out.append('')
        # Add code with begin/end book ends
        if ((('exec_begin' in cls.function_param)
             and (cls.function_param['exec_begin'] not in '\n'.join(lines)))):
            out.append(cls.function_param['exec_begin'])
            if not isinstance(lines, (list, tuple)):
                lines = [lines]
            for x in lines:
                out.append(cls.function_param['indent'] + x)
            out.append(cls.function_param.get('exec_end',
                                              cls.function_param.get(
                                                  'block_end', '')))
        else:
            out += lines
        if out[-1]:
            out.append('')
        # Add standard & user defined suffixes
        if suffix is not None:
            if not isinstance(suffix, (list, tuple)):
                suffix = [suffix]
            out += suffix
            out.append('')
        if ((('exec_suffix' in cls.function_param)
             and (cls.function_param['exec_suffix'] not in lines))):
            out.append(cls.function_param['exec_suffix'])
            out.append('')
        if (((cls.function_param.get('functions_defined_last', False))
             and (function_definitions is not None))): # pragma: matlab
            out += function_definitions
            out.append('')
        if cls.max_line_width:
            # Split any lines exceeding the maximum line width.
            new_out = []
            for iout in out:
                new_out += cls.split_line(iout)
            out = new_out
        return out
@classmethod
def escape_quotes(cls, x):
r"""Escape quotes in a string.
Args:
x (str): String to escape quotes in.
Returns:
str: x with escaped quotes.
"""
out = x.replace('"', '\\\"')
out = out.replace("'", "\\\'")
return out
    @classmethod
    def split_line(cls, line, length=None, force_split=False):
        r"""Split a line as close to (or before) a given character as
        possible.

        Args:
            line (str): Line to split.
            length (int, optional): Maximum length of split lines. Defaults
                to cls.max_line_width if not provided.
            force_split (bool, optional): If True, force a split to
                occur at the specified length. Defaults to False.
        Returns:
            list: Set of lines resulting from splitting the provided line.
        """
        out = []
        # Whitespace-only lines are returned unchanged.
        if not line.lstrip():
            return [line]
        # Number of leading whitespace characters on the line.
        nindent = line.index(line.lstrip()[0])
        block_end = cls.function_param['block_end'].lower()
        if '\n' in line:
            # Split embedded newlines first and recurse on each part,
            # re-indenting continuation lines (dedenting those that
            # begin with the block-end keyword).
            out = line.split('\n')
            for i in range(1, len(out)):
                if out[i].lstrip().lower().startswith(block_end):
                    nindent -= len(cls.function_param['indent'])
                out[i] = (nindent * ' ') + out[i]
            new_out = []
            for x in out:
                new_out += cls.split_line(x, length=length,
                                          force_split=force_split)
            return new_out
        if length is None:
            length = cls.max_line_width
        if (length is None) or (len(line) < length):
            return [line]
        # Width available once the continuation marker is appended.
        length_allow = (length - len(cls.function_param.get(
            'continuation_before', '')))
        if force_split:
            isplit = length_allow
        else:
            # Break at the last space before the limit.
            # NOTE(review): rindex raises ValueError when the segment
            # contains no space — confirm callers guarantee one.
            isplit = line[:length_allow].rindex(' ') + 1
        if (isplit < nindent + 1) or (isplit >= len(line)):
            # No usable split point inside the text; leave unmodified.
            out = [line]
        else:
            out.append(line[:isplit] + cls.function_param.get(
                'continuation_before', ''))
            # Recurse on the remainder, preserving indentation and any
            # continuation prefix.
            out += cls.split_line(
                ((nindent * ' ') + cls.function_param.get(
                    'continuation_after', '') + line[isplit:]),
                length=length, force_split=force_split)
        return out
@classmethod
def input2output(cls, var):
r"""Perform conversion necessary to turn a variable extracted from a
function definition from an input to an output.
Args:
var (dict): Variable definition.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def output2input(cls, var, in_definition=True):
r"""Perform conversion necessary to turn an output variable
into an corresponding input that can be used to format a
function definition.
Args:
var (dict): Variable definition.
in_definition (bool, optional): If True, the returned
dictionary corresponds to an input variable in a
function definition. If False, the returned value
will correspond to an input to a function. Defaults to
True.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def get_native_type(cls, **kwargs):
r"""Get the native type.
Args:
type (str, optional): Name of |yggdrasil| extended JSON
type or JSONSchema dictionary defining a datatype.
**kwargs: Additional keyword arguments may be used in determining
the precise declaration that should be used.
Returns:
str: The native type.
"""
if 'native_type' in kwargs:
return kwargs['native_type']
assert('json_type' not in kwargs)
json_type = kwargs.get('datatype', kwargs)
if isinstance(json_type, dict):
type_name = json_type.get('type', 'bytes')
else:
type_name = json_type
json_type = kwargs
if type_name == 'scalar':
type_name = json_type['subtype']
if (type_name == 'flag') and (type_name not in cls.type_map):
type_name = 'boolean'
return cls.type_map[type_name]
@classmethod
def get_json_type(cls, native_type):
r"""Get the JSON type from the native language type.
Args:
native_type (str): The native language type.
Returns:
str, dict: The JSON type.
"""
return cls.get_inverse_type_map()[native_type]
@classmethod
def write_finalize_iiter(cls, var):
r"""Get the lines necessary to finalize an input array for iteration.
Args:
var (dict, str): Name or information dictionary for the variable
finalized.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to initialize an array for iteration
output.
Args:
var (dict, str): Name or information dictionary for the variable
being initialized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
return cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
@classmethod
def write_finalize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to finalize an array after iteration.
Args:
var (dict, str): Name or information dictionary for the variable
being initialized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list of variables
requiring freeing. Defaults to None and is ignored.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize(cls, var, value=None, requires_freeing=None):
r"""Get the code necessary to initialize a variable.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
out = []
if isinstance(var, str): # pragma: no cover
var = {'name': var}
if (value is None) and isinstance(var.get('datatype', False), dict):
init_type = 'init_%s' % var['datatype']['type']
free_type = 'free_%s' % var['datatype']['type']
if init_type in cls.function_param:
assert(free_type in cls.function_param)
# value = cls.format_function_param(init_type, **var['datatype'])
value = cls.function_param[init_type]
if requires_freeing is not None:
requires_freeing.append(var)
if value is not None:
out.append(cls.format_function_param(
'assign', name=var['name'], value=value))
return out
@classmethod
def write_declaration(cls, var, value=None, requires_freeing=None,
definitions=None, is_argument=False):
r"""Return the lines required to declare a variable with a certain
type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
definitions (list, optional): Existing list that variable
definitions should be added to. Defaults to None if not
provided and definitions will be included in the returned
lines.
dont_define (bool, optional): If True, the variable will not
be defined. Defaults to False.
is_argument (bool, optional): If True, the variable being
declared is an input argument. Defaults to False.
Returns:
list: The lines declaring the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
type_name = cls.get_native_type(**var)
out = [cls.format_function_param('declare',
type_name=type_name,
variable=cls.get_name_declare(var))]
if is_argument:
return out
if definitions is None:
definitions = out
definitions += cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
return out
@classmethod
def get_name_declare(cls, var):
r"""Determine the name that should be used for declaration.
Args:
var (str, dict): Name of variable or dictionary of information.
Returns:
str: Modified name for declaration.
"""
if isinstance(var, str): # pragma: no cover
return var
assert(isinstance(var, dict))
out = var['name']
return out
@classmethod
def write_free(cls, var, **kwargs):
r"""Return the lines required to free a variable with a certain type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
**kwargs: Additional keyword arguments are passed to format_function_param.
Returns:
list: The lines freeing the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
out = []
if not var.get('dont_free', False):
if ((isinstance(var.get('datatype', False), dict)
and (('free_%s' % var['datatype']['type'])
in cls.function_param))):
out = [cls.format_function_param(
'free_%s' % var['datatype']['type'],
variable=var['name'], **kwargs)]
else:
out = [cls.format_function_param(
'free', variable=var['name'], **kwargs)]
return out
@classmethod
def write_assign_to_output(cls, dst_var, src_var, copy=False,
outputs_in_inputs=False, **kwargs):
r"""Write lines assigning a value to an output variable.
Args:
dst_var (str, dict): Name or information dictionary for
variable being assigned to.
src_var (str, dict): Name or information dictionary for
value being assigned to dst_var.
copy (bool, optional): If True, the assigned value is copied
during assignment. Defaults to False.
outputs_in_inputs (bool, optional): If True, outputs are passed
as input parameters. In some languages, this means that a
pointer or reference is passed (e.g. C) and so the assignment
should be to the memory indicated rather than the variable.
Defaults to False.
Returns:
list: Lines achieving assignment.
"""
datatype = None
if isinstance(dst_var, dict):
kwargs['name'] = dst_var['name']
datatype = dst_var['datatype']
else:
kwargs['name'] = dst_var
if isinstance(src_var, dict):
kwargs['value'] = src_var['name']
datatype = src_var['datatype']
else:
kwargs['value'] = src_var
if ((outputs_in_inputs and isinstance(dst_var, dict)
and isinstance(dst_var['datatype'], dict)
and ('copy_' + dst_var['datatype']['type']
in cls.function_param))):
copy = True
if copy:
if ((isinstance(datatype, dict)
and ('copy_' + datatype['type'] in cls.function_param))):
return [cls.format_function_param(
'copy_' + datatype['type'], **kwargs)]
else:
return [cls.format_function_param('assign_copy', **kwargs)]
else:
return [cls.format_function_param('assign', **kwargs)]
@classmethod
def write_expand_single_element(cls, output_var, add_cond=False):
r"""Write lines allowing extraction of the only element from a single
element array as a stand-alone variable if the variable is an array
and only has one element.
Args:
output_var (str): Name of the variable that should be conditionally
expanded.
add_cond (list, optional): Additional conditions that must be
satisfied for the array element to be extracted. Defaults to
False and is ignored.
Returns:
list: Lines added the conditional expansion of single element
arrays.
"""
if 'istype' not in cls.function_param:
return []
cond = ('(%s) %s (%s %s 1)' % (
cls.format_function_param('istype',
variable=output_var,
type=cls.type_map['array']),
cls.function_param.get('and', '&&'),
cls.format_function_param('len',
variable=output_var),
cls.function_param.get('equ', '==')))
if add_cond:
for x in add_cond:
cond += f" {cls.function_param.get('and', '&&')} {x}"
out = cls.write_if_block(
cond,
cls.format_function_param(
'assign', name=output_var,
value=cls.format_function_param(
'index', variable=output_var,
index=int(cls.function_param.get('first_index', 0)))))
return out
@classmethod
def split_variables(cls, var_str):
r"""Split variable string include individual variables.
Args:
var_str (str): String containing multiple variables.
Returns:
list: Split variables.
"""
out = []
if var_str:
pairs = [(r'\[', r'\]'),
(r'\(', r'\)'),
(r'\{', r'\}'),
(r"'", r"'"),
(r'"', r'"')]
regex_ele = r''
present = False
for p in pairs:
if not any([(str(ip)[-1] in var_str) for ip in p]):
continue
present = True
regex_ele += (r'(?:%s[.\n]*?%s)|' % p)
if present:
regex_ele += '(?:.+?)'
regex_ele = r'\s*(%s)\s*(?:,|$)' % regex_ele
out = [x.group(1) for x in re.finditer(regex_ele, var_str)]
else:
out = [x.strip() for x in var_str.split(',')]
return out
@classmethod
def prepare_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input/output to/from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying variables
in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the formated expected by calls to yggdarsil
send/recv methods. Defaults to False.
Returns:
str: Concatentated variables list.
"""
name_list = []
if not isinstance(vars_list, list):
vars_list = [vars_list]
for x in vars_list:
if isinstance(x, str):
name_list.append(x)
else:
assert(isinstance(x, dict))
name_list.append(x['name'])
return ', '.join(name_list)
@classmethod
def prepare_input_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input to a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying input
variables in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the formated expected by calls to yggdarsil
send/recv methods. Defaults to False.
Returns:
str: Concatentated variables list.
"""
return cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
@classmethod
def prepare_output_variables(cls, vars_list, in_definition=False,
in_inputs=False, for_yggdrasil=False):
r"""Concatenate a set of output variables such that it can be passed as
a single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
output from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying output
variables in a function definition. Defaults to False.
in_inputs (bool, optional): If True, the output variables should
be formated to be included as input variables. Defaults to
False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the formated expected by calls to yggdarsil
send/recv methods. Defaults to False.
Returns:
str: Concatentated variables list.
"""
if in_inputs:
vars_list = [cls.output2input(x, in_definition=in_definition)
for x in vars_list]
out = cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
if isinstance(vars_list, list) and (len(vars_list) > 1):
if in_definition and ('multiple_outputs_def' in cls.function_param):
out = cls.format_function_param('multiple_outputs_def', outputs=out)
elif 'multiple_outputs' in cls.function_param:
out = cls.format_function_param('multiple_outputs', outputs=out)
return out
@classmethod
def write_if_block(cls, cond, block_contents, else_block_contents=False):
r"""Return the lines required to complete a conditional block.
Args:
cond (str): Conditional that should determine block execution.
block_contents (list): Lines of code that should be executed inside
the block.
else_block_contents (list, optional): Lines of code that should be
executed inside the else clause of the block. Defaults to False
if not provided and an else clause is omitted.
Returns:
list: Lines of code performing conditional execution of a block.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
if not isinstance(cond, list):
cond = [cond]
block_contents = [block_contents]
assert(len(cond) == len(block_contents))
for i, (icond, iblock_contents) in enumerate(zip(cond, block_contents)):
if i == 0:
out.append(cls.format_function_param('if_begin', cond=icond))
else:
out.append(cls.format_function_param('if_elif', cond=icond))
if not isinstance(iblock_contents, (list, tuple)):
iblock_contents = [iblock_contents]
for x in iblock_contents:
out.append(cls.function_param['indent'] + x)
if else_block_contents:
out.append(cls.format_function_param('if_else'))
if not isinstance(else_block_contents, (list, tuple)):
else_block_contents = [else_block_contents]
for x in else_block_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('if_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_for_loop(cls, iter_var, iter_begin, iter_end, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
iter_var (str): Name of variable that iterator should use.
iter_begin (int): Beginning of iteration.
iter_end (int): End of iteration.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('for_begin', iter_var=iter_var,
iter_begin=iter_begin,
iter_end=iter_end))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('for_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_while_loop(cls, cond, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
cond (str): Conditional that should determine loop execution.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('while_begin', cond=cond))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('while_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_try_except(cls, try_contents, except_contents, error_var='e',
error_type=None):
r"""Return the lines required to complete a try/except block.
Args:
try_contents (list): Lines of code that should be executed inside
the try block.
except_contents (list): Lines of code that should be executed inside
the except block.
error_var (str, optional): Name of variable where the caught error
should be stored. Defaults to 'e'.
error_type (str, optional): Name of error type that should be caught.
If not provided, defaults to None and will be set based on the
class function_param entry for 'try_error_type'.
Returns:
Lines of code perfoming a try/except block.
"""
if (cls.function_param is None) or ('try_begin' not in cls.function_param):
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
if error_type is None:
error_type = cls.function_param.get('try_error_type', None)
out = []
# Try block contents
if not isinstance(try_contents, (list, tuple)):
try_contents = [try_contents]
out.append(cls.function_param['try_begin'])
for x in try_contents:
out.append(cls.function_param['indent'] + x)
# Except block contents
if not isinstance(except_contents, (list, tuple)):
except_contents = [except_contents]
out.append(cls.format_function_param('try_except', error_var=error_var,
error_type=error_type))
for x in except_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('try_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def get_testing_options(cls):
r"""Method to return a dictionary of testing options for this class.
Returns:
dict: Dictionary of variables to use for testing. Key/value pairs:
kwargs (dict): Keyword arguments for driver instance.
deps (list): Dependencies to install.
"""
out = dict(
kwargs={}, deps=[],
write_function_def_params=[
{'inputs': [{'name': 'x', 'value': 1.0,
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}],
'outputs': [{'name': 'y',
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}]}],
split_lines=[('abcdef', {'length': 3, 'force_split': True},
['abc', 'def']),
(' abc', {'length': 3, 'force_split': True},
[' abc'])])
return out
| StarcoderdataPython |
66437 | import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, ClassifierMixin
from suricate.preutils import concatixnames, createmultiindex, addsuffix
# THIS SHOULD BE OBSOLETE
class PipeDfClf(ClassifierMixin):
    """Pipeline chaining a similarity transformer and a pair classifier.

    The transformer maps ``X = [df_source, df_target]`` onto a score matrix
    over the cartesian product of both indexes; the classifier is then fit
    and evaluated only on the rows whose pair index appears in the
    supervised series ``y``.
    """
    def __init__(self,
                 transformer,
                 classifier,
                 ixname='ix',
                 source_suffix='source',
                 target_suffix='target',
                 **kwargs):
        """
        Args:
            transformer (TransformerMixin): Transformer --> CLF
            classifier (ClassifierMixin):
            ixname (str):
            source_suffix (str):
            target_suffix (str):
            n_jobs (int):
            pruning_ths (float): return only the pairs which have a score greater than the store_ths
        """
        ClassifierMixin.__init__(self)
        self.ixname = ixname
        self.source_suffix = source_suffix
        self.target_suffix = target_suffix
        # Pre-compute e.g. 'ix_source', 'ix_target' and the pair
        # ['ix_source', 'ix_target'] used as MultiIndex names everywhere below.
        self.ixnamesource, self.ixnametarget, self.ixnamepairs = concatixnames(
            ixname=self.ixname,
            source_suffix=self.source_suffix,
            target_suffix=self.target_suffix
        )
        self.fitted = False
        self.transformer = transformer
        self.classifier = classifier
        pass
    def fit(self, X, y):
        """
        Fit the transformer, then fit the classifier on the labelled slice.
        Args:
            X (list): list of [df_source, df_target]
            y (pd.Series): pairs {['ix_source', 'ix_target']: y_true}
        Returns:
            self
        """
        # Score every candidate pair, then keep only the rows labelled in y.
        X_score = self.transformer.fit_transform(X=X, y=None)
        X_slice, y_slice, ix_slice = self.slice(X=X, X_score=X_score, y=y)
        self.classifier.fit(X=pd.DataFrame(X_slice, index=ix_slice), y=pd.Series(y_slice, index=ix_slice))
        return self
    def slice(self, X, X_score, y=None):
        """
        Transform X_score, output of X through the score, into X_slice, sliced according to y_true (pd.Series)
        X [df_source, df_target] -[scorer]-> X_score -[reindex]-> X_score.loc[y_true.index]
        Into
        Args:
            X (list) : is a list containing (df_source, df_target)
            X_score (np.ndarray): X is a numpy.ndarray which is a cartesian product of df_source and df_target
            y (pd.Series/np.ndarray): y is either: /
                - pd.Series containing the supervised scores: pairs {['ix_source', 'ix_target']: y_true} which can be a slice of x
                - numpy.ndarray of [0,1 ,0 ,1 ...] which must be same length as x
                - None --> return X
        Returns:
            np.ndarray, np.ndarray, pd.Index: Slice of X_score, y, common index
        """
        # Re-attach the cartesian-product MultiIndex to the raw score matrix.
        ix_all = createmultiindex(X=X, names=self.ixnamepairs)
        X_score = pd.DataFrame(X_score, index=ix_all)
        if isinstance(y, pd.Series) or isinstance(y, pd.DataFrame):
            # Keep only pairs present in both the score matrix and the labels.
            commonindex = X_score.index.intersection(y.index)
            return X_score.loc[commonindex].values, y.loc[commonindex].values, commonindex
        elif y is None:
            return X_score.values, None, ix_all
        else:
            # y is assumed to be a positional array aligned with X_score rows.
            return X_score.values, y, ix_all
    def predict(self, X):
        """Predict a label for every pair in the cartesian product of X."""
        X_score = self.transformer.transform(X=X)
        return self.classifier.predict(X=X_score)
    def fit_predict(self, X, y):
        """Fit on the labelled pairs of y, then predict on all pairs of X."""
        self.fit(X=X, y=y)
        y_pred = self.predict(X=X)
        return y_pred
    def predict_proba(self, X):
        """Return the classifier's probability estimates for every pair."""
        X_score = self.transformer.transform(X=X)
        return self.classifier.predict_proba(X=X_score)
    def score(self, X, y, sampleweight=None):
        """Score the classifier on the pairs labelled in y."""
        X_score = self.transformer.transform(X=X)
        X_slice, y_slice, ix_slice = self.slice(X=X, X_score=X_score, y=y)
        return self.classifier.score(X=X_slice, y=y_slice, sample_weight=sampleweight)
    def return_pairs(self, X):
        """Return predictions as a Series indexed by the pair MultiIndex."""
        return pd.Series(
            index=createmultiindex(X=X, names=self.ixnamepairs),
            data=self.predict(X)
        )
    def show_pairs(self, X, y=None, use_cols=None):
        """
        Create a side by side table from a list of pairs (as a DataFrame)
        Args:
            X
            y (pd.DataFrame/pd.Series): of the form {['ix_source', 'ix_target']:['y_true']}
            use_cols (list): columns to use
        Returns:
            pd.DataFrame {['ix_source', 'ix_target'] : ['name_source', 'name_target', .....]}
        """
        source = X[0]
        target = X[1]
        # Build the pair frame either from all pairs (y is None) or from y.
        if y is None:
            xpairs = pd.DataFrame(index=createmultiindex(X=X, names=self.ixnamepairs))
        elif isinstance(y, pd.DataFrame):
            xpairs = y.copy()
        else:
            assert isinstance(y, pd.Series)
            xpairs = pd.DataFrame(y.copy())
        xpairs = xpairs.reset_index(drop=False)
        # Default to the columns shared by both sides.
        if use_cols is None or len(use_cols) == 0:
            use_cols = source.columns.intersection(target.columns)
        xsource = source[use_cols].copy().reset_index(drop=False)
        xright = target[use_cols].copy().reset_index(drop=False)
        # Suffix the columns so source/target values can sit side by side.
        xsource = addsuffix(xsource, self.source_suffix).set_index(self.ixnamesource)
        xright = addsuffix(xright, self.target_suffix).set_index(self.ixnametarget)
        sbs = xpairs.join(
            xsource, on=self.ixnamesource, how='left'
        ).join(
            xright, on=self.ixnametarget, how='left'
        ).set_index(
            self.ixnamepairs
        )
        return sbs
| StarcoderdataPython |
297071 | from django.apps import AppConfig
class LikesConfig(AppConfig):
    """Application configuration for the ``likes`` app."""

    name = 'likes'

    def ready(self):
        """Hook signal handlers once the app registry is populated."""
        super().ready()
        # Imported for its side effects: registers the app's signal receivers.
        from . import signals
1636327 | """Top-level package for EWAH Bool Utils."""
__version__ = '0.1.0'
from .ewah_bool_wrap import *
| StarcoderdataPython |
12803381 | <gh_stars>0
from flask_restful import Resource, reqparse, abort
from flask import request
from toko.models.servico_model import ServicoModel
from toko.schemas.servico_schema import ServicoSchema
class ServicoResource(Resource):
    """CRUD endpoints for a single ``Servico`` entity.

    All mutating verbs share one request parser that validates the four
    mandatory payload fields.
    """

    parser = reqparse.RequestParser()
    parser.add_argument("id_proposta",
                        type=int,
                        required=True,
                        help="O ID da proposta não pode estar em branco."
                        )
    # NOTE(review): this message mentions "Usuário" although the field is the
    # Servico title — looks copy-pasted from a user resource; left unchanged
    # to avoid altering user-facing text beyond the bug fix below.
    parser.add_argument("titulo",
                        type=str,
                        required=True,
                        help="O titulo de Usuário não pode estar em branco."
                        )
    parser.add_argument('descricao',
                        type=str,
                        required=True,
                        help="A descricao do Servico não pode estar em branco."
                        )
    parser.add_argument('id_cronograma',
                        type=int,
                        required=True,
                        help="O cronograma do Servico não pode estar em branco."
                        )

    def get(self, id):
        """Return one Servico by id: 200 on success, 404 if absent, 500 on error."""
        json = ''
        try:
            servico = ServicoModel.encontrar_pelo_id(id)
            print(servico)
            if servico:
                schema = ServicoSchema()
                json = schema.dump(servico).data
            else:
                return {"message": "Servico {} não existe".format(id)}, 404
        except Exception as e:
            print(e)
            # BUG FIX: the original returned the *set* literal
            # {"message", "Erro na requisição".format(id)} (comma instead of
            # colon), which is not a JSON-serialisable body. Return a proper
            # dict; the no-op str.format was dropped as well.
            return {"message": "Erro na requisição"}, 500
        return json, 200

    def delete(self, id):
        """Delete a Servico and return the remaining list.

        Keeps the original (unusual) 201 status and best-effort error
        handling for backward compatibility.
        """
        json = []
        try:
            servico = ServicoModel.encontrar_pelo_id(id)
            if servico:
                servico.remover()
                lista = ServicoModel.listar()
                schema = ServicoSchema(many=True, exclude=['listas'])
                json = schema.dump(lista).data
            else:
                return {"message": "Servico {} não está na lista".format(id)}, 404
        except Exception as e:
            # NOTE(review): errors are swallowed and an empty payload is
            # returned with 201; preserved as-is so existing clients keep
            # working.
            print(e)
        return json, 201

    def post(self):
        """Create a Servico; 400 on duplicate title, 500 on unexpected errors."""
        try:
            data = ServicoResource.parser.parse_args()
            if not data:
                return {"message": "Requisição sem JSON"}, 400
            if ServicoModel.encontrar_pelo_titulo(data['titulo']):
                # NOTE(review): message says "Usuário" but refers to a Servico
                # title collision (likely copy-paste); text kept unchanged.
                return {"message": "Usuário ja existe"}, 400
            else:
                servico = ServicoModel(data['id_proposta'],
                                       data['titulo'],
                                       data['descricao'],
                                       data['id_cronograma'],
                                       )
                servico.adicionar()
                servico = ServicoModel.encontrar_pelo_titulo(data['titulo'])
                user_schema = ServicoSchema()
                json = user_schema.dump(servico).data
                return json, 201
        except Exception as ex:
            print(ex)
            return {"message": "erro"}, 500

    def put(self):
        """Upsert-style create: 200 if the title already exists, else create."""
        json = ''
        try:
            data = ServicoResource.parser.parse_args()
            id_proposta = data['id_proposta']
            titulo = data['titulo']
            descricao = data['descricao']
            id_cronograma = data['id_cronograma']
            servico = ServicoModel.encontrar_pelo_titulo(titulo)
            if servico:
                return {"message": "Servico {} já está na lista".format(servico.titulo)}, 200
            else:
                servico = ServicoModel(
                    id_proposta=id_proposta,
                    titulo=titulo,
                    descricao=descricao,
                    id_cronograma=id_cronograma
                )
                servico.adicionar()
                schema = ServicoSchema(many=False)
                servico = ServicoModel.encontrar_pelo_titulo(titulo)
                json = schema.dump(servico).data
        except Exception as e:
            # NOTE(review): failures still fall through to (json, 201);
            # preserved for backward compatibility.
            print(e)
        return json, 201
class ServicosResource(Resource):
    """Collection endpoint returning every Servico."""

    def get(self):
        """List all Servico records: 200 with the list, 500 on any error."""
        payload = ""
        try:
            registros = ServicoModel.listar()
            serializer = ServicoSchema(many=True)
            payload = serializer.dump(registros).data
        except Exception as exc:
            print(exc)
            return {"message": "Aconteceu um erro tentando retornar a lista de servicos."}, 500
        return payload, 200
| StarcoderdataPython |
3522895 | import unittest
from double_index_max_subsequence import double_index_max_subsequence
class Test_Case_Double_Index_Max_Subsequence(unittest.TestCase):
    """Unit test for double_index_max_subsequence."""

    def test_double_index_max_subsequence(self):
        # The result should be the input pairs sorted so both coordinates
        # increase together.
        pairs = [(65, 100), (70, 150), (56, 90), (75, 190), (60, 95), (68, 110)]
        expected = "[(56, 90), (60, 95), (65, 100), (68, 110), (70, 150), (75, 190)]"
        self.assertEqual(expected, str(double_index_max_subsequence(pairs)))
5090292 | # -*- coding: utf-8 -*-
import logging
from typing import NoReturn, Text
import coloredlogs
from settings.handlers import (BaseFileHandler, BaseStreamHandler,
ContextHandler)
from tools.os import OS
# ==============================================================================
# CLASS
# ==============================================================================
class SingletonLogger(type):
    """Metaclass caching exactly one instance per class.

    The first call to the class constructs and memoises an instance; every
    later call returns that same object, constructor arguments ignored.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(SingletonLogger, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Log(OS, metaclass=SingletonLogger):
    """Process-wide singleton logger.

    Builds a named :class:`logging.Logger`, installs colored console output
    via ``coloredlogs`` and attaches a file handler produced by the
    ``ContextHandler``/``BaseFileHandler`` pair. File-system helpers
    (``join_directory_with_file``, ``check_if_path_and_file_exist``) come
    from the ``OS`` mixin.
    """
    # Accepted level names; anything else falls back to DEBUG.
    LEVELS = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
    def __init__(self, log_path: Text,
                 log_file: Text,
                 log_level: Text,
                 logger_name: Text) -> None:
        # Annotation fixed: __init__ returns None, not NoReturn.
        self._log_path = log_path
        self._log_file = self.join_directory_with_file(self.log_path, log_file)
        # Unknown level names silently degrade to DEBUG.
        self._log_level = log_level if log_level in self.LEVELS else "DEBUG"
        self._logger_name = logger_name
        self.formatter = "%(levelname)s - %(asctime)s - %(message)s - %(pathname)s - %(funcName)s"
        # Ensure the log directory/file exist before any handler touches them.
        self.check_if_path_and_file_exist(self._log_path, self._log_file)
        self._logger = logging.getLogger(self.logger_name)
        self._logger.setLevel(self.log_level)
        # Console (colored) output first, then the file handler.
        self._base_configuration_log_colored()
        self._logger.addHandler(ContextHandler(
            BaseFileHandler()
        ).get_handler(
            log_file=self.log_file,
            log_level=self.log_level,
            formatter=self.formatter)
        )
    def _base_configuration_log_colored(self) -> None:
        # Annotation fixed: coloredlogs.install() is called for its side
        # effect and this method returns None.
        coloredlogs.install(level=self._log_level,
                            logger=self.logger,
                            fmt=self.formatter,
                            milliseconds=True)
    @property
    def log_path(self) -> Text:
        # Directory where log files live.
        return self._log_path
    @property
    def log_file(self) -> Text:
        # Full path of the log file (directory joined with the file name).
        return self._log_file
    @property
    def log_level(self) -> Text:
        return self._log_level
    @property
    def logger_name(self) -> Text:
        return self._logger_name
    @property
    def logger(self) -> logging.Logger:
        # Annotation fixed: this returns the Logger built in __init__,
        # not a string.
        return self._logger
| StarcoderdataPython |
9781127 | """ Loading Raw data """
import logging
from dataclasses import dataclass
from numbers import Number
from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
import xarray as xr
import nowcasting_dataset.filesystem.utils as nd_fs_utils
from nowcasting_dataset.data_sources.data_source import DataSource
from nowcasting_dataset.data_sources.metadata.metadata_model import SpaceTimeLocation
from nowcasting_dataset.data_sources.sun.raw_data_load_save import load_from_zarr, x_y_to_name
from nowcasting_dataset.data_sources.sun.sun_model import Sun
from nowcasting_dataset.geospatial import calculate_azimuth_and_elevation_angle
logger = logging.getLogger(__name__)
@dataclass
class SunDataSource(DataSource):
    """Add azimuth and elevation angles of the sun."""
    # Path to the zarr store holding pre-computed azimuth/elevation tables.
    zarr_path: Union[str, Path]
    def __post_init__(self):
        """Post Init"""
        super().__post_init__()
        # Eagerly load azimuth/elevation DataFrames into memory.
        self._load()
    @staticmethod
    def get_data_model_for_batch():
        """Get the model that is used in the batch"""
        return Sun
    def check_input_paths_exist(self) -> None:
        """Check input paths exist. If not, raise a FileNotFoundError."""
        nd_fs_utils.check_path_exists(self.zarr_path)
    def get_example(self, location: SpaceTimeLocation) -> xr.Dataset:
        """
        Get example data from t0_dt and x and y xoordinates
        Args:
            location: A location object of the example which contains
                - a timestamp of the example (t0_datetime_utc),
                - the x center location of the example (x_location_osgb)
                - the y center location of the example(y_location_osgb)
        Returns: Dictionary of azimuth and elevation data
        """
        # all sun data is from 2019, analaysis showed over the timescale we are interested in the
        # elevation and azimuth angles change by < 1 degree, so to save data, we just use data
        # from 2019.
        t0_datetime_utc = location.t0_datetime_utc
        x_center_osgb = location.x_center_osgb
        y_center_osgb = location.y_center_osgb
        t0_datetime_utc = t0_datetime_utc.replace(year=2019)
        start_dt = self._get_start_dt(t0_datetime_utc)
        end_dt = self._get_end_dt(t0_datetime_utc)
        # The names of the columns get truncated when saving, therefore we need to look for the
        # name of the columns near the location we are looking for
        # Columns are "x,y" strings; parse them back into float coordinates.
        locations = np.array(
            [[float(z.split(",")[0]), float(z.split(",")[1])] for z in self.azimuth.columns]
        )
        # Match on both coordinates with float tolerance (np.isclose) because
        # the saved column names may not round-trip exactly.
        location = locations[
            np.isclose(locations[:, 0], x_center_osgb) & np.isclose(locations[:, 1], y_center_osgb)
        ]
        # lets make sure there is atleast one
        assert len(location) > 0
        # Take the first matching (x, y) coordinate pair.
        location = location[0]
        # make name of column to pull data from. The columns name will be about
        # something like '22222.555,3333.6666'
        name = x_y_to_name(x=location[0], y=location[1])
        del x_center_osgb, y_center_osgb
        azimuth = self.azimuth.loc[start_dt:end_dt][name]
        elevation = self.elevation.loc[start_dt:end_dt][name]
        # Convert the pandas Series into an xarray Dataset keyed by 'time'.
        azimuth = azimuth.to_xarray().rename({"index": "time"})
        elevation = elevation.to_xarray().rename({"index": "time"})
        sun = azimuth.to_dataset(name="azimuth")
        sun["elevation"] = elevation
        return sun
    def _load(self):
        # Loads two DataFrames (azimuth, elevation) indexed by datetime with
        # one column per (x, y) location.
        logger.info(f"Loading Sun data from {self.zarr_path}")
        self.azimuth, self.elevation = load_from_zarr(zarr_path=self.zarr_path)
    def get_locations(
        self, t0_datetimes_utc: pd.DatetimeIndex
    ) -> Tuple[List[Number], List[Number]]:
        """Sun data should not be used to get batch locations"""
        raise NotImplementedError("Sun data should not be used to get batch locations")
    def datetime_index(self) -> pd.DatetimeIndex:
        """Get datetimes where elevation >= 10"""
        # get the lat and lon from london
        latitude = 51
        longitude = 0
        # get elevation for all datetimes
        azimuth_elevation = calculate_azimuth_and_elevation_angle(
            latitude=latitude, longitude=longitude, datestamps=self.elevation.index
        )
        # only select elevations > 10
        mask = azimuth_elevation["elevation"] >= 10
        # create warnings, so we know how many datetimes will be dropped.
        # Should be slightly more than half as its night time 50% of the time
        n_dropping = len(azimuth_elevation) - sum(mask)
        logger.debug(
            f"Will be dropping {n_dropping} datetimes "
            f"out of {len(azimuth_elevation)} as elevation is < 10"
        )
        datetimes = self.elevation[mask].index
        # Sun data is only for 2019, so to expand on these by
        # repeating data from 2014 to 2023
        all_datetimes = pd.DatetimeIndex([])
        for delta_years in range(-5, 5, 1):
            on_year = datetimes + pd.offsets.DateOffset(months=12 * delta_years)
            all_datetimes = all_datetimes.append(on_year)
        return all_datetimes
| StarcoderdataPython |
1819358 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from tqdm import trange
from scipy.io import savemat, loadmat
from scipy.stats import norm
import matplotlib.pyplot as plt
from sklearn.isotonic import IsotonicRegression
from datetime import datetime
from src.modeling.utils.TensorStandardScaler import TensorStandardScaler
from src.modeling.layers.FC_v2 import FC
from src.modeling.layers.RecalibrationLayer import RecalibrationLayer
from src.misc.DotmapUtils import *
import math
class BNN_trainer:
    """Trainer for an ensemble Bayesian neural network.

    Handles (1) bootstrap-resampled ensemble training with a Gaussian
    negative-log-likelihood loss, (2) mean/variance prediction aggregation
    across ensemble members, and (3) post-hoc CDF recalibration of the
    model's ``recalibrator`` head.
    """
    def __init__(self, args, model):
        self.args = args
        self.epochs = args.epochs
        self.batch_size = args.batch_size
        self.num_nets = args.ensemble_size
        self.model = model
        # Training objects
        self.optimizer = tf.keras.optimizers.Adam()
        self.mse_loss = None
        # Prediction objects
        self.sy_pred_mean, self.sy_pred_var = (
            None,
            None,
        )
        # Separate optimizer for the recalibration head.
        self.cal_optimizer = tf.keras.optimizers.Adam()
        self.cal_loss = None
    # TODO: saving and loading model
    @tf.function
    def compute_losses(self, targets, mean, log_var, incl_var_loss=True):
        # Gaussian NLL (up to constants): squared error scaled by inverse
        # variance plus the log-variance term; falls back to plain MSE when
        # incl_var_loss is False.
        inv_var = tf.math.exp(-log_var)
        if incl_var_loss:
            mse_losses = tf.math.reduce_mean(
                tf.math.reduce_mean(tf.math.square(mean - targets) * inv_var, axis=-1),
                axis=-1,
            )
            var_losses = tf.math.reduce_mean(
                tf.math.reduce_mean(log_var, axis=-1), axis=-1
            )
            total_losses = mse_losses + var_losses
        else:
            total_losses = tf.math.reduce_mean(
                tf.reduce_mean(tf.math.square(mean - targets), axis=-1), axis=-1
            )
        return total_losses
    @tf.function
    def train_step(self, inputs, targets):
        """One gradient step on the NLL loss; returns (train_loss, mse_loss)."""
        inputs = tf.cast(inputs, dtype=tf.float32)
        targets = tf.cast(targets, dtype=tf.float32)
        with tf.name_scope("train_step"):
            with tf.GradientTape() as tape:
                mean, log_var = self.model(inputs, ret_log_var=True)
                train_loss = tf.math.reduce_sum(
                    self.compute_losses(targets, mean, log_var, True)
                )
                # train_loss+= #TODO: Add Decays to the Loss Function
                # Regularise the learned log-variance bounds.
                train_loss += 0.01 * tf.math.reduce_sum(
                    self.model.max_logvar
                ) - 0.01 * tf.math.reduce_sum(self.model.min_logvar)
            grads = tape.gradient(train_loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(
                grads_and_vars=zip(grads, self.model.trainable_variables),
                name="gradient_application_train_step",
            )
            # Variance-free MSE, reported for monitoring only.
            mse_loss = self.compute_losses(targets, mean, log_var, False)
        return train_loss, mse_loss
    # TODO: epochs and batch_size
    def train(
        self, inputs, targets, hide_progress=False, holdout_ratio=0.2, max_logging=1000
    ):
        """Train the ensemble on bootstrap-resampled minibatches of (inputs, targets)."""
        def shuffle_rows(arr):
            # Independently permute each row of arr (per-network index order).
            idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
            return arr[np.arange(arr.shape[0])[:, None], idxs]
        # Split into training and holdout sets
        num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
        permutation = np.random.permutation(inputs.shape[0])
        inputs, holdout_inputs = (
            inputs[permutation[num_holdout:]],
            inputs[permutation[:num_holdout]],
        )
        targets, holdout_targets = (
            targets[permutation[num_holdout:]],
            targets[permutation[:num_holdout]],
        )
        # Replicate the holdout set once per ensemble member.
        holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
        holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])
        # Bootstrap: each network draws its own sample indices with replacement.
        idxs = np.random.randint(inputs.shape[0], size=[self.num_nets, inputs.shape[0]])
        self.model.scaler.fit(inputs)
        if hide_progress:
            epoch_range = range(self.epochs)
        else:
            epoch_range = trange(self.epochs, unit="epoch(s)", desc="Network training")
        for epoch in epoch_range:
            for batch_num in range(int(np.ceil(idxs.shape[-1] / self.batch_size))):
                batch_idxs = idxs[
                    :, batch_num * self.batch_size : (batch_num + 1) * self.batch_size
                ]
                # Call train step
                train_loss, mse_loss = self.train_step(
                    inputs[batch_idxs], targets[batch_idxs]
                )
            idxs = shuffle_rows(idxs)
            # TODO: holdout loss
            # NOTE(review): the "Holdout loss(es)" shown below is actually the
            # last training mse_loss — the real holdout loss is not computed yet.
            if not hide_progress:
                if holdout_ratio < 1e-12:
                    epoch_range.set_postfix({"Training loss(es)": mse_loss})
                else:
                    epoch_range.set_postfix(
                        {"Training loss(es)": mse_loss, "Holdout loss(es)": mse_loss}
                    )
    def create_prediction_tensors(self, inputs, factored=False):
        """Aggregate per-network (mean, variance) into an ensemble moment pair.

        For 2-D inputs with factored=False, returns the moment-matched mixture
        mean/variance across ensemble members; otherwise returns the
        per-network tensors unchanged.
        """
        factored_mean, factored_variance = self.model(inputs)
        if len(inputs.shape) == 2 and not factored:
            mean = tf.math.reduce_mean(factored_mean, axis=0)
            # Total variance = spread of member means + mean member variance.
            variance = tf.math.reduce_mean(
                tf.math.square(factored_mean - mean), axis=0
            ) + tf.math.reduce_mean(factored_variance, axis=0)
            return mean, variance
        return factored_mean, factored_variance
    def predict(self, inputs, factored=False):
        # Caches predictions on self.sy_pred_mean / self.sy_pred_var.
        with tf.name_scope("create_predict_tensors"):
            self.sy_pred_mean, self.sy_pred_var = self.create_prediction_tensors(
                inputs, factored
            )
    @tf.function
    def cal_step(self, inputs, targets):
        """One gradient step of the recalibrator on (cdf_pred, cdf_true) pairs."""
        with tf.name_scope("cal_step"):
            with tf.GradientTape() as tape:
                cdf_pred = self.model.recalibrator(inputs)
                cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=targets, logits=cdf_pred
                )
                self.cal_loss = tf.math.reduce_mean(
                    tf.math.reduce_mean(cross_entropy, axis=-1), axis=-1
                )
            grads = tape.gradient(self.cal_loss, self.model.cal_vars)
            self.cal_optimizer.apply_gradients(
                grads_and_vars=zip(grads, self.model.cal_vars), name="cal_step"
            )
        return self.cal_loss
    def calibrate(
        self, inputs, targets, hide_progress=False, holdout_ratio=0.0, max_logging=5000
    ):
        """Fit the recalibrator to map predicted CDF values onto empirical ones.

        NOTE(review): holdout_ratio and max_logging are currently unused here.
        """
        inputs, targets = tf.cast(inputs, dtype=tf.float32), tf.cast(
            targets, dtype=tf.float32
        )
        self.model.scaler.fit(inputs)
        self.predict(inputs)
        all_ys = targets
        train_x = np.zeros_like(all_ys)
        train_y = np.zeros_like(all_ys)
        # Per output dimension: predicted Gaussian CDF of each target vs the
        # empirical CDF of those predicted values.
        for d in range(self.sy_pred_mean.shape[1]):
            mu = self.sy_pred_mean[:, d]
            var = self.sy_pred_var[:, d]
            ys = all_ys[:, d]
            cdf_pred = norm.cdf(ys, loc=mu, scale=tf.math.sqrt(var))
            cdf_true = np.array(
                [np.sum(cdf_pred < p) / len(cdf_pred) for p in cdf_pred]
            )
            train_x[:, d] = cdf_pred
            train_y[:, d] = cdf_true
        if hide_progress:
            epoch_range = range(self.epochs)
        else:
            epoch_range = trange(
                self.epochs, unit="epoch(s)", desc="Calibration training"
            )
        def iterate_minibatches(inp, targs, batchsize, shuffle=True):
            # Yields full batches then one trailing partial batch, if any.
            assert inp.shape[0] == targs.shape[0]
            indices = np.arange(inp.shape[0])
            if shuffle:
                np.random.shuffle(indices)
            last_idx = 0
            for curr_idx in range(
                0, inp.shape[0] - self.batch_size + 1, self.batch_size
            ):
                curr_batch = indices[curr_idx : curr_idx + self.batch_size]
                last_idx = curr_idx + self.batch_size
                yield inp[curr_batch], targs[curr_batch]
            if inp.shape[0] % self.batch_size != 0:
                last_batch = indices[last_idx:]
                yield inp[last_batch], targs[last_batch]
        for _ in epoch_range:
            for x_batch, y_batch in iterate_minibatches(
                train_x, train_y, self.batch_size
            ):
                self.cal_loss = self.cal_step(x_batch, y_batch)
            if not hide_progress:
                epoch_range.set_postfix({"Training loss(es)": self.cal_loss})
# if __name__ == "__main__":
# from dotmap import DotMap
# NUM_SAMPLES = 1024
# IN_DIM = 100
# HIDDEN_DIM = 10
# OUT_DIM = 2
# model_config = [
# DotMap(
# {
# "layer_name": "FC",
# "input_dim": 32,
# "output_dim": 32,
# "activation": "swish",
# "weight_decay": 0.05,
# "ensemble_size": 1,
# }
# ),
# DotMap(
# {
# "layer_name": "FC",
# "input_dim": 32,
# "output_dim": 4,
# "activation": "swish",
# "weight_decay": 0.05,
# "ensemble_size": 1,
# }
# ),
# ]
# model = BNN(DotMap(name="test"), model_config)
# a = tf.random.uniform(shape=(32, 32))
# print(model(a)[0]) | StarcoderdataPython |
4842219 | import shutil
import tempfile
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from ..forms import PostForm
from ..models import Post, Group, Comment
User = get_user_model()
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostFormTests(TestCase):
    """Form-level tests: post creation/edition and comment creation."""
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: two users, one post, two groups and one comment.
        super().setUpClass()
        cls.form = PostForm()
        cls.user = User.objects.create_user(username='Username')
        cls.user2 = User.objects.create_user(username='Name')
        cls.post = Post.objects.create(
            author=cls.user,
            text='Тестовый текст',
        )
        cls.group1 = Group.objects.create(
            title='Тестовое название',
            slug='test-slug',
            description='Тестовое описание',
        )
        cls.group2 = Group.objects.create(
            title='Тестовое название 2',
            slug='test-slug-2',
            description='Тестовое описание 2',
        )
        cls.comment = Comment.objects.create(
            post=cls.post,
            author=cls.user,
            text='Тестовый комментарий'
        )
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Remove the temporary MEDIA_ROOT with any uploaded test images.
        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
    def setUp(self):
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)
    def test_create_post_form(self):
        """Валидная форма создает запись в Post."""
        posts_count = Post.objects.count()
        # Minimal 2x1 GIF payload ("GIF89a" header) used as the image upload.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        form_data = {
            'text': 'Тестовый текст',
            'author': self.user,
            'group': self.group1.id,
            'image': uploaded
        }
        response = self.authorized_client.post(
            reverse('posts:post_create'),
            data=form_data,
            follow=True
        )
        # Successful creation redirects to the author's profile page.
        self.assertRedirects(
            response, reverse(
                'posts:profile', kwargs={'username': self.user}
            )
        )
        self.assertEqual(Post.objects.count(), posts_count + 1)
    def test_edit_post_form(self):
        """Валидная форма редактирует запись в Post."""
        form_data = {
            'text': 'Измененный текст',
            'group': self.group2.id
        }
        response = self.authorized_client.post(
            reverse('posts:post_edit', kwargs={'post_id': self.post.id}),
            data=form_data,
            follow=True
        )
        self.assertRedirects(
            response, reverse(
                'posts:post_detail', kwargs={'post_id': self.post.id}
            )
        )
        # Both the text and the group must have been updated.
        self.assertTrue(Post.objects.filter(
            text='Измененный текст',
            group=self.group2
        ).exists())
    def test_comment_form(self):
        """Валидная форма создает комментарий."""
        comment_count = Comment.objects.count()
        form_data = {
            'post': self.post,
            'author': self.authorized_client,
            'text': 'Тестовый комментарий',
        }
        response = self.authorized_client.post(
            reverse('posts:add_comment', kwargs={'post_id': self.post.id}),
            data=form_data,
            follow=True
        )
        self.assertRedirects(
            response, reverse(
                'posts:post_detail', kwargs={'post_id': self.post.id}
            )
        )
        self.assertTrue(Comment.objects.filter(
            post=self.post,
            author=self.user,
            text='Тестовый комментарий'
        ).exists())
        self.assertEqual(Comment.objects.count(), comment_count + 1)
| StarcoderdataPython |
8104975 | """
This is test file for node_holder.py
"""
import unittest
import node_holder
class TestNodeHolder(unittest.TestCase):
    """Unit tests for the URL-parsing helpers in node_holder.py."""
    def test_fetch_change_id_postive_test(self):
        """Positive unit test for fetch_change_id func."""
        url = ('https://review.rdoproject.org/r/c/testproject/'
               '+/28446/63/.zuul.yaml')
        self.assertEqual(node_holder.fetch_change_id(url), '28446')
    def test_fetch_change_id_negative_test(self):
        """Negative unit test for fetch_change_id func (no change id in URL)."""
        with self.assertRaises(AttributeError):
            node_holder.fetch_change_id(
                'https://review.rdoproject.org/r/c/testproject/+/')
    def test_fetch_patchset_number_postive_test(self):
        """Positive unit test for fetch_patchset_number func."""
        url = ('https://review.rdoproject.org/r/c/testproject'
               '/+/28446/63/.zuul.yaml')
        self.assertEqual(
            node_holder.fetch_patchset_number(url), '63')
    def test_fetch_patchset_number_negative_test(self):
        """Negative unit test for fetch_patchset_number func (missing patchset)."""
        with self.assertRaises(IndexError):
            node_holder.fetch_patchset_number(
                'https://review.rdoproject.org/r/c/testproject/+/')
    def test_fetch_project_name_positive_test(self):
        """Positive unit test for fetch_project_name func."""
        url = ('https://review.rdoproject.org/r/c/testproject'
               '/+/28446/63/.zuul.yaml')
        self.assertEqual(node_holder.fetch_project_name(url), 'testproject')
    def test_fetch_project_name_positive_test_something_before_project(self):
        """fetch_project_name must return the last path segment before '/+/'."""
        self.assertEqual(node_holder.fetch_project_name(
            'https://review.opendev.org/c/openstack/tripleo-ci/+/706288/'),
            'tripleo-ci')
        self.assertEqual(node_holder.fetch_project_name(
            'https://review.rdoproject.org/r/c/rdo-infra/ci-config/+/36911'),
            'ci-config')
    def test_fetch_project_name_negative_test(self):
        """Negative unit test for fetch_project_name func (no project segment)."""
        with self.assertRaises(IndexError):
            node_holder.fetch_project_name(
                'https://review.rdoproject.org/r/c/')
    def test_gerrit_check_postive(self):
        """Positive unit tests for gerrit_check func (rdo vs upstream hosts)."""
        url = ('https://review.rdoproject.org/r/c/testproject'
               '/+/28446/63/.zuul.yaml')
        self.assertEqual(node_holder.gerrit_check(url), 'rdo')
        self.assertEqual(node_holder.gerrit_check(
            'https://review.opendev.org/c/openstack/tripleo-ci/+/706288/'),
            'upstream')
    def test_gerrit_check_negative(self):
        """Negative unit tests for gerrit_check func (unknown host)."""
        with self.assertRaises(Exception):
            node_holder.gerrit_check('https://google.com')
    def test_fetch_file_name_postive_test(self):
        """Positive unit tests for fetch_file_name func.

        Nested paths come back URL-encoded ('/' -> '%2F').
        """
        url = ('https://review.rdoproject.org/r/c/testproject'
               '/+/28446/63/.zuul.yaml')
        url2 = ('https://review.rdoproject.org/r/c/rdo-jobs/+/'
                '37139/1/zuul.d/projects.yaml')
        self.assertEqual(node_holder.fetch_file_name(url), '.zuul.yaml')
        self.assertEqual(node_holder.fetch_file_name(
            url2), 'zuul.d%2Fprojects.yaml')
    def test_fetch_file_name_negative_test(self):
        """Negative unit tests for fetch_file_name func (no file in URL)."""
        with self.assertRaises(Exception):
            node_holder.fetch_file_name(
                'https://review.rdoproject.org/r/c/testproject/')
    def test_convert_patch_url_to_download_url_postive_test(self):
        """Positive unit tests for convert_patch_url_to_download_url func."""
        url = ('https://review.rdoproject.org/r/c/testproject'
               '/+/28446/63/.zuul.yaml')
        expected = ("https://review.rdoproject.org/r/changes/testproject~"
                    "28446/revisions/63/files/.zuul.yaml/download")
        self.assertEqual(node_holder.convert_patch_url_to_download_url(
            url, '28446', 'testproject', '63', '.zuul.yaml'),
            expected)
    def test_convert_patch_url_to_download_url_n_test_c_not_present(self):
        """Negative unit tests for convert_patch_url_to_download_url func."""
        url1 = ('https://review.rdoproject.org/r/testproject'
                '/+/28446/63/.zuul.yaml')
        with self.assertRaises(Exception):
            node_holder.convert_patch_url_to_download_url(
                url1, '28446', 'testproject', '63', '.zuul.yaml')
| StarcoderdataPython |
11266256 | """ Client classes for individual A/V Receivers to be used by the PolyGlot node classes """
from .av_device import AvDevice
from .pioneer_vsx1021_device import PioneerVSX1021Device
from .sony_bravia_xbr_65x810c_device import SonyBraviaXBR65X810CDevice
| StarcoderdataPython |
9673288 | <filename>Testing/Python/AnalogOffsetRemoverTest.py
import btk
import unittest
import _TDDConfigure
import numpy
class AnalogOffsetRemoverTest(unittest.TestCase):
    """Tests for btkAnalogOffsetRemover.

    The filter subtracts, from each raw analog channel, the mean of the
    matching channel of the offset acquisition (matched by index, or by
    label when labels are set).
    """

    def _constant_acq(self, channel_count, values):
        """Acquisition with `channel_count` analog channels of 25 frames;
        the first len(values) channels are filled with the given constants."""
        acq = btk.btkAcquisition()
        acq.Init(0, 25, channel_count, 1)
        for index, value in enumerate(values):
            acq.GetAnalog(index).SetValues(numpy.full((25, 1), float(value)))
        return acq

    def _run_remover(self, raw, offset):
        """Wire raw/offset acquisitions through the filter; return its output."""
        remover = btk.btkAnalogOffsetRemover()
        remover.SetRawInput(raw)
        remover.SetOffsetInput(offset)
        remover.Update()
        return remover.GetOutput()

    def test_Test0(self):
        # Using the same acquisition as raw and offset must zero it out.
        acq = self._constant_acq(1, [5.0])
        output = self._run_remover(acq, acq)
        self.assertEqual(output.GetAnalogNumber(), 1)
        self.assertEqual(output.GetAnalog(0).GetValues().sum(), 0.0)

    def test_Test1234(self):
        output = self._run_remover(self._constant_acq(4, [5.0, 4.0, 3.0, 2.0]),
                                   self._constant_acq(4, [1.0, 2.0, 3.0, 4.0]))
        self.assertEqual(output.GetAnalogNumber(), 4)
        for index, mean in enumerate([4.0, 2.0, 0.0, -2.0]):
            self.assertEqual(output.GetAnalog(index).GetValues().sum() / 25.0, mean)

    def test_Test3Over4(self):
        # Offset acquisition has 4 channels but only 3 are explicitly set.
        output = self._run_remover(self._constant_acq(4, [5.0, 4.0, 3.0, 2.0]),
                                   self._constant_acq(4, [1.0, 2.0, 3.0]))
        self.assertEqual(output.GetAnalogNumber(), 4)
        for index, mean in enumerate([4.0, 2.0, 0.0, 2.0]):
            self.assertEqual(output.GetAnalog(index).GetValues().sum() / 25.0, mean)

    def test_Test4Over3(self):
        # More offset channels than raw channels: extras are ignored.
        output = self._run_remover(self._constant_acq(3, [5.0, 4.0, 3.0]),
                                   self._constant_acq(4, [1.0, 2.0, 3.0, 4.0]))
        self.assertEqual(output.GetAnalogNumber(), 3)
        for index, mean in enumerate([4.0, 2.0, 0.0]):
            self.assertEqual(output.GetAnalog(index).GetValues().sum() / 25.0, mean)

    def test_TestNoCommonLabel(self):
        raw = self._constant_acq(3, [])
        raw.GetAnalog(0).SetLabel("FOO")
        raw.GetAnalog(1).SetLabel("BAR")
        raw.GetAnalog(2).SetLabel("FOOBAR")
        output = self._run_remover(raw, self._constant_acq(3, [1.0, 2.0, 3.0]))
        self.assertEqual(output.GetAnalogNumber(), 3)
        for index in range(3):
            self.assertEqual(output.GetAnalog(index).GetValues().sum(), 0.0)

    def test_TestOneCommonLabel(self):
        raw = self._constant_acq(3, [])
        raw.GetAnalog(0).SetLabel("FOO")
        raw.GetAnalog(1).SetLabel("BAR")
        raw.GetAnalog(2).SetLabel("FOOBAR")
        offset = self._constant_acq(3, [1.0, 2.0, 3.0])
        offset.GetAnalog(2).SetLabel("FOO")
        output = self._run_remover(raw, offset)
        self.assertEqual(output.GetAnalogNumber(), 3)
        # Only "FOO" matches; its offset channel holds 3.0, so 0 - 3 = -3.
        for index, mean in enumerate([-3.0, 0.0, 0.0]):
            self.assertEqual(output.GetAnalog(index).GetValues().sum() / 25.0, mean)
| StarcoderdataPython |
9656957 | # Copyright 2021 Foundries.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import asyncio
import contextlib
import json
import logging
import signal
import subprocess
import sys
import time
from asgiref.sync import sync_to_async
from django.core.management.base import BaseCommand
from django.db.models import Field
from django.db.utils import OperationalError
from conductor.core.models import LAVABackend
from conductor.core.tasks import process_testjob_notification, process_device_notification
logger = logging.getLogger()
async def listener_main(backend):
    """Entry coroutine: run the websocket listener loop for one backend."""
    stop_event = asyncio.Event()
    await asyncio.gather(
        listen_for_events(stop_event, backend)
    )
async def listen_for_events(event: asyncio.Event, backend) -> None:
    """Connect to the backend's websocket and dispatch incoming events forever.

    Reconnects (after a 1s pause) whenever the client session drops.
    NOTE(review): the `event` parameter is never used here — confirm whether
    it was meant to signal shutdown.
    """
    logger.info("Starting event listener")
    while True:
        # Swallow connection errors so a dropped socket triggers a reconnect
        # instead of crashing the listener process.
        with contextlib.suppress(aiohttp.ClientError):
            async with aiohttp.ClientSession() as session:
                async with session.ws_connect(backend.websocket_url) as ws:
                    logger.info("Session connected")
                    async for msg in ws:
                        if msg.type != aiohttp.WSMsgType.TEXT:
                            continue
                        try:
                            data = json.loads(msg.data)
                            logger.info(data)
                            # Messages are 5-element arrays; the payload is a
                            # JSON-encoded string in the last slot.
                            (topic, _, dt, username, data) = data
                            data = json.loads(data)
                            if topic.endswith(".testjob"):
                                logger.info(f"dispatching testjob {data['job']}")
                                #await sync_to_async(process_testjob_notification.delay, thread_sensitive=True)(data)
                                process_testjob_notification.delay(data)
                            if topic.endswith(".device"):
                                await sync_to_async(process_device_notification.delay, thread_sensitive=True)(data)
                        except ValueError:
                            # Both json.loads failures and a wrong-arity
                            # unpack raise ValueError; skip the bad message.
                            logger.error("Invalid message: %s", msg)
                            continue
        await asyncio.sleep(1)
class Listener(object):
    """Runs the asyncio websocket listener for a single LAVA backend."""

    def __init__(self, backend):
        self.backend = backend

    def run(self):
        """Block in the event loop until the listener finishes; exit early
        when the backend has no websocket URL configured."""
        backend = self.backend
        if not backend.websocket_url:
            logger.info("Websocket URL missing. Exiting")
            sys.exit()
        logger.info("Backend %s starting" % backend.name)
        # Treat both INT and TERM as a request to shut down cleanly.
        for signum in (signal.SIGINT, signal.SIGTERM):
            signal.signal(signum, self.stop)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(listener_main(backend))
        loop.close()
        logger.info("Backend %s exited on its own" % backend.name)

    def stop(self, signal, stack_frame):
        logger.info("Backend %s finishing ..." % self.backend.name)
        sys.exit()
class ListenerManager(object):
    """Master process: keeps one listener subprocess alive per LAVABackend."""

    def __init__(self):
        # backend.id -> subprocess.Popen of the child listener process
        self.__processes__ = {}

    def run(self):
        self.setup_signals()
        self.wait_for_setup()
        self.loop()
        self.cleanup()

    def setup_signals(self):
        # make SIGTERM equivalent to SIGINT (e.g. control-c)
        signal.signal(signal.SIGTERM, signal.getsignal(signal.SIGINT))

    def wait_for_setup(self):
        """Poll the database until it answers; give up after ~2 minutes."""
        n = 0
        while n < 24:  # wait up to 2 min
            try:
                LAVABackend.objects.count()
                logger.info("listener manager started")
                return
            except OperationalError:
                logger.info("Waiting to database to be up; will retry in 5s ...")
                time.sleep(5)
                n += 1
        logger.error("Timed out waiting for database to be up")
        sys.exit(1)

    def keep_listeners_running(self):
        """Start listeners for new backends; stop those removed from the DB."""
        ids = list(self.__processes__.keys())
        for backend in LAVABackend.objects.all():
            process = self.__processes__.get(backend.id)
            if not process:
                self.start(backend)
            if backend.id in ids:
                ids.remove(backend.id)
        # remaining backends were removed from the database, stop them
        for backend_id in ids:
            self.stop(backend_id)

    def start(self, backend):
        """Spawn a child `lava_listener <name>` management command."""
        argv = [sys.executable, '-m', 'conductor.manage', 'lava_listener', backend.name]
        listener = subprocess.Popen(argv)
        self.__processes__[backend.id] = listener

    def loop(self):
        try:
            while True:
                self.keep_listeners_running()
                # FIXME: ideally we should have a blocking call here that waits
                # for a change to happen in the database, but we didn't find a
                # simple/portable way of doing that yet. Let's just sleep for a
                # few seconds instead, for now.
                time.sleep(60)
        except KeyboardInterrupt:
            pass  # cleanup() will terminate sub-processes

    def cleanup(self):
        for backend_id in list(self.__processes__.keys()):
            self.stop(backend_id)

    def stop(self, backend_id):
        """Terminate and reap the child for `backend_id`, then forget it.

        Popen.poll() returns None while the child is still running. The
        previous check `if not process.poll()` also matched a child that had
        already exited with status 0 and tried to terminate it again.
        """
        process = self.__processes__[backend_id]
        if process.poll() is None:
            process.terminate()
            process.wait()
        self.__processes__.pop(backend_id)
class Command(BaseCommand):
    help = "Listen to LAVA websocket events"

    def add_arguments(self, parser):
        parser.add_argument(
            'BACKEND',
            nargs='?',
            type=str,
            help='LAVA Backend name to listen to. If ommited, start the master process.',
        )

    def handle(self, *args, **options):
        """Run a single-backend listener when BACKEND is given, otherwise
        start the manager that supervises one child per backend."""
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler()
        # verbosity 0 -> WARNING, 1 -> INFO, anything higher -> DEBUG
        handler.setLevel(
            {0: logging.WARNING, 1: logging.INFO}.get(options['verbosity'], logging.DEBUG)
        )
        logger.addHandler(handler)
        logger.info("Starting lava_listener command")
        backend_name = options.get("BACKEND")
        if backend_name:
            Listener(LAVABackend.objects.get(name=backend_name)).run()
        else:
            ListenerManager().run()
| StarcoderdataPython |
5143178 | import torch
import tqdm.auto as tqdm
import fairseq
from config import arch_args, config, task
from utils.env import enable_log, get_device
from utils.model import (build_model, inference_step, load_data_iterator,
try_load_checkpoint)
def main():
    """Build the model, restore its checkpoint, run test-set inference and
    write the predictions to disk."""
    logger = enable_log()
    device = get_device()
    model = build_model(arch_args, task).to(device)
    try_load_checkpoint(logger, model)
    sample_ids, hypotheses = pred(model, task, device)
    save_pred(sample_ids, hypotheses)
def pred(model, task, device, split="test"):
    """Run inference over `split`; return (sample ids, hypothesis strings)."""
    task.load_dataset(split=split, epoch=1)
    itr = load_data_iterator(config.seed, task, split, 1, config.max_tokens,
                             config.num_workers).next_epoch_itr(shuffle=False)
    idxs, hyps = [], []
    model.eval()
    with torch.no_grad():
        for sample in tqdm.tqdm(itr, desc="prediction"):
            # move dict to a device (only `to()` method is not valid)
            sample = fairseq.utils.move_to_cuda(sample, device=device)
            # do inference; keep only the hypotheses
            _, batch_hyps, _ = inference_step(sample, model)
            hyps.extend(batch_hyps)
            idxs.extend(list(sample['id']))
    return idxs, hyps
def save_pred(idxs, hyps, outfile="./prediction.txt"):
    """Restore original dataset order and write one hypothesis per line."""
    # sort according to preprocess: idxs are the original sample positions
    ordered = [hyp for _, hyp in sorted(zip(idxs, hyps))]
    with open(outfile, "w", encoding='utf-8') as f:
        f.writelines(hyp + "\n" for hyp in ordered)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4892529 | <gh_stars>1-10
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import blocks.urls
import prices.urls
# Combined websocket routes contributed by the blocks and prices apps.
websocket_urlpatterns = (
    blocks.urls.websocket_urlpatterns + prices.urls.websocket_urlpatterns
)
# ASGI entry point: authenticate the session, then route websocket connections.
application = ProtocolTypeRouter(
    {"websocket": AuthMiddlewareStack(URLRouter(websocket_urlpatterns))}
)
| StarcoderdataPython |
12806643 | import torch
def weight_init(m):
    """Initialize one module (intended for `model.apply(weight_init)`):
    Xavier-normal weights and zero bias for linear/conv layers, ones/zeros
    for batch-norm layers. Other module types are left untouched."""
    linear_like = (torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv1d)
    norm_like = (torch.nn.BatchNorm2d, torch.nn.BatchNorm1d)
    if isinstance(m, linear_like):
        torch.nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0)
    elif isinstance(m, norm_like):
        torch.nn.init.constant_(m.weight, 1)
        torch.nn.init.constant_(m.bias, 0)
def get_graph_feature(x, k, idx):
    """Gather k-NN edge features for every point.

    x:   (batch, channels, num_points) point features.
    idx: neighbor indices per point, shape (batch, num_points, k), local to
         each batch element.
    Returns (batch, num_points, k, 2*channels): (neighbor - point, neighbor)
    concatenated along the last axis.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    # Offset per-batch indices so they address the flattened (B*N, C) tensor.
    idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()
    neighbor = x.view(batch_size * num_points, -1)[idx, :]
    neighbor = neighbor.view(batch_size, num_points, k, num_dims)
    # Broadcast each point alongside its k neighbors.
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    feature = torch.cat((neighbor - x, neighbor), dim=3)  # (xj-xi, xj): b,n,k,2c
    return feature
def assign_score(score, point_input):
    """Blend per-neighbor features with attention-like scores.

    score:       (B, N, K, m) weights over m kernels.
    point_input: (B, N, K, m, Cout) per-kernel features.
    Returns (B, N, K, Cout): the score-weighted sum over the m kernels.
    """
    b, n, k, m = score.shape
    weighted = torch.matmul(score.unsqueeze(3), point_input)  # (B,N,K,1,Cout)
    return weighted.view(b, n, k, -1)
def get_ed(x, y):
    """Euclidean distance between x and y along the last axis, returned as a
    column vector with one row per leading-dimension entry of x."""
    diff = x - y
    distances = torch.norm(diff, dim=-1)
    return distances.reshape(x.shape[0], 1)
def assign_kernel_withoutk(in_feat, kernel, M):
    """Project point features onto M kernels.

    in_feat: (B, Cin, N0); kernel: (2*Cin, M*Cout) — the top and bottom Cin
    rows are applied separately (presumably the two halves of a paired
    weight bank — TODO confirm against the caller).
    Returns two (B, N0, M, Cout) tensors: the full projection, and the first
    half plus either a coordinate-only projection (odd Cin, first 3 channels
    treated as xyz) or zeros (even Cin).

    Note: the original final line carried stray dataset text fused onto the
    return statement, which made the file unparseable; it has been removed.
    """
    B, Cin, N0 = in_feat.size()
    in_feat_trans = in_feat.permute(0, 2, 1)
    out_feat_half1 = torch.matmul(in_feat_trans, kernel[:Cin]).view(B, N0, M, -1)  # b,n,m,o1
    out_feat_half2 = torch.matmul(in_feat_trans, kernel[Cin:]).view(B, N0, M, -1)  # b,n,m,o1
    if in_feat.size(1) % 2 != 0:
        # Odd channel count: first 3 channels are assumed to be coordinates.
        out_feat_half_coord = torch.matmul(in_feat_trans[:, :, :3], kernel[Cin: Cin + 3]).view(B, N0, M, -1)  # b,n,m,o1
    else:
        out_feat_half_coord = torch.zeros_like(out_feat_half2)
    return out_feat_half1 + out_feat_half2, out_feat_half1 + out_feat_half_coord
11245728 | <reponame>lkljlh1001/HelloPython<filename>Python3Lab/Lab03.py
# ((**lab01 ~ lab02의 예제를 함수로 작성**))
#<이름 짓기>
# 파스칼 표기법 : 첫단어를 대문자로 시작하며 이름을 지음
# ex) Employees, Departments
# RegisterEmployee,JoinMember
# 카멜 표기법 : 첫단어를 소문자로 시작하며 이름을 지음
# ex) registerEmployee
# 스네이크 표기법 : 소문자와 _기호를 이용해서 이름을 지음
# ex) register_employee
# 헝가리안 표기법 : 자료형을 의미하는 접두사를 이용해서 이름을 지음
# ex) strName,isMarried,boolMarried
#8 생활속 문제를 파이썬으로 풀기
# 자취방을 구하는데 마음에드는 방 두개를 찾았다 .
# 방 A 는 가로2.5m 세로 3m 이고 월세 27만원이다.
# 방 B 는 가로 4m 세로 2m 이고 월세 30만원이다.
def compareRoom(width, height, price):
    """Return floor area per unit of monthly rent (bigger = better deal)."""
    area = width * height
    return area / price
# Compare the two candidate rooms by area-per-rent; the higher value wins.
roomA = compareRoom(2.5, 3, 27)
roomB = compareRoom(4, 2, 30)
if(roomA > roomB):
    print('방A가 낫네요')
else:
    print('방B가 낫네요')
#10 주식회사 선박중공업은 한해 동안 철강 석유 등의
# 원자재(불변자본)를 구매하는데 30억원을,
# 노동자를 고용(가변자본)하는 데
# 15억원을 사용했다. 선방중공업은
# 이를 통해 선박을 제조/판매 하여
# 45억원의 순수익(잉여가치)을 냈다.
# 선박중공업의 한해 이윤율을 다음
# 송식을 이용해서 계산하세요
# 이윤율 = 잉여가치액/(불변자본 + 가변자본)
def computeProfit():
    """Read constant capital (c), variable capital (v) and surplus value (s)
    from stdin and return the rate of profit s / (c + v).

    The exercise comment above states 이윤율 = 잉여가치액/(불변자본 + 가변자본);
    the original code returned (c + v) / s, i.e. the formula inverted.
    """
    c = int(input('불변자본을 입력하세요'))
    v = int(input('가변자본을 입력하세요'))
    s = int(input('잉여가치액을 입력하세요'))
    return s / (c + v)
print(computeProfit())
#11한국에 사는 당신은 외국 인터넷 쇼핑몰에서 노트북을 구매하려 한다.
# 이쇼핑몰에서는 달러(USD) 또는 유(EUR)료로 결제할 수 있고
# 노트북의 가격은 780달러 또는 650 유로다.
# 달러로 사는것과 유로로 사는것중 어느쪽이 더저렴한다?
# 이 문제를 파이썬 프로그램을 작성하여
# 해결하고 인터넷 검색을 현재의 '달러->원' 환율과
# '유로->원' 환율을 조사한 후 계산하세요
# --------------------------------------------------------
# 달러환율 = 1071
# 유로환율 = 1309
def getExchageRate(country):
    """KRW exchange rate for 'us' (USD) or 'euro' (EUR); 0 for anything else.

    (Name keeps the original spelling so existing callers keep working.)
    """
    rates = {'us': 1071, 'euro': 1309}
    return rates.get(country, 0)
# Total KRW cost of the laptop in each currency (780 USD vs 650 EUR).
buyUS=780*getExchageRate('us')
buyEuro = 650*getExchageRate('euro')
print(buyEuro,buyUS)
if buyUS > buyEuro:
    print('유로화로 구입하는게 더 싸네요')
else:
    print('달러로 구입하는게 더 싸네요')
# 12당신이 다니는 학교의 운동장은 원형이고 지름이 100m다.
# 어느 체육 시간, 두 명씩 나란히 운동장을 한바퀴 달리는 시합을 하게 되었다.
# 그런데 안쪽선수는 바깥쪽선수보다 5m 안쪽에서 달린다.
# 당신은 바깥족에서 달리는 선수가 불리하다고 이의를 제기했다
# 바깥쪽 선수가 안족 선수보다 몇 미터 더달려야 하기 때문인가?
# 이 문제를 파이썬 프로그램을 작성하여 해결해 보자
# ----------------------------------------------------------------
# 원형 지름이 100m
# 안쪽선수는 바깥쪽 선수보다 5m 안쪽에서 달린다.
# 바깥쪽에서 달리는 서순사 불리하다고 이의를 제기함
# 바깥쪽 선수가 안쪽 선수보다 몇 미터 더달려야 하기때문인가
# pi * r
def howManyRun(radius):
    """Distance of one lap: 3.14 * radius.

    NOTE(review): the callers below pass the lane *diameter* (100, 90), so
    the value behaves as circumference = pi * diameter; the parameter name
    is misleading but is kept for compatibility.
    """
    return 3.14 * radius
# Outer lane (diameter 100m) vs inner lane (diameter 90m) lap lengths.
studentA = howManyRun(100)
studentB = howManyRun(90)
print('학생 A는 학생 B보다 %d만큼 더 뜀' % (studentA-studentB))
# 17 사용자로부터 두 개의 정수를 입력받아 사칙연산 계산 결과를 출력하는 프로그램을 작성해 보자.
# 예를 들어, 사용자가 입력한 수가 10과 20일떄, 프로그램의 실행 결과는 다음과 같다.
# 첫번째 정수를 입력하세요
# 10
# 두번째 정수를 입력하세요
# 20
# 10+20=30
# 10-20= -10
# 10*20=200
# 10/20 =0.5
def intCalu():
    """Read two integers and print their +, -, *, / and ** results.

    The division is printed with %s so that fractional results match the
    expected output in the exercise comment (e.g. 10 / 20 = 0.5); the
    original used %d, which truncated 0.5 to 0.
    """
    num1 = int(input('좌변값을 하나 입력하세요'))
    num2 = int(input('우변값을 하나 입력하세요'))
    fmt = "%d + %d = %d \n %d - %d = %d\n"
    fmt += "%d * %d = %d \n %d / %d = %s\n"
    fmt += "%d ** %d = %d"
    print(fmt % (num1, num2, num1 + num2,
                 num1, num2, num1 - num2,
                 num1, num2, num1 * num2,
                 num1, num2, num1 / num2,
                 num1, num2, num1 ** num2))
intCalu()
#18 사용자가 연봉과 결혼 여부를 입력하면 다음의
# 세금율에 의해 납부해야 할 세금을 계산하는 프로그램을 작성
def computeTax():
    """Read salary and marital status from stdin; print the computed tax.

    Unmarried: 10% under 3000, otherwise 25%.
    NOTE(review): in the married branch the condition is `if salary :`, which
    is true for any positive salary, so the 25% rate there is unreachable.
    It looks like a threshold comparison (e.g. `salary < N`) was lost; the
    intended married threshold is not recoverable from this file — TODO
    confirm against the tax table referenced in the exercise.
    """
    salary = int(input('연봉을 입력하세요'))
    isMarried = input('결혼여부를 입력하세요 (Y/N)')
    tax=0
    if isMarried.upper() == 'N':
        if salary <3000 :
            tax = salary * 0.1
        else :
            tax = salary * 0.25
        isMarried ="아니오"
    else:
        if salary :
            tax= salary*0.1
        else :
            tax = salary*0.25
        isMarried ="예"
    fmt = "연봉 : %d, 결혼여부 : %s,세금:%.1f"
    print(fmt % (salary,isMarried,tax))
# Run the interactive tax calculator.
computeTax()
# 19다음 조건을 이용해서 현재 연도를 입력하면
# 윤년 여부를 출력하는 프로그램을 작성하세요
def isLeapYear():
    """Read a year from stdin and print whether it is a leap year.

    Leap rule: divisible by 4 and not by 100, or divisible by 400.
    The original tested `year / 4 == 0` (true division) instead of
    `year % 4 == 0`, so every year > 0 was reported as a non-leap year.
    """
    year = int(input('윤년여부를 알고 싶은 년도를 입력하세요'))
    isleap = '윤년아닙니다'
    if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
        isleap = '윤년입니다'
    print("%d는 %s" % (year, isleap))
isLeapYear()
# 20다음 조건을 만족하는 복권 발행 프로그램을 작성하세요
# a 사용자로부터 복권 숫자 3자리를 입력받으세요
# b 프로그램상에서 난수 생성을 이용해서 복권 3자리 수를 생성
# c 사용자가 입력한 복권 숫자가 모두 일치 :상금 백만원
# c 사용자가 입력한 복권 숫자가 2개 일치 :상금 1만원
# c 사용자가 입력한 복권 숫자가 1개 일치 :상금 1천원
def rouletteLotto():
    """Read a 3-digit guess, draw a random 3-digit winning number and print
    the prize tier: 3 digit matches -> 1M won, 2 -> 10k won, 1 -> 1k won.

    Matching is positional (digit i of the guess against digit i of the
    draw). The original nested i/j loops compared every digit against every
    position with the same condition repeated three times, which overcounted
    (e.g. a fully identical guess scored 9 "matches" and printed 꽝).
    NOTE(review): if cross-position matching was intended, this needs a
    duplicate-free containment count instead.
    """
    import random
    lotto = str(random.randint(100, 999))
    lucky = input('복권번호를 입력하세요')
    match = sum(1 for a, b in zip(lucky, lotto) if a == b)
    if match == 3:
        prize = '1등 당첨! 상금 백만원!'
    elif match == 2:
        prize = '2등 당첨! 상금 만원!'
    elif match == 1:
        prize = '3등 당첨! 상금 천원!'
    else:
        prize = '꽝 다음 기회에!'
    print(lucky, lotto, prize)
rouletteLotto() | StarcoderdataPython |
#!/usr/bin/python
# NOTE(review): Python 2 code (`print` statement syntax); it will not run
# under Python 3 without converting the prints to function calls.
# (Indentation below restored conventionally — it was lost in this copy.)
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import *
from User import *
# SQLite engine plus a manually declared `users` table.
db=create_engine("sqlite:///tutorial.db")
metadata = MetaData(db)
users_table = Table('users',metadata,
        Column('id',Integer,primary_key=True),
        Column('name',String(20)),
        Column('fullname',String(20)),
        Column('password',String(20)))
metadata.create_all(db)
# One session for the whole script.
Session = sessionmaker(bind = db)
session = Session()
# Insert three sample users.
user1 = User('name1','fullname','ps')
user2 = User('name2','fullname','ps')
user3 = User('name3','fullname','ps')
session.add(user1)
session.add(user2)
session.add(user3)
#session.flush()
session.commit()
# query by name
userlist = session.query(User).filter_by(name='name1')
for user in userlist:
    print user
# query all
userlist = session.query(User)
for user in userlist:
    print user
# update: rename 'name1' and change its password, then re-query it
userlist = session.query(User).filter_by(name='name1')
for user in userlist:
    user.name='user55'
    user.password='<PASSWORD>'
session.commit()
userlist=session.query(User).filter_by(name='user55')
for user in userlist:
    print user
| StarcoderdataPython |
4893782 | <filename>tests/agents_tests/test_pgt.py
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import basetest_pgt as base
from chainerrl.agents.pgt import PGT
# Currently PGT does not support recurrent models
# class TestPGTOnContinuousPOABC(base._TestPGTOnContinuousPOABC):
#
# def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer,
# rbuf, gpu):
# return PGT(model, actor_opt, critic_opt, rbuf, gpu=gpu, gamma=0.9,
# explorer=explorer, replay_start_size=100,
# target_update_method='soft', target_update_interval=1,
# episodic_update=True, update_interval=1,
# act_deterministically=True)
class TestPGTOnContinuousABC(base._TestPGTOnContinuousABC):
    """PGT agent integration test on the continuous-action ABC environment."""

    def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer,
                       rbuf, gpu):
        # Soft target-network updates every step; act deterministically so
        # the base-class evaluation is reproducible.
        return PGT(model, actor_opt, critic_opt, rbuf, gpu=gpu, gamma=0.9,
                   explorer=explorer, replay_start_size=100,
                   target_update_method='soft', target_update_interval=1,
                   act_deterministically=True)
| StarcoderdataPython |
12815399 | <filename>autojs.py
#coding:utf-8
import types
import sublime
import sublime_plugin
import socket
import json
import threading
import traceback
from io import StringIO
from contextlib import closing
# Module-level listen address ('' = all interfaces) and port.
# NOTE(review): AutoJsServer.connect() binds to these globals, not to the
# per-instance hostname/port stored in __init__ — confirm intended.
hostname = ''
port = 1209
class AutoJsServer:
    """TCP bridge between Sublime Text and a single Auto.js device.

    Listens for one device connection and exchanges newline-delimited JSON
    messages with it on a daemon thread.
    """
    def __init__(self, hostname, port):
        self.hostname = hostname
        self.ip=None          # address of the connected device, if any
        self.port = port
        self.conn = None      # accepted device socket
        self.server = None    # listening server socket
        self.t=None           # accept/read thread
    def get_host_ip(self):
        """Best-effort LAN IP of this machine, discovered via a UDP socket
        'connected' to 8.8.8.8 (no packets are actually sent)."""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('8.8.8.8', 80))
            ip = s.getsockname()[0]
        finally:
            s.close()
        #print("请连接至"+ip)
        return ip
    def connect(self):
        """Start listening and spawn the accept thread; no-op when already
        running. NOTE(review): binds to the module-level `hostname`/`port`
        globals rather than self.hostname/self.port — confirm intended."""
        if self.t is not None:
            #sublime.status_message("Can't start server because server is running!")
            print("服务正在运行中(请连接:"+self.get_host_ip()+")...")
            return
        try:
            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server.bind((hostname, port))
            self.server.listen(1)
            #print("server listening at {0}:{1}".format(self.hostname, self.port))
            self.t = threading.Thread(target=self.listen)
            self.t.setDaemon(True)
            self.t.start()
        except Exception as e:
            print(Exception, ":", e)
            traceback.print_exc()
    def listen(self):
        """Accept one device connection, then read and dispatch JSON
        messages line by line until the stream closes."""
        print("等待连接...")
        print("请连接至:"+self.get_host_ip())
        if self.server is None:
            return
        self.conn, addr = self.server.accept()
        print("已连接: {0}:{1}".format(addr[0], addr[1]))
        sublime.status_message("{0}:{1} connected".format(addr[0], addr[1]))
        self.ip=addr[0]
        try:
            with closing(self.conn.makefile(encoding='utf-8')) as f:
                for line in f:
                    try:
                        # Fix: a blank/whitespace line can arrive at end of
                        # stream; feeding it to json.loads would raise
                        # "No JSON object could be decoded".
                        if len(line.strip()) == 0:
                            continue
                        # Fix for https://github.com/hyb1996/Auto.js/issues/249
                        # ("Extra data" from json.loads): one read may carry
                        # several concatenated objects like {...}{...}{...},
                        # so split on '}{' and restore the stripped braces.
                        # NOTE(review): this breaks if a string value ever
                        # contains '}{' — confirm the device never sends that.
                        if line.find('}{') != -1:
                            for item in line.split('}{'):
                                if item.find('{') == -1:
                                    item = '{' + item
                                if item.find('}') == -1:
                                    item = item + '}'
                                json_obj = json.loads(item)
                                self.on_receive(json_obj)
                            continue
                        json_obj = json.loads(line)
                        self.on_receive(json_obj)
                    except Exception as ex:
                        print("Error line:",line)
                        print(Exception, ":", ex)
                        traceback.print_exc()
        except Exception as e:
            print(Exception, ":", e)
            traceback.print_exc()
        finally:
            self.disconnect()
    def on_receive(self, data):
        """Handle one parsed message from the device; only 'log' is known."""
        if data['type'] == 'log':
            print("Log: {0}".format(data['log']))
    def send(self, obj):
        """Serialize `obj` as one JSON line and send it to the device."""
        if self.conn is None:
            sublime.error_message("请先连接到设备!")
        else:
            print("send", obj)
            self.conn.sendall(bytes(json.dumps(obj) + "\n", 'utf-8'))
    def disconnect(self):
        """Close the server socket (if connected) and reset all state."""
        if self.ip is None:
            #print("未连接因此无法断开")
            return
        if self.server is not None:
            try:
                self.server.close()
                print('断开连接')
            except Exception as e:
                print(Exception, ":", e)
            finally:
                self.ip=None
                self.server = None
                self.conn = None
                self.t=None
    def __del__(self):
        self.disconnect()
server = AutoJsServer(hostname, port)
class RunCommand(sublime_plugin.TextCommand):
    """Send the whole current buffer to the device to be executed."""

    def run(self, edit):
        source = self.view.substr(sublime.Region(0, self.view.size()))
        message = {
            'type': 'command',
            'view_id': self.view.id(),
            'name': self.view.file_name(),
            'command': 'run',
            'script': source,
        }
        server.send(message)
class StopCommand(sublime_plugin.TextCommand):
    """Ask the device to stop the script started from this view."""

    def run(self, edit):
        message = {
            'type': 'command',
            'view_id': self.view.id(),
            'command': 'stop',
        }
        server.send(message)
class RerunCommand(sublime_plugin.TextCommand):
    """Stop this view's running script, then run the current buffer again."""

    def run(self, edit):
        source = self.view.substr(sublime.Region(0, self.view.size()))
        message = {
            'type': 'command',
            'view_id': self.view.id(),
            'name': self.view.file_name(),
            'script': source,
            'command': 'rerun',
        }
        server.send(message)
class StopAllCommand(sublime_plugin.TextCommand):
    """Ask the device to stop every running script."""

    def run(self, edit):
        global server
        message = {'type': 'command', 'command': 'stopAll'}
        server.send(message)
class SaveToPhoneCommand(sublime_plugin.TextCommand):
    """Save the current buffer as a script file on the device."""

    def run(self, edit):
        global server
        source = self.view.substr(sublime.Region(0, self.view.size()))
        message = {
            'type': 'command',
            'view_id': self.view.id(),
            'name': self.view.file_name(),
            'script': source,
            'command': 'save',
        }
        server.send(message)
class ConnectCommand(sublime_plugin.TextCommand):
    """Start the local server and wait for a device to connect."""
    def run(self, edit):
        global server
        server.connect()
class DisconnectCommand(sublime_plugin.TextCommand):
    """Close the connection to the device and shut the server down."""
    def run(self, edit):
        global server
        server.disconnect()
| StarcoderdataPython |
3415481 | <reponame>Exterminus/myftp
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
"""
Cliente
<NAME>
UFSJ
"""
import sys
import socket
import getpass
#utilizado para serialização de dados..
import pickle
#conexao segura.. todas as conexoes serao segura
import ssl
class Cliente(object):
    """TCP/TLS FTP-like client: connects to the server, performs login and
    runs an interactive command console with pickle-serialized messages."""
    # IP of the connection server
    #self.ip=""
    def __init__(self):
        # Connection state; the TCP socket is wrapped in TLS on connect.
        self.ip_conexao=0
        self.porta=0
        self.tcp_seguro=""
        self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Secure connection: require and verify the server certificate.
        #self.context = ssl.create_default_context()
        self.context = ssl.SSLContext()
        self.context.verify_mode = ssl.CERT_REQUIRED
        #self.context.check_hostname = True
        # Load the server's public certificate.
        self.context.load_verify_locations('chave_cliente/server.pem')
        self.tcp.settimeout(3)
        self.home=""
    def verifica_comando(self,comando):
        """Pre-validate and split a console command before sending it.

        Returns (status, command, argument, payload). For "newuser" the
        first slot carries the command word instead of a boolean — the
        console relies on that shape.
        """
        particao=comando.split(" ")
        if("newuser" in comando):
            # e.g.: newuser carlos 4363 0
            if(len(particao)<4):
                return False,None,None,None
            cmd=particao[0]
            user=particao[1]
            senha=particao[2]
            permissao=particao[3]
            return cmd,user,senha,permissao
        if("put" in comando):
            # Load the local file to upload.
            cmd=particao[1]
            try:
                arq=open(cmd,"rb")
                file=arq.read()
                arq.close()
                return True, particao[0],particao[1],file
            except Exception as e:
                print("erro put",e)
                return False,None,None,None
        else:
            # (status, command, optional argument, no payload)
            if(len(particao)>1):
                return True,particao[0],particao[1],None
            return True,particao[0],None,None
    def login(self,usuario):
        """Prompt for the password and perform the login handshake.

        Returns (True, home_path) on success, (False, False) otherwise.
        """
        print("Dados conexão\nIP:",self.ip_conexao,"Porta:",self.porta,"Usuario:",usuario)
        senha=getpass.getpass("senha: ")
        #print("IP:",self.ip_conexao,"Porta:",self.porta,"Usuario:",usuario,"senha:",<PASSWORD>ha)
        # NOTE(review): the value below was mangled by dataset anonymization;
        # the original was almost certainly {"usuario":usuario,"senha":senha}.
        msg={"usuario":usuario,"senha":<PASSWORD>ha}
        # The dict is serialized to bytes before sending.
        msg_b=pickle.dumps(msg)
        self.tcp.sendall(msg_b)
        retorno=self.tcp.recv(1024)
        #print("Retorno",pickle.loads(retorno))
        retorno=pickle.loads(retorno)
        if(retorno['estado']):
            # Login succeeded: also return the user's home path.
            return True,retorno['home']
        else:
            return False,False
    def inicia_conexao(self,ip,porta):
        """Open the TLS connection to the server and print its greeting."""
        self.ip_conexao=ip
        self.porta = porta
        destino=(self.ip_conexao,int(self.porta))
        self.tcp_seguro=self.context.wrap_socket(self.tcp)
        # From here on, self.tcp is the TLS-wrapped socket.
        self.tcp=self.tcp_seguro
        self.tcp.connect(destino)
        #self.tcp=self.tcp_seguro
        retorno=self.tcp.recv(1024)
        retorno=pickle.loads(retorno)
        print("Mensagem Inicial:",retorno)
    def encerrar_conexao(self):
        """Close the connection to the server and terminate the process."""
        print("Conexão encerrada.\nBye.")
        self.tcp.close()
        exit(-1)
    ##---------------------
    def exibe_lista(self,lista):
        """Print a listing, one item per line (used for `ls` results)."""
        if(len(lista)<1):
            print("lista vazia")
        else:
            for i in lista:
                print("-",i)
    def salvar_arquivo(self,nome,file):
        """Save a downloaded file to disk; used for `get`.

        NOTE(review): writes `file.data`, so the payload is expected to be a
        buffer-like object with a `.data` attribute — confirm on the server.
        """
        arquivo=open(nome,"wb")
        #print("File salvar",file)
        arquivo.write(file.data)
        print("transferência concluída.")
        arquivo.close()
    ##-------------------------------------
    def processa_resposta(self,resposta):
        """Interpret one server response dict and print/apply its effect."""
        if(resposta is True or resposta is None):
            print(resposta)
        if("help" in resposta):
            print("---Help---\n")
            for i in resposta:
                print("comando:",i,"-",resposta[i])
            print("------")
        elif("rmdir" in resposta):
            #print(resposta['rmdir'])
            if(resposta['rmdir'] is True):
                #print(resposta['rmdir'])
                print("diretório removido")
            else:
                print("erro ao remover o diretório.")
        elif("delete" in resposta):
            #print(resposta['rmdir'])
            if(resposta['delete'] is True):
                #print(resposta['delete'])
                print("arquivo removido")
            else:
                print("erro ao remover o arquivo.")
        elif("mkdir" in resposta):
            #print(resposta['mkdir'])
            if(resposta['mkdir'] is True):
                print(resposta['mkdir'])
                print("diretório criado")
            else:
                print("erro ao criar o diretório.")
        elif("quit" in resposta):
            self.encerrar_conexao()
        elif("get" in resposta):
            if(resposta['get']):
                self.salvar_arquivo(resposta['nome'],resposta['file'])
        elif("put" in resposta):
            if(resposta['put']):
                print("Arquivo enviado com sucesso.")
            else:
                print("Erro ao enviar o arquivo.")
        elif("newuser" in resposta):
            if(resposta['newuser']):
                print("Usuario criado com sucesso.")
            else:
                print("Erro ao criar o usuario.")
            if(resposta['root'] is False):
                print("Você não é um usuário root!!.")
                print("Esta ocorrência será relatada!")
        elif("ls" in resposta):
            self.exibe_lista(resposta['ls'])
        elif("cd" in resposta):
            if(resposta['cd']):
                # Update the prompt's current home path.
                self.home=resposta['home']
                #print(self.home)
            else:
                print(resposta)
    def console(self,home):
        """Interactive command loop: read, validate, send, await response."""
        #print("Digite a sua mensagem ")
        self.home=home
        while(True):
            try:
                comando=""
                comando=input(self.home+">>")
                #comando_inst=pickle.dumps(comando)
                # Pre-validate the typed command (e.g. "cd casa").
                #cd casa
                estado,comando_inst,caminho,file=self.verifica_comando(comando)
                # For "newuser", `estado` holds the command word, not a bool.
                if(estado):
                    if("newuser" in comando):
                        cmd={}
                        cmd['cmd']=estado
                        cmd['user']=comando_inst
                        cmd['senha']=caminho
                        cmd['permissao']=file
                        comando_inst=pickle.dumps(cmd)
                        self.tcp.sendall(comando_inst)
                    else:
                        cmd={}
                        cmd['cmd']=comando_inst
                        cmd['caminho']=caminho
                        cmd['file']=file
                        comando_inst=pickle.dumps(cmd)
                        self.tcp.sendall(comando_inst)
                else:
                    print("verifique o comando digitado.")
                # Read 1KB chunks until a short read signals end of message.
                rec=[]
                recebido=0
                while True:
                    #print("entrou")
                    resposta=""
                    #print("w")
                    resposta=self.tcp.recv(1024)
                    rec.append(resposta)
                    #print("P",len(resposta))
                    recebido=len(resposta)-1024
                    if(recebido<0):
                        break
                resposta=pickle.loads(b"".join(rec))
                self.processa_resposta(resposta)
            except Exception as e:
                print(e)
                self.encerrar_conexao()
    def coleta_dados(self):
        """Read ip, port and login from argv; exit with usage on error."""
        try:
            ip=sys.argv[1]
            porta=sys.argv[2]
            login=sys.argv[3]
        except Exception as e:
            print("Erro!\nDigite:\nip porta login\n")
            exit()
        return ip,porta,login
# Script entry: connect, log in, and start the interactive console.
cliente=Cliente()
ip,porta,login=cliente.coleta_dados()
cliente.inicia_conexao(ip,porta)
senha,home=cliente.login(login)
if(senha):
    cliente.console(home)
else:
    print("Não foi possível efetuar o login.")
| StarcoderdataPython |
122972 | <filename>Hamming Distance.py
class Solution:
    def hammingDistance(self, x: int, y: int) -> int:
        """Number of bit positions where x and y differ.

        XOR sets exactly the differing bits; counting its set bits gives the
        distance. Unlike the original fixed 31-bit loop, this works for
        integers of any size (e.g. bit 31 and above).
        """
        return bin(x ^ y).count("1")
12806210 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""MetricsService for publishing stress test qps data."""
import time
from src.proto.grpc.testing import metrics_pb2
GAUGE_NAME = 'python_overall_qps'
class MetricsServer(metrics_pb2.BetaMetricsServiceServicer):
    """Serves one gauge, `python_overall_qps`, computed from a shared
    request histogram that is reset on every read."""

    def __init__(self, histogram):
        self._start_time = time.time()
        self._histogram = histogram

    def _get_qps(self):
        """Average QPS since the previous call; resets histogram and timer."""
        count = self._histogram.get_data().count
        elapsed = time.time() - self._start_time
        self._histogram.reset()
        self._start_time = time.time()
        return int(count / elapsed)

    def GetAllGauges(self, request, context):
        return [metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=self._get_qps())]

    def GetGauge(self, request, context):
        if request.name != GAUGE_NAME:
            raise Exception('Gauge {} does not exist'.format(request.name))
        return metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=self._get_qps())
| StarcoderdataPython |
4976917 | from .core.cloud_logger import CloudLogger as __CloudLogger
# Package version string.
__version__ = "0.2.3"
# Public API: the single shared CloudLogger instance created below.
__all__ = ["cloud_logger"]
cloud_logger = __CloudLogger() | StarcoderdataPython |
9682854 | <filename>tests/slack_sdk_async/audit_logs/test_async_client.py
import unittest
from slack_sdk.audit_logs.async_client import AsyncAuditLogsClient
from slack_sdk.audit_logs import AuditLogsResponse
from tests.helpers import async_test
from tests.slack_sdk.audit_logs.mock_web_api_server import (
cleanup_mock_web_api_server,
setup_mock_web_api_server,
)
class TestAsyncAuditLogsClient(unittest.TestCase):
    """Exercises AsyncAuditLogsClient against a local mock web API server."""
    def setUp(self):
        # Point the client at the mock server started below instead of Slack.
        self.client = AsyncAuditLogsClient(
            token="<PASSWORD>-", base_url="http://localhost:8888/"
        )
        setup_mock_web_api_server(self)
    def tearDown(self):
        cleanup_mock_web_api_server(self)
    @async_test
    async def test_logs(self):
        # The entry id is whatever the mock server's fixture returns.
        resp: AuditLogsResponse = await self.client.logs(limit=1, action="user_login")
        self.assertEqual(200, resp.status_code)
        self.assertIsNotNone(resp.body.get("entries"))
        self.assertEqual(resp.typed_body.entries[0].id, "xxx-yyy-zzz-111")
    @async_test
    async def test_actions(self):
        resp: AuditLogsResponse = await self.client.actions()
        self.assertEqual(200, resp.status_code)
        self.assertIsNotNone(resp.body.get("actions"))
    @async_test
    async def test_schemas(self):
        resp: AuditLogsResponse = await self.client.schemas()
        self.assertEqual(200, resp.status_code)
        self.assertIsNotNone(resp.body.get("schemas"))
5001359 | <gh_stars>1-10
import boto3
import rasterio
from rasterio.io import MemoryFile
s3 = boto3.client('s3')
class Outputs(object):
    """Wraps a raster array plus its rasterio profile and offers export targets.

    ``arr`` is assumed to be a rasterio-writable band array matching
    ``profile`` (TODO confirm expected shape with callers).
    """
    def __init__(self, arr, profile):
        self.arr = arr          # raster data array
        self.profile = profile  # rasterio dataset profile (driver, dtype, transform, ...)
    def to_array(self):
        """Return the raw array unchanged."""
        return self.arr
    def to_gtiff(self, outfile):
        """Write the raster to *outfile* using the driver named in the profile."""
        with rasterio.open(outfile, 'w', **self.profile) as dst:
            dst.write(self.arr)
    def to_s3(self, bucket, key):
        """Upload the raster to s3://bucket/key without touching the filesystem.

        Fix vs. the original: the MemoryFile is now closed via a context
        manager (it was previously leaked), and the serialized bytes are
        passed to ``put_object`` instead of the MemoryFile wrapper object.
        """
        # https://github.com/mapbox/rasterio/issues/899#issuecomment-253133665
        with MemoryFile() as memfile:
            with memfile.open(**self.profile) as gtiff:
                gtiff.write(self.arr)
            s3.put_object(Bucket=bucket, Key=key, Body=memfile.read())
4832783 | import io
import os
import typer
from .core import DrCov
app = typer.Typer()
@app.command()
def extract_specific_cov_info(
    in_file_path: str, out_file_path: str, module_name: str
) -> None:
    """Copy the coverage info of one module from a drcov file into a new file."""
    dr_cov = DrCov()
    dr_cov.import_from_file(in_file_path)
    dr_cov.export_specific_module_to_file(module_name, out_file_path)
@app.command()
def show_specific_cov_info(in_file_path: str, module_name: str) -> None:
    """Print the coverage info of one module from a drcov file.

    The single-module view is produced by exporting to an in-memory buffer
    and re-importing it, so the printed form matches what a real exported
    file would contain.
    """
    dr_cov_in = DrCov()
    dr_cov_in.import_from_file(in_file_path)
    bio = io.BytesIO()
    dr_cov_in.export_specific_module_to_binaryio(bio, module_name)
    bio.seek(0)  # rewind before re-reading the buffer
    dr_cov_out = DrCov()
    dr_cov_out.import_from_binaryio(bio)
    typer.echo(str(dr_cov_out))
@app.command()
def show_all_cov_info(in_file_path: str) -> None:
    """Print the full contents of a drcov coverage file."""
    dr_cov = DrCov()
    dr_cov.import_from_file(in_file_path)
    typer.echo(str(dr_cov))
| StarcoderdataPython |
3324812 | import os
from os import path
from collections import namedtuple
from hashlib import sha1
import cPickle as pickle
from itertools import imap, ifilterfalse
from contextlib import contextmanager
import re
import json
FileItem = namedtuple('FileItem', 'path checksum size time')
CHUNK_SIZE = 64 * 1024 # 64 KB
def repo_files(root_path, skip):
    """Yield repo-relative paths ('a/b.txt') of every file under *root_path*.

    *skip* is a predicate applied to bare directory and file NAMES (not full
    paths); matching entries are excluded.  The '.mf' metadata directory at
    the repo root is always excluded.  (Python 2 module: uses itertools'
    ``ifilterfalse``.)
    """
    assert not root_path.endswith('/')
    for parent_path, dir_names, file_names in os.walk(root_path):
        parent_rel_path = parent_path[len(root_path):]
        if parent_rel_path == '':
            # At the repo root: never descend into the .mf metadata directory.
            dir_names.remove('.mf')
        # In-place slice assignment so os.walk actually prunes skipped dirs.
        dir_names[:] = ifilterfalse(skip, dir_names)
        for name in ifilterfalse(skip, file_names):
            # parent_rel_path starts with '/'; drop it to get a relative path.
            yield (parent_rel_path + '/' + name)[1:]
def parse_ignore_file(f):
    """Compile the lines of a .mfignore file (or any line iterable) into a
    ``skip(name)`` predicate.

    Rule syntax, one rule per line:
      ``*suffix`` -> matches names ending with ``suffix``
      ``prefix*`` -> matches names starting with ``prefix``
      ``name``    -> exact match

    Fix vs. the original: blank (whitespace-only) lines are now ignored;
    previously each one compiled into a useless ``name == ''`` rule.
    """
    def rule(line):
        if line.startswith('*'):
            return lambda p: p.endswith(line[1:])
        elif line.endswith('*'):
            return lambda p: p.startswith(line[:-1])
        else:
            return lambda p: p == line

    rules = []
    for raw_line in f:
        line = raw_line.strip()
        if not line:
            continue  # skip blank lines instead of compiling an empty rule
        rules.append(rule(line))

    def skip(p):
        for r in rules:
            if r(p):
                return True
        return False

    return skip
def repo_file_events(root_path, use_cache=False):
    """Yield a FileItem (path, sha1 checksum, size, mtime) for every file in
    the repo at *root_path*, honouring .mfignore rules.

    When *use_cache* is true, a previously pickled cache at .mf/cache is
    consulted: files whose (size, mtime) pair is unchanged reuse the cached
    checksum instead of re-hashing.  A fresh cache is always written at the
    end.  (Python 2 module: the cache is pickled with cPickle, protocol 2.)
    """
    ignore_path = path.join(root_path, '.mfignore')
    if path.isfile(ignore_path):
        with open(ignore_path, 'r') as f:
            skip = parse_ignore_file(f)
    else:
        skip = lambda p: False  # nothing ignored without an .mfignore file
    cache_path = path.join(root_path, '.mf/cache')
    if use_cache and path.isfile(cache_path):
        with open(cache_path, 'rb') as f:
            cache = pickle.load(f)
    else:
        cache = {}
    new_cache = {}
    for file_path in repo_files(root_path, skip):
        file_full_path = path.join(root_path, file_path)
        file_stat = os.stat(file_full_path)
        file_size = file_stat.st_size
        file_time = file_stat.st_mtime
        file_item = None
        if file_path in cache:
            cached_item = cache[file_path]
            # Cheap change detection: same size and mtime -> trust the
            # cached checksum without re-reading the file.
            if (file_size, file_time) == (cached_item.size, cached_item.time):
                file_item = cached_item
        if file_item is None:
            # Cache miss (or stale): hash the file in CHUNK_SIZE pieces.
            sha1_hash = sha1()
            size_count = 0
            with open(file_full_path, 'rb') as f:
                while True:
                    data = f.read(CHUNK_SIZE)
                    if not data:
                        break
                    sha1_hash.update(data)
                    size_count += len(data)
            # Guards against the file changing size between stat and read.
            assert size_count == file_size
            file_checksum = sha1_hash.hexdigest()
            file_item = FileItem(file_path, file_checksum,
                                 file_size, file_time)
        yield file_item
        new_cache[file_path] = file_item
    # Persist the refreshed cache for the next run.
    with open(cache_path, 'wb') as f:
        pickle.dump(new_cache, f, protocol=2)
# One version-file entry: '"<40-hex-sha1>" <size> "<json-escaped path>"'.
file_item_pattern = re.compile(r'^(?P<checksum>"[0-9a-f]{40}")\s*'
                               r'(?P<size>\d+)\s*'
                               r'(?P<path>".*")\s*$')
def jstr_load(s):
    """Decode a JSON string literal back into a byte string (Python 2 str).

    latin-1 is a lossless 1:1 byte<->codepoint mapping, so this exactly
    reverses jstr_dump.
    """
    assert isinstance(s, str)
    return json.loads(s).encode('latin-1')
def jstr_dump(s):
    """Encode a byte string (Python 2 str) as a JSON string literal."""
    assert isinstance(s, str)
    return json.dumps(s.decode('latin-1'))
def string_to_file_item(s):
    """Parse one version-file line into a FileItem (mtime is not stored,
    so the ``time`` field is always None)."""
    m = file_item_pattern.match(s)
    assert m is not None, "malformed file entry: %r" % s
    return FileItem(jstr_load(m.group('path')),
                    jstr_load(m.group('checksum')),
                    int(m.group('size')),
                    None)
def file_item_to_string(file_item):
    """Render a FileItem as one version-file line (inverse of
    string_to_file_item, minus the mtime)."""
    return "%s %10d %s" % (jstr_dump(file_item.checksum),
                           file_item.size,
                           jstr_dump(file_item.path))
def read_version_file(fh):
    """Lazily parse a version file: yields one FileItem per line."""
    return imap(string_to_file_item, fh)
@contextmanager
def write_version_file(fh):
    """Context manager yielding a writer function; call it once per FileItem
    to append that item as a line to *fh*."""
    def write_file_item(file_item):
        fh.write(file_item_to_string(file_item) + '\n')
    yield write_file_item
1865474 | <filename>serie primos (1).py
#!/usr/bin/env python
# coding: utf-8
# In[3]:
def _next_prime(n):
    """Return the smallest prime strictly greater than *n* (n >= 1)."""
    candidate = n + 1
    while True:
        if candidate >= 2 and all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            return candidate
        candidate += 1


def SeriePrimos(limite1):
    """Print and return the alternating prime series up to *limite1* terms.

    The k-th term is k / p_k, where p_k is the k-th prime, with alternating
    signs:  1/2 - 2/3 + 3/5 - 4/7 + ...

    Each term is echoed to stdout in the same format as before
    ('1 / 2  + ...  - ...'); the partial sum is returned.

    Rewritten from the original flag-variable prime search (which counted
    divisors with nested loops and `estado`/`divisor` state) into a small
    helper-based loop with identical output.
    """
    suma = 0
    prime = 1  # advanced to p_k at the top of each iteration
    for k in range(1, limite1 + 1):
        prime = _next_prime(prime)
        if k % 2 == 0:
            suma = suma - (k / prime)
            print(f' - {k} / {prime}', end=" ")
        else:
            suma = suma + (k / prime)
            if k == 1:
                print(f'{k} / {prime}', end=" ")
            else:
                print(f' + {k} / {prime}', end=" ")
    return suma
# Script entry: read the number of terms from stdin and print the partial sum.
limite1 = int(input("Ingrese un límite:"))
print(' = ', SeriePrimos(limite1))
# In[ ]:
| StarcoderdataPython |
# E M O J I E S
# General-purpose emoji constants, spelled with explicit \U escapes so the
# source stays ASCII-safe.
SLOTH_EMOJI = "\U0001F9A5"
GLOBE_EMOJI = "\U0001F310"   # globe with meridians
AUTO_EMOJI = "\U0001F30E"    # globe showing the Americas
STAR_EMOJI = "\U00002B50"
CLOCK_EMOJI = "\U0001F552"
SAD_EMOJI = "\U00002639"
SLEEP_EMOJI = "\U0001F634"
REMOVE_EMOJI = "\U0001F5D1\U0000FE0F"  # wastebasket + emoji presentation selector
ADD_EMOJI = "\U00002795"
VIEW_EMOJI = "\U0001F4C4"
NEUTRAL_FACE_EMOJI = "\U0001F610"
CALL_ME_EMOJI = "\U0001F919"
# Multi-codepoint ZWJ sequence: woman + heart + man.
COUPLE_EMOJI = "\U0001F469\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F468"
# F L A G - E M O J I E S
# One representative country flag per language; languages without a natural
# country flag fall back to WHITE_FLAG_EMOJI.
WHITE_FLAG_EMOJI = "\U0001F3F3"
AFRIKAANS_FLAG_EMOJI = "\U0001F1FF\U0001F1E6"
ALBANIAN_FLAG_EMOJI = "\U0001F1E6\U0001F1F1"
AMHARIC_FLAG_EMOJI = "\U0001F1EA\U0001F1F9"
ARABIC_FLAG_EMOJI = "\U0001F1F8\U0001F1E6"
ARMENIAN_FLAG_EMOJI = "\U0001F1E6\U0001F1F2"
AZERBAIJANI_FLAG_EMOJI = "\U0001F1E6\U0001F1FF"
BASQUE_FLAG_EMOJI = WHITE_FLAG_EMOJI
BELARUSIAN_FLAG_EMOJI = "\U0001F1E7\U0001F1FE"
BENGALI_FLAG_EMOJI = "\U0001F1E7\U0001F1E9"
BOSNIAN_FLAG_EMOJI = "\U0001F1E7\U0001F1E6"
BULGARIAN_FLAG_EMOJI = "\U0001F1E7\U0001F1EC"
CATALAN_FLAG_EMOJI = "\U0001F1E6\U0001F1E9"
CEBUANO_FLAG_EMOJI = "\U0001F1F5\U0001F1ED"
CHICHEWA_FLAG_EMOJI = "\U0001F1F2\U0001F1FC"
CHINESE_SIMPLIFIED_FLAG_EMOJI = "\U0001F1E8\U0001F1F3"
CHINESE_TRADITIONAL_FLAG_EMOJI = "\U0001F1E8\U0001F1F3"
CORSICAN_FLAG_EMOJI = "\U0001F1EB\U0001F1F7"
CROATIAN_FLAG_EMOJI = "\U0001F1ED\U0001F1F7"
CZECH_FLAG_EMOJI = "\U0001F1E8\U0001F1FF"
DANISH_FLAG_EMOJI = "\U0001F1E9\U0001F1F0"
DUTCH_FLAG_EMOJI = "\U0001F1F3\U0001F1F1"
ENGLISH_FLAG_EMOJI = "\U0001F1EC\U0001F1E7"
ESPERANTO_FLAG_EMOJI = WHITE_FLAG_EMOJI
ESTONIAN_FLAG_EMOJI = "\U0001F1EA\U0001F1EA"
FILIPINO_FLAG_EMOJI = "\U0001F1F5\U0001F1ED"
FINNISH_FLAG_EMOJI = "\U0001F1EB\U0001F1EE"
FRENCH_FLAG_EMOJI = "\U0001F1EB\U0001F1F7"
FRISIAN_FLAG_EMOJI = "\U0001F1F3\U0001F1F1"
GALICIAN_FLAG_EMOJI = "\U0001F1EA\U0001F1F8"
GEORGIAN_FLAG_EMOJI = "\U0001F1EC\U0001F1EA"
GERMAN_FLAG_EMOJI = "\U0001F1E9\U0001F1EA"
GREEK_FLAG_EMOJI = "\U0001F1EC\U0001F1F7"
GUJARATI_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
HAITIAN_CREOLE_FLAG_EMOJI = "\U0001F1ED\U0001F1F9"
HAUSA_FLAG_EMOJI = "\U0001F1F3\U0001F1EA"
HAWAIIAN_FLAG_EMOJI = "\U0001F1FA\U0001F1F8"
HEBREW_FLAG_EMOJI = "\U0001F1EE\U0001F1F1"
HINDI_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
HMONG_FLAG_EMOJI = "\U0001F1E8\U0001F1F3"
HUNGARIAN_FLAG_EMOJI = "\U0001F1ED\U0001F1FA"
ICELANDIC_FLAG_EMOJI = "\U0001F1EE\U0001F1F8"
IGBO_FLAG_EMOJI = "\U0001F1F3\U0001F1EC"
INDONESIAN_FLAG_EMOJI = "\U0001F1EE\U0001F1E9"
IRISH_FLAG_EMOJI = "\U0001F1EE\U0001F1EA"
ITALIAN_FLAG_EMOJI = "\U0001F1EE\U0001F1F9"
JAPANESE_FLAG_EMOJI = "\U0001F1EF\U0001F1F5"
JAVANESE_FLAG_EMOJI = "\U0001F1EE\U0001F1E9"
KANNADA_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
KAZAKH_FLAG_EMOJI = "\U0001F1F0\U0001F1FF"
KHMER_FLAG_EMOJI = "\U0001F1F0\U0001F1ED"
KOREAN_FLAG_EMOJI = "\U0001F1F0\U0001F1F7"
KURDISH_KURMANJI_FLAG_EMOJI = "\U0001F1EE\U0001F1F6"
KYRGYZ_FLAG_EMOJI = "\U0001F1F0\U0001F1EC"
LAO_FLAG_EMOJI = "\U0001F1F1\U0001F1E6"
LATIN_FLAG_EMOJI = WHITE_FLAG_EMOJI
LATVIAN_FLAG_EMOJI = "\U0001F1F1\U0001F1FB"
LITHUANIAN_FLAG_EMOJI = "\U0001F1F1\U0001F1F9"
LUXEMBOURGISH_FLAG_EMOJI = "\U0001F1F1\U0001F1FA"
MACEDONIAN_FLAG_EMOJI = "\U0001F1F2\U0001F1F0"
MALAGASY_FLAG_EMOJI = "\U0001F1F2\U0001F1EC"
MALAY_FLAG_EMOJI = "\U0001F1F2\U0001F1FE"
MALAYALAM_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
MALTESE_FLAG_EMOJI = "\U0001F1F2\U0001F1F9"
MAORI_FLAG_EMOJI = "\U0001F1F3\U0001F1FF"
# Fix: Marathi is an Indian language; it previously carried the New Zealand
# flag (a copy-paste from MAORI above). Use the IN flag like the other
# Indian languages (HINDI, GUJARATI, KANNADA, ...).
MARATHI_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
MONGOLIAN_FLAG_EMOJI = "\U0001F1F2\U0001F1F3"
MYANMAR_BURMESE_FLAG_EMOJI = "\U0001F1F2\U0001F1F2"
NEPALI_FLAG_EMOJI = "\U0001F1F3\U0001F1F5"
NORWEGIAN_FLAG_EMOJI = "\U0001F1F3\U0001F1F4"
ODIA_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
PASHTO_FLAG_EMOJI = "\U0001F1E6\U0001F1EB"
PERSIAN_FLAG_EMOJI = "\U0001F1EE\U0001F1F7"
POLISH_FLAG_EMOJI = "\U0001F1F5\U0001F1F1"
PORTUGUESE_FLAG_EMOJI = "\U0001F1F5\U0001F1F9"
PUNJABI_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
ROMANIAN_FLAG_EMOJI = "\U0001F1F7\U0001F1F4"
RUSSIAN_FLAG_EMOJI = "\U0001F1F7\U0001F1FA"
SAMOAN_FLAG_EMOJI = "\U0001F1FC\U0001F1F8"
SCOTS_GAELIC_FLAG_EMOJI = "\U0001F1EC\U0001F1E7"
SERBIAN_FLAG_EMOJI = "\U0001F1F7\U0001F1F8"
SHONA_FLAG_EMOJI = "\U0001F1FF\U0001F1FC"
SINDHI_FLAG_EMOJI = "\U0001F1F5\U0001F1F0"
SINHALA_FLAG_EMOJI = "\U0001F1F1\U0001F1F0"
SLOVAK_FLAG_EMOJI = "\U0001F1F8\U0001F1F0"
SLOVENIAN_FLAG_EMOJI = "\U0001F1F8\U0001F1EE"
SOMALI_FLAG_EMOJI = "\U0001F1F8\U0001F1F4"
SPANISH_FLAG_EMOJI = "\U0001F1EA\U0001F1F8"
SUNDANESE_FLAG_EMOJI = "\U0001F1EE\U0001F1E9"
SWAHILI_FLAG_EMOJI = "\U0001F1F8\U0001F1F8"
SWEDISH_FLAG_EMOJI = "\U0001F1F8\U0001F1EA"
TAJIK_FLAG_EMOJI = "\U0001F1F9\U0001F1EF"
TAMIL_FLAG_EMOJI = "\U0001F1F1\U0001F1F0"
TELUGU_FLAG_EMOJI = "\U0001F1EE\U0001F1F3"
THAI_FLAG_EMOJI = "\U0001F1F9\U0001F1ED"
TURKISH_FLAG_EMOJI = "\U0001F1F9\U0001F1F7"
UKRAINIAN_FLAG_EMOJI = "\U0001F1FA\U0001F1E6"
URDU_FLAG_EMOJI = "\U0001F1F5\U0001F1F0"
UYGHUR_FLAG_EMOJI = "\U0001F1E8\U0001F1F3"
UZBEK_FLAG_EMOJI = "\U0001F1FA\U0001F1FF"
VIETNAMESE_FLAG_EMOJI = "\U0001F1FB\U0001F1F3"
# Welsh uses the tag-sequence flag for gbwls (black flag + tag letters).
WELSH_FLAG_EMOJI = "\U0001F3F4\U000E0067\U000E0062\U000E0077\U000E006C\U000E0073\U000E007F"
XHOSA_FLAG_EMOJI = "\U0001F1FF\U0001F1E6"
YIDDISH_FLAG_EMOJI = "\U0001F1F7\U0001F1FA"
YORUBA_FLAG_EMOJI = "\U0001F1F3\U0001F1EC"
ZULU_FLAG_EMOJI = "\U0001F1FF\U0001F1E6"
3494913 | <gh_stars>0
from setuptools import setup, find_packages
# Packaging metadata for the nixmyshell CLI (a shell.nix generator).
setup(
    name='nixmyshell',
    version='0.0.1',
    description='A script to create shell.nix files',
    author="<NAME>",
    license="MIT",
    packages=find_packages(),
    entry_points={
        # Installs the `nixmyshell` console command, dispatching to main().
        'console_scripts': [
            'nixmyshell=nixmyshell.nixmyshell:main'
        ]
    }
)
| StarcoderdataPython |
import tensorflow as tf
tf.set_random_seed(777)  # for reproducibility
x_data = [1, 2, 3]
y_data = [1, 2, 3]
# Weight variable with a random initial value.
W = tf.Variable(tf.random_normal([1]), name='weight')
# Placeholders that receive the training data at run time.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Linear regression model with no bias term.
hypothesis = X * W
# cost/loss function (MSE)
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize the cost with hand-written gradient descent:
# this is the manual expansion of TensorFlow's built-in optimizer
# used in earlier examples.
# Minimize: Gradient Descent using derivative: W -= learning_rate * derivative
learning_rate = 0.1
gradient = tf.reduce_mean((W * X - Y) * X)  # d(cost)/dW for this model
descent = W - learning_rate * gradient
update = W.assign(descent)  # store the freshly computed descent back into W
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(21):
    # Running the `update` op executes the whole training graph above.
    sess.run(update, feed_dict={X: x_data, Y: y_data})
    print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
    # sess.run(W) prints the fitted weight, not the random initial value:
    # the update above has already run in this session.
'''
0 1.93919 [ 1.64462376]
1 0.551591 [ 1.34379935]
2 0.156897 [ 1.18335962]
3 0.0446285 [ 1.09779179]
4 0.0126943 [ 1.05215561]
5 0.00361082 [ 1.0278163]
6 0.00102708 [ 1.01483536]
7 0.000292144 [ 1.00791216]
8 8.30968e-05 [ 1.00421977]
9 2.36361e-05 [ 1.00225055]
10 6.72385e-06 [ 1.00120032]
11 1.91239e-06 [ 1.00064015]
12 5.43968e-07 [ 1.00034142]
13 1.54591e-07 [ 1.00018203]
14 4.39416e-08 [ 1.00009704]
15 1.24913e-08 [ 1.00005174]
16 3.5322e-09 [ 1.00002754]
17 9.99824e-10 [ 1.00001466]
18 2.88878e-10 [ 1.00000787]
19 8.02487e-11 [ 1.00000417]
20 2.34053e-11 [ 1.00000226]
'''
11271415 | from compressario.compress import Compress
from compressario.diagnostics import (
compress_report,
savings,
savings_report,
storage_size,
)
#from compressario.formatter import StorageSize
from compressario.type_compressor import BaseTypeCompressor, DefaultCompressor
from compressario.typing import pdT
from compressario.compression_algorithms import type_compressions
# Public API of the package. Fix: the previous list named "TypeCompressor"
# and "StorageSize", neither of which is imported above (the StorageSize
# import is commented out), so `from compressario import *` raised
# AttributeError. Every name below is actually imported in this module.
__all__ = [
    "Compress",
    "storage_size",
    "BaseTypeCompressor",
    "DefaultCompressor",
    "savings",
    "savings_report",
    "compress_report",
]
| StarcoderdataPython |
8046704 | <reponame>Michael-F-Bryan/cheesecake_kwalitee_index<filename>cheesecake_kwalitee_index/utils.py
"""
Miscellaneous utility functions, classes, constants and decorators.
"""
import logging
import sys
def get_logger(name, log_file, log_level=None):
    """
    Get a logger object which is set up properly with the correct formatting,
    logfile, etc.

    Parameters
    ----------
    name: str
        The __name__ of the module calling this function.
    log_file: str
        The filename of the file to log to, or the special values
        'stdout' / 'stderr' to log to the corresponding stream.
    log_level: int, optional
        A ``logging`` level; defaults to ``logging.INFO`` when falsy.

    Returns
    -------
    logging.Logger
        A logging.Logger object that can be used to log to a common file.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level or logging.INFO)
    # Only attach a handler the first time this named logger is requested.
    # Fix: the handler (which opens the log file for FileHandler) is now
    # created inside the guard -- previously it was constructed on every
    # call and leaked a file descriptor when the logger was already set up.
    if not logger.handlers:
        if log_file == 'stdout':
            handler = logging.StreamHandler(sys.stdout)
        elif log_file == 'stderr':
            handler = logging.StreamHandler(sys.stderr)
        else:
            handler = logging.FileHandler(log_file)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s: %(message)s',
            datefmt='%Y/%m/%d %I:%M:%S %p'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
| StarcoderdataPython |
222101 | <filename>gwsc_ingest/cli.py
import argparse
from .era5.cli import add_era5_parsers
def create_gwsc_command_parser():
    """Build the top-level argparse parser for the gwsc command, with one
    required sub-command per registered data source (currently ERA5)."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='Commands', dest='sub-command')
    subparsers.required = True  # a sub-command must be supplied
    add_era5_parsers(subparsers)
    return parser
def gwsc_command():
    """Console entry point: parse argv and dispatch to the sub-command's
    handler (each sub-parser sets ``func`` via set_defaults)."""
    parser = create_gwsc_command_parser()
    args = parser.parse_args()
    args.func(args)
| StarcoderdataPython |
5152050 | # -*- coding: utf-8 -*-
from collections import defaultdict
from functools import wraps
from time import time
from notetool.tool.log import logger
class Cache(object):
    """
    A simple dictionary with a size limit and hit counters.

    Eviction is coarse: when ``max_size`` is reached the WHOLE cache is
    cleared (and optionally grown) rather than evicting single entries.
    Storing ``None`` as a value is indistinguishable from a miss in
    :meth:`get`.
    """
    def __init__(self, max_size=10 ** 5, increase=False, do_not_increase_after=10 ** 6):
        """
        :param max_size: maximum number of items that the cache can store
        :param increase: whether to increase the size dynamically when reached the max.
            If you specify the True, the size will simply doubled. If you specify a number,
            the size will be multiplied by that amount.
        :param do_not_increase_after: prevent the cache from growing
            at certain number of items
        """
        self._storage = dict()
        self.init_size = max_size   # the originally requested size
        self.max_size = max_size    # current (possibly grown) limit
        self.hits = 0               # lookups that found a value
        self.total_queries = 0      # all lookups, hit or miss
        if increase is True:
            increase = 2  # bare True means "double on each growth step"
        self.increase = increase
        self.do_not_increase_after = do_not_increase_after
    def __len__(self):
        return len(self._storage)
    def _clear(self):
        self._storage.clear()
    def save(self, name, value, **kwargs):
        """Write the value to cache.

        When the cache is full, everything is dropped and the size limit is
        (optionally) increased before storing the new entry.
        """
        if len(self) >= self.max_size:
            logger.warning(
                'Maximum size for cache reached (%s).', self.max_size)
            self._clear()
            self._increase_size()
        self._save(name, value, **kwargs)
    # noinspection PyUnusedLocal
    def _save(self, name, value, **kwargs):
        # Storage hook; subclasses may use **kwargs (e.g. expiration time).
        self._storage[name] = value
    def get(self, name):
        """Get the value from a cache.

        Returns None on a miss; only non-None results count as hits.
        """
        self.total_queries += 1
        value = self._get(name)
        if value is None:
            return None
        self.hits += 1
        return value
    def _get(self, name):
        # Lookup hook; subclasses may add expiry or unwrapping here.
        return self._storage.get(name)
    def _increase_size(self):
        # Grow max_size by the configured multiplier, capped at
        # do_not_increase_after; no-op once the cap is reached.
        if self.max_size >= self.do_not_increase_after:
            return
        if self.increase and self.increase > 1:
            new_max = self.max_size * self.increase
            self.max_size = min(new_max, self.do_not_increase_after)
        else:
            logger.info('Bad increase multiplier: %s', self.increase)
    def delete(self, name):
        """Just drop the value from a cache.

        NOTE(review): returns ``bool(popped_value)``, so deleting an entry
        whose stored value is falsy (0, '', ...) reports False even though
        the entry was removed -- confirm callers only rely on this for
        truthy values.
        """
        return bool(self._storage.pop(name, False))
    @property
    def hit_rate(self):
        """How much queries successfully reached the cache"""
        if not self.total_queries:
            return 0
        return float(self.hits) / self.total_queries
# noinspection SpellCheckingInspection
# https://english.stackexchange.com/a/312087
class ExpirableCache(Cache):
    """
    The cache with limited support for expiration.

    Expiry is lazy: an entry's lifetime is only checked when it is read.
    Pass ``time=<seconds>`` to :meth:`save` to give an entry a lifetime.
    """
    def _save(self, name, value, **kwargs):
        """Store (insert-timestamp, lifetime-or-None, value); the lifetime
        comes from the optional ``time`` keyword argument."""
        _time = kwargs.get('time')
        self._storage[name] = (time(), _time, value)
    def _get(self, name):
        item = self._storage.get(name)
        if item is None:
            return None
        start, _time, value = item
        # expires
        if _time is not None and time() - start > _time:
            # Fix: evict AND report a miss. Previously the stale value was
            # still returned once after being deleted (and counted as a hit).
            self.delete(name)
            return None
        return value
def memoized_two_args(func, cache=None):  # pragma: no cover
    """
    Memoize results of two-argument function.
    The first argument should be more 'volatile' and the second one more 'constant'.
    """
    if cache is None:
        cache = defaultdict(dict)

    @wraps(func)
    def wrapper(arg1, arg2):
        per_arg2 = cache[arg2]
        # Compute and remember the value only on the first call for this pair.
        if arg1 not in per_arg2:
            per_arg2[arg1] = func(arg1, arg2)
        return per_arg2[arg1]

    return wrapper
def init_once(func):
    """
    Implements common behaviour
    def some_param(self):
        if self._some_param is None:
            self._some_param = ... # init code
        return self._some_param
    """
    name = func.__name__
    result_attr = '__result_' + name

    @wraps(func)
    def wrapper(obj):
        # Return the memoized result when it is already stored on the object.
        if hasattr(obj, result_attr):
            return getattr(obj, result_attr)
        value = func(obj)
        # Only persist the result when `func` is an instance or class method
        # (i.e. the object actually exposes an attribute with the func name).
        if hasattr(obj, name):
            setattr(obj, result_attr, value)
        return value

    return wrapper
| StarcoderdataPython |
6630824 | <reponame>listuser/jc<filename>tests/test_df.py
import os
import json
import unittest
import jc.parsers.df
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
    def setUp(self):
        """Load every fixture pair.

        For each entry, '<rel>.out' (raw command output) is stored as
        ``self.<attr>`` and '<rel>.json' (the expected parsed structure)
        as ``self.<attr>_json``. This replaces nine copy-pasted open/read
        pairs with a single data-driven loop.
        """
        fixtures = {
            'centos_7_7_df': 'centos-7.7/df',
            'ubuntu_18_4_df': 'ubuntu-18.04/df',
            'osx_10_11_6_df': 'osx-10.11.6/df',
            'osx_10_14_6_df': 'osx-10.14.6/df',
            'centos_7_7_df_h': 'centos-7.7/df-h',
            'ubuntu_18_4_df_h': 'ubuntu-18.04/df-h',
            'osx_10_11_6_df_h': 'osx-10.11.6/df-h',
            'osx_10_14_6_df_h': 'osx-10.14.6/df-h',
            'generic_df_long_filesystem': 'generic/df-long-filesystem',
        }
        for attr, rel_path in fixtures.items():
            # input
            with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/' + rel_path + '.out'), 'r', encoding='utf-8') as f:
                setattr(self, attr, f.read())
            # expected output
            with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/' + rel_path + '.json'), 'r', encoding='utf-8') as f:
                setattr(self, attr + '_json', json.loads(f.read()))

    def test_df_nodata(self):
        """
        Test plain 'df' with no data
        """
        self.assertEqual(jc.parsers.df.parse('', quiet=True), [])

    def test_df_centos_7_7(self):
        """
        Test plain 'df' on Centos 7.7
        """
        self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df, quiet=True), self.centos_7_7_df_json)

    def test_df_ubuntu_18_4(self):
        """
        Test plain 'df' on Ubuntu 18.4
        """
        self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df, quiet=True), self.ubuntu_18_4_df_json)

    def test_df_osx_10_11_6(self):
        """
        Test plain 'df' on OSX 10.11.6
        """
        self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df, quiet=True), self.osx_10_11_6_df_json)

    def test_df_osx_10_14_6(self):
        """
        Test plain 'df' on OSX 10.14.6
        """
        self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df, quiet=True), self.osx_10_14_6_df_json)

    def test_df_h_centos_7_7(self):
        """
        Test 'df -h' on Centos 7.7
        """
        self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df_h, quiet=True), self.centos_7_7_df_h_json)

    def test_df_h_ubuntu_18_4(self):
        """
        Test 'df -h' on Ubuntu 18.4
        """
        self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df_h, quiet=True), self.ubuntu_18_4_df_h_json)

    def test_df_h_osx_10_11_6(self):
        """
        Test 'df -h' on OSX 10.11.6
        """
        self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df_h, quiet=True), self.osx_10_11_6_df_h_json)

    def test_df_h_osx_10_14_6(self):
        """
        Test 'df -h' on OSX 10.14.6
        """
        self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df_h, quiet=True), self.osx_10_14_6_df_h_json)

    def test_df_long_filesystem(self):
        """
        Test older version of 'df' with long filesystem data
        """
        self.assertEqual(jc.parsers.df.parse(self.generic_df_long_filesystem, quiet=True), self.generic_df_long_filesystem_json)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
8070119 | """Testing parser.py module."""
from pyclip2org import parser
from pyclip2org.parser import Book
from pyclip2org.parser import Clipping
def test_book_add_none_clipping() -> None:
    """Adding ``None`` as a clipping must be ignored by the Book."""
    book = Book("Test", "Author")
    book.add_clipping(None)
    assert len(book.clippings) == 0
    assert str(book) == "Title:Test\tAuthor:Author\tClippings:0"
def test_clipping_head() -> None:
    """A Clipping renders its org-mode header ('** ' + metadata) and its
    wrapped content as expected."""
    clipping = Clipping(
        "Test",
        "Author",
        "- Mi nota Posición 128 | Añadido el miércoles 6 de febrero de 2013, 13:00:19",
        "Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?",
    )
    assert (
        clipping.get_header()
        == "** Mi nota Posición 128 | Añadido el miércoles 6 de febrero de 2013, 13:00:19"
    )
    assert (
        clipping.get_wrap_content()
        == "Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"
    )
def test_parse_bad_clipping_size() -> None:
    """A clipping with too few lines (title + metadata, no content) yields None."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is None
def test_parse_bad_clipping_author() -> None:
    """A title line without a parenthesised '(Author)' suffix yields None."""
    raw_string = """Book 2
- Your Highlight on Location 1012 | Added on Saturday, February 9, 2013 10:40:33
Testing"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is None
def test_parse_bad_clipping_title() -> None:
    """A first line that is only a parenthesised value (no title before the
    author) yields None."""
    raw_string = """(Book 2)
- Your Highlight on Location 1012 | Added on Saturday, February 9, 2013 10:40:33
Testing"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is None
def test_parse_multiple_books_es() -> None:
    """Parsing Spanish clippings for two books sorts a note + highlight into
    book 1 and a bookmark into book 2."""
    raw_data = """Book 1 (Author 1)
- Mi nota Posición 128 | Añadido el miércoles 6 de febrero de 2013, 13:00:19
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?
==========
Book 1 (Author 1)
- Tu subrayado en la página 23 | Posición 2015-2065 | Añadido el lunes, 4 de \\
agosto de 2014 19:52:23
Delectus hic ipsam iure quae exercitationem distinctio voluptatem.
==========
Book 2 (Spanish Edition) (Author 2)
- Mi marcador Posición 1012 | Añadido el sábado 9 de febrero de 2013, 10:40:33
=========="""
    library = parser.parser_my_clippings_data(raw_data, "es")
    assert len(library) == 2
    assert len(library[0].get_highlights()) == 1
    assert len(library[0].get_notes()) == 1
    assert len(library[0].get_marks()) == 0
    assert len(library[1].get_highlights()) == 0
    assert len(library[1].get_notes()) == 0
    assert len(library[1].get_marks()) == 1
def test_parse_single_note_es() -> None:
    """Parse one Spanish note and verify every extracted field."""
    raw_string = """Book 1 (Author 1)
- Mi nota Posición 128 | Añadido el miércoles 6 de febrero de 2013, 13:00:19
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 1"
    assert clipping.author == "Author 1"
    assert (
        clipping.metadata
        == "- Mi nota Posición 128 | Añadido el miércoles 6 de febrero de 2013, 13:00:19"
    )
    assert (
        clipping.content
        == "Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"
    )
    # Metadata is parsed lazily, with language-specific patterns.
    clipping.parse_metadata("es")
    assert clipping.position_start == 128
    assert clipping.type == "NOTE"
def test_parse_single_highlight_es() -> None:
    """Parse one Spanish highlight (with page and position range) and verify
    every extracted field."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Tu subrayado en la página 23 | Posición 2015-2065 | Añadido el lunes, 4 de agosto de 2014 19:52:23
Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 2 (Spanish Edition)"
    assert clipping.author == "Author 2"
    assert (
        clipping.metadata
        == "- Tu subrayado en la página 23 | Posición 2015-2065 | Añadido el lunes, 4 de agosto de 2014 19:52:23"
    )
    assert (
        clipping.content
        == "Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"
    )
    clipping.parse_metadata("es")
    assert clipping.position_start == 2015
    assert clipping.position_end == 2065
    assert clipping.page == 23
    assert clipping.type == "HIGHLIGHT"
def test_parse_single_mark_es() -> None:
    """Parse one Spanish bookmark (no content line) and verify every field."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Mi marcador Posición 1012 | Añadido el sábado 9 de febrero de 2013, 10:40:33
"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 2 (Spanish Edition)"
    assert clipping.author == "Author 2"
    assert (
        clipping.metadata
        == "- Mi marcador Posición 1012 | Añadido el sábado 9 de febrero de 2013, 10:40:33"
    )
    assert clipping.content == ""
    clipping.parse_metadata("es")
    assert clipping.position_start == 1012
    assert clipping.type == "MARK"
def test_parse_multiple_books_en() -> None:
    """Parsing English clippings for two books sorts a note + highlight into
    book 1 and a bookmark into book 2."""
    raw_data = """Book 1 (Author 1)
- Your note on Location 128 | Added on Wednesday, February 6, 2013 13:00:19
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?
==========
Book 1 (Author 1)
- Your Highlight on Page 73 | Location 1111-1111 | Added on Saturday, November 15, 2014 11:00:28
Delectus hic ipsam iure quae exercitationem distinctio voluptatem autem aliquam assumenda reiciendis.
==========
Book 2 (Spanish Edition) (Author 2)
- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33
=========="""
    library = parser.parser_my_clippings_data(raw_data, "en")
    assert len(library) == 2
    assert len(library[0].get_highlights()) == 1
    assert len(library[0].get_notes()) == 1
    assert len(library[0].get_marks()) == 0
    assert len(library[1].get_highlights()) == 0
    assert len(library[1].get_notes()) == 0
    assert len(library[1].get_marks()) == 1
def test_parse_multiple_books_wrong_language() -> None:
    """English clippings parsed with the Spanish language patterns produce
    books with no recognized notes, highlights or marks."""
    raw_data = """Book 1 (Author 1)
- Your note on Location 128 | Added on Wednesday, February 6, 2013 13:00:19
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?
==========
Book 1 (Author 1)
- Your Highlight on Page 73 | Location 1111-1111 | Added on Saturday, November 15, 2014 11:00:28
Delectus hic ipsam iure quae exercitationem distinctio voluptatem autem aliquam assumenda reiciendis.
==========
Book 2 (Spanish Edition) (Author 2)
- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33
=========="""
    library = parser.parser_my_clippings_data(raw_data, "es")
    assert len(library) == 2
    assert len(library[0].get_highlights()) == 0
    assert len(library[0].get_notes()) == 0
    assert len(library[0].get_marks()) == 0
    assert len(library[1].get_highlights()) == 0
    assert len(library[1].get_notes()) == 0
    assert len(library[1].get_marks()) == 0
def test_parse_single_note_en() -> None:
    """Parse one English note and verify every extracted field."""
    raw_string = """Book 1 (Author 1)
- Your note on Location 128 | Added on Wednesday, February 6, 2013 13:00:19
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 1"
    assert clipping.author == "Author 1"
    assert (
        clipping.metadata
        == "- Your note on Location 128 | Added on Wednesday, February 6, 2013 13:00:19"
    )
    assert (
        clipping.content
        == "Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"
    )
    clipping.parse_metadata("en")
    assert clipping.position_start == 128
    assert clipping.type == "NOTE"
def test_parse_single_highlight_en() -> None:
    """A single English highlight yields page, location range and HIGHLIGHT type."""
    # NOTE: the "Thuesday" misspelling is part of the fixture text on purpose;
    # the parser is not expected to validate weekday names.
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Your Highlight on Page 104 | Location 1581-1586 | Added on Thuesday, February 7, 2013 15:54:12
Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    # "(Spanish Edition)" stays in the title; only the last parenthesised
    # group is treated as the author.
    assert clipping.title == "Book 2 (Spanish Edition)"
    assert clipping.author == "Author 2"
    assert (
        clipping.metadata
        == "- Your Highlight on Page 104 | Location 1581-1586 | Added on Thuesday, February 7, 2013 15:54:12"
    )
    assert (
        clipping.content
        == "Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"
    )
    clipping.parse_metadata("en")
    assert clipping.position_start == 1581
    assert clipping.position_end == 1586
    assert clipping.page == 104
    assert clipping.type == "HIGHLIGHT"
def test_parse_single_mark_en() -> None:
    """A single English bookmark has a location, an empty content and MARK type."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33
"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert (clipping.title, clipping.author) == ("Book 2 (Spanish Edition)", "Author 2")
    assert clipping.metadata == (
        "- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33"
    )
    # Bookmarks carry no body text.
    assert clipping.content == ""
    clipping.parse_metadata("en")
    assert clipping.position_start == 1012
    assert clipping.type == "MARK"
def test_parse_single_note_en_incomplete() -> None:
    """A note whose metadata line is truncated still parses, with -1 sentinels."""
    raw_string = """Book 1 (Author 1)
- Your note on
Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 1"
    assert clipping.author == "Author 1"
    assert clipping.metadata == "- Your note on"
    assert (
        clipping.content
        == "Magni iste minus vitae, laudantium vero laborum obcaecati recusandae ipsum?"
    )
    clipping.parse_metadata("en")
    # Missing location information is reported as -1, not an exception.
    assert clipping.position_start == -1
    assert clipping.type == "NOTE"
def test_parse_single_highlight_en_incomplete() -> None:
    """A highlight with a truncated metadata line parses with -1 sentinels."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Your Highlight on
Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 2 (Spanish Edition)"
    assert clipping.author == "Author 2"
    assert clipping.metadata == "- Your Highlight on"
    assert (
        clipping.content
        == "Omnis animi sunt praesentium beatae fugiat, sequi hic debitis deleniti eum, ad eaque dignissimos"
    )
    clipping.parse_metadata("en")
    # Location range and page are all unknown, so each falls back to -1.
    assert clipping.position_start == -1
    assert clipping.position_end == -1
    assert clipping.page == -1
    assert clipping.type == "HIGHLIGHT"
def test_parse_single_mark_en_incomplete() -> None:
    """A bookmark with a truncated metadata line parses with a -1 location."""
    raw_string = """Book 2 (Spanish Edition) (Author 2)
- Your Bookmark on
"""
    clipping = Clipping.parse_single_highlight(raw_string)
    assert clipping is not None
    assert clipping.title == "Book 2 (Spanish Edition)"
    assert clipping.author == "Author 2"
    assert clipping.metadata == "- Your Bookmark on"
    assert clipping.content == ""
    clipping.parse_metadata("en")
    # No location could be extracted; -1 marks the missing value.
    assert clipping.position_start == -1
    assert clipping.type == "MARK"
| StarcoderdataPython |
12864244 | <reponame>ritwik12/lightwood
import time
import copy
import random
import logging
from functools import partial
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import DistilBertModel, DistilBertForSequenceClassification, DistilBertTokenizer, AlbertModel, AlbertForSequenceClassification, DistilBertTokenizer, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup
from lightwood.config.config import CONFIG
from lightwood.constants.lightwood import COLUMN_DATA_TYPES, ENCODER_AIM
from lightwood.mixers.helpers.default_net import DefaultNet
from lightwood.mixers.helpers.ranger import Ranger
from lightwood.mixers.helpers.shapes import *
from lightwood.mixers.helpers.transformer import Transformer
from lightwood.api.gym import Gym
class DistilBertEncoder:
    """Text encoder backed by a pretrained transformer (DistilBERT or ALBERT).

    Depending on the ``training_data`` given to :meth:`prepare_encoder`, the
    encoder either (a) fine-tunes a sequence-classification model on a single
    categorical target, (b) trains a small feed-forward head on top of frozen
    [CLS] embeddings for numeric/categorical targets, or (c) simply emits the
    raw [CLS] embeddings of the pretrained model.
    """

    def __init__(self, is_target=False, aim=ENCODER_AIM.BALANCE):
        self.name = 'Text Transformer Encoder'
        self._tokenizer = None
        self._model = None
        self._pad_id = None
        self._pytorch_wrapper = torch.FloatTensor
        self._max_len = None
        self._max_ele = None
        self._prepared = False
        self._model_type = None
        self.desired_error = 0.01
        self.max_training_time = CONFIG.MAX_ENCODER_TRAINING_TIME
        self._head = None
        # Possible: speed, balance, accuracy
        self.aim = aim

        if self.aim == ENCODER_AIM.SPEED:
            # uses more memory, takes very long to train and outputs weird debugging statements to the command line, consider waiting until it gets better or try to investigate why this happens (changing the pretrained model doesn't seem to help)
            self._classifier_model_class = AlbertForSequenceClassification
            self._embeddings_model_class = AlbertModel
            self._tokenizer_class = AlbertTokenizer
            self._pretrained_model_name = 'albert-base-v2'
            self._model_max_len = 768
        if self.aim == ENCODER_AIM.BALANCE:
            self._classifier_model_class = DistilBertForSequenceClassification
            self._embeddings_model_class = DistilBertModel
            self._tokenizer_class = DistilBertTokenizer
            self._pretrained_model_name = 'distilbert-base-uncased'
            self._model_max_len = 768
        if self.aim == ENCODER_AIM.ACCURACY:
            # NOTE(review): ACCURACY currently uses the exact same configuration
            # as BALANCE — presumably a placeholder until a larger model is chosen.
            self._classifier_model_class = DistilBertForSequenceClassification
            self._embeddings_model_class = DistilBertModel
            self._tokenizer_class = DistilBertTokenizer
            self._pretrained_model_name = 'distilbert-base-uncased'
            self._model_max_len = 768

        device_str = "cuda" if CONFIG.USE_CUDA else "cpu"
        if CONFIG.USE_DEVICE is not None:
            device_str = CONFIG.USE_DEVICE
        self.device = torch.device(device_str)

    def _train_callback(self, error, real_buff, predicted_buff):
        """Progress callback invoked by the Gym after each evaluation round."""
        logging.info(f'{self.name} reached a loss of {error} while training !')

    @staticmethod
    def categorical_train_function(model, data, gym, test=False):
        """One train/eval step for the fine-tuned classification model.

        `real` holds one-hot targets; argmax turns them into class indices
        expected by the sequence-classification loss.
        """
        input, real = data
        input = input.to(gym.device)
        labels = torch.tensor([torch.argmax(x) for x in real]).to(gym.device)
        outputs = gym.model(input, labels=labels)
        loss, logits = outputs[:2]
        if not test:
            loss.backward()
            gym.optimizer.step()
            gym.scheduler.step()
            gym.optimizer.zero_grad()
        return loss

    @staticmethod
    def numerical_train_function(model, data, gym, backbone, test=False):
        """One train/eval step for the feed-forward head on frozen embeddings.

        `backbone` is the pretrained transformer; only its [CLS] token
        embedding (position 0) is fed to the head.
        """
        input, real = data
        input = input.to(gym.device)
        real = real.to(gym.device)
        embeddings = backbone(input)[0][:,0,:]
        outputs = gym.model(embeddings)
        loss = gym.loss_criterion(outputs, real)
        if not test:
            loss.backward()
            gym.optimizer.step()
            gym.scheduler.step()
            gym.optimizer.zero_grad()
        return loss

    def prepare_encoder(self, priming_data, training_data=None):
        """Fit the encoder on `priming_data`, optionally supervising on targets.

        Raises if called twice. When `training_data` is None or unusable, the
        encoder falls back to emitting raw pretrained embeddings.
        """
        if self._prepared:
            raise Exception('You can only call "prepare_encoder" once for a given encoder.')

        priming_data = [x if x is not None else '' for x in priming_data]
        self._max_len = min(max([len(x) for x in priming_data]), self._model_max_len)
        self._tokenizer = self._tokenizer_class.from_pretrained(self._pretrained_model_name)
        self._pad_id = self._tokenizer.convert_tokens_to_ids([self._tokenizer.pad_token])[0]
        # @TODO: Support multiple targets if they are all categorical or train for the categorical target if it's a mix (maybe ?)
        # @TODO: Attach a language modeling head and/or use GPT2 and/or provide outputs better suited to a LM head (which will be the mixer) if the output if text
        if training_data is not None and 'targets' in training_data and len(training_data['targets']) == 1 and training_data['targets'][0]['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL and CONFIG.TRAIN_TO_PREDICT_TARGET:
            self._model_type = 'classifier'
            self._model = self._classifier_model_class.from_pretrained(self._pretrained_model_name, num_labels=len(set(training_data['targets'][0]['unencoded_output'])) + 1).to(self.device)
            batch_size = 10

            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in self._model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
                {'params': [p for n, p in self._model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]

            optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
            scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)

            gym = Gym(model=self._model, optimizer=optimizer, scheduler=scheduler, loss_criterion=None, device=self.device, name=self.name)

            input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
            tokenized_max_len = max([len(x) for x in input])
            # Right-pad every sequence to the longest tokenized length.
            input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])

            real = training_data['targets'][0]['encoded_output']

            merged_data = list(zip(input, real))

            # 90/10 train/test split.
            train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
            test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)

            best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.categorical_train_function,test=False), custom_test_func=partial(self.categorical_train_function,test=True))

            self._model = best_model.to(self.device)

        # BUGFIX: guard on `training_data is not None` — previously this branch
        # dereferenced training_data['targets'] and crashed with a TypeError
        # when prepare_encoder was called without training data, making the
        # embeddings-only fallback below unreachable in that case.
        elif training_data is not None and 'targets' in training_data and all([x['output_type'] == COLUMN_DATA_TYPES.NUMERIC or x['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL for x in training_data['targets']]) and CONFIG.TRAIN_TO_PREDICT_TARGET:
            self.desired_error = 0.01
            self._model_type = 'generic_target_predictor'
            self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)
            batch_size = 10

            # The head maps a 768-dim [CLS] embedding down to the concatenated
            # encoded-target width through a 5-layer funnel.
            self._head = DefaultNet(ds=None, dynamic_parameters={},shape=funnel(768, sum( [ len(x['encoded_output'][0]) for x in training_data['targets'] ] ), depth=5), selfaware=False)

            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in self._head.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
                {'params': [p for n, p in self._head.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]

            optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
            #optimizer = Ranger(self._head.parameters(),lr=5e-5)

            # num_training_steps is kind of an estimation
            scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)

            criterion = torch.nn.MSELoss()

            gym = Gym(model=self._head, optimizer=optimizer, scheduler=scheduler, loss_criterion=criterion, device=self.device, name=self.name)

            input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
            tokenized_max_len = max([len(x) for x in input])
            input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])

            # Concatenate the encoded outputs of all targets, row by row.
            # (`real[i] = real[i] + ...` rebinds each row, so the shared empty
            # list from the `* len(...)` initialisation is never mutated.)
            real = [[]] * len(training_data['targets'][0]['encoded_output'])
            for i in range(len(real)):
                for target in training_data['targets']:
                    real[i] = real[i] + target['encoded_output'][i]

            real = torch.tensor(real)

            merged_data = list(zip(input, real))

            train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
            test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)

            # The backbone stays frozen in eval mode; only the head trains.
            self._model.eval()

            best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.numerical_train_function, backbone=self._model, test=False), custom_test_func=partial(self.numerical_train_function, backbone=self._model, test=True))

            self._head = best_model.to(self.device)

        else:
            self._model_type = 'embeddings_generator'
            self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)

        self._prepared = True

    def encode(self, column_data):
        """Encode a list of strings into a FloatTensor, one row per input.

        Row contents depend on the prepared mode: head outputs, classifier
        logits, or raw [CLS] embeddings.
        """
        encoded_representation = []
        self._model.eval()
        with torch.no_grad():
            for text in column_data:
                if text is None:
                    text = ''
                input = torch.tensor(self._tokenizer.encode(text[:self._max_len], add_special_tokens=True)).to(self.device).unsqueeze(0)

                if self._model_type == 'generic_target_predictor':
                    embeddings = self._model(input)
                    output = self._head(embeddings[0][:,0,:])
                    encoded_representation.append(output.tolist()[0])

                elif self._model_type == 'classifier':
                    output = self._model(input)
                    logits = output[0]
                    predicted_targets = logits[0].tolist()
                    encoded_representation.append(predicted_targets)

                else:
                    output = self._model(input)
                    embeddings = output[0][:,0,:].cpu().numpy()[0]
                    encoded_representation.append(embeddings)

        return self._pytorch_wrapper(encoded_representation)

    def decode(self, encoded_values_tensor, max_length = 100):
        """Not implemented: decoding text from embeddings is an open question."""
        # When test is an output... a bit trickier to handle this case, thinking on it
        pass
if __name__ == "__main__":
    # Smoke test: encode synthetic "number + n-padding" strings and check the
    # encoder can be decoded back into the numeric target with a decent R^2.
    # Generate some tests data
    import random
    from sklearn.metrics import r2_score
    import logging
    from lightwood.encoders.numeric import NumericEncoder

    logging.basicConfig(level=logging.DEBUG)
    random.seed(2)
    priming_data = []
    primting_target = []
    test_data = []
    test_target = []
    # Roughly 1 in 5 rows also lands in the test split; note every row is
    # added to the priming set (the `else:` below is commented out), so the
    # test data is NOT held out from training.
    for i in range(0,300):
        if random.randint(1,5) == 3:
            test_data.append(str(i) + ''.join(['n'] * i))
            #test_data.append(str(i))
            test_target.append(i)
        #else:
        priming_data.append(str(i) + ''.join(['n'] * i))
        #priming_data.append(str(i))
        primting_target.append(i)

    # Numeric target encoder used both to supervise training and to decode
    # the text encoder's output back into numbers.
    output_1_encoder = NumericEncoder()
    output_1_encoder.prepare_encoder(primting_target)
    encoded_data_1 = output_1_encoder.encode(primting_target)
    encoded_data_1 = encoded_data_1.tolist()

    enc = DistilBertEncoder()
    # Two identical numeric targets exercise the multi-target concatenation path.
    enc.prepare_encoder(priming_data, training_data={'targets': [{'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}, {'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}]})

    encoded_predicted_target = enc.encode(test_data).tolist()
    # The first 4 columns belong to target 1, the rest to target 2
    # (assumes the numeric encoding is 4-wide — TODO confirm).
    predicted_targets_1 = output_1_encoder.decode(torch.tensor([x[:4] for x in encoded_predicted_target]))
    predicted_targets_2 = output_1_encoder.decode(torch.tensor([x[4:] for x in encoded_predicted_target]))

    for predicted_targets in [predicted_targets_1, predicted_targets_2]:
        real = list(test_target)
        pred = list(predicted_targets)

        # handle nan
        for i in range(len(pred)):
            try:
                float(pred[i])
            except:
                pred[i] = 0

        print(real[0:25], '\n', pred[0:25])
        encoder_accuracy = r2_score(real, pred)

        print(f'Categorial encoder accuracy for: {encoder_accuracy} on testing dataset')
        #assert(encoder_accuracy > 0.5)
| StarcoderdataPython |
6426559 | <filename>main.py
from QQ_tracer import QQTracer

if __name__ == '__main__':
    # Entry point: build the tracer and run it.
    QQTracer().run()
| StarcoderdataPython |
3472830 | from setuptools import find_packages, setup
import shutil
import os
from os import path
from typing import List
import glob
def get_model_zoo_configs() -> List[str]:
    """
    Collect the model-zoo config files to ship with the package.

    The top-level ``configs`` directory is symlinked (or copied, when
    symlinking fails, e.g. on Windows) into ``centermask/model_zoo/configs``
    so the installed package carries its own copy; the relative paths of all
    YAML files under ``configs/`` are then returned.
    """
    pkg_root = path.dirname(path.realpath(__file__))
    # Use absolute paths while symlinking.
    configs_src = path.join(pkg_root, "configs")
    configs_dst = path.join(pkg_root, "centermask", "model_zoo", "configs")

    # Drop any stale symlink/directory left over from a previous build so the
    # pip install tree stays clean.
    if path.exists(configs_src):
        if path.islink(configs_dst):
            os.unlink(configs_dst)
        elif path.isdir(configs_dst):
            shutil.rmtree(configs_dst)

    if not path.exists(configs_dst):
        try:
            os.symlink(configs_src, configs_dst)
        except OSError:
            # Fall back to copying if symlink fails: ex. on Windows.
            shutil.copytree(configs_src, configs_dst)

    return glob.glob("configs/**/*.yaml", recursive=True)
# Package metadata; config YAMLs are bundled via get_model_zoo_configs().
setup(
    name='centermask',
    version='2.0',
    description='CenterMask2 is an upgraded implementation on top of detectron2 beyond original CenterMask based on maskrcnn-benchmark',
    author='build by MiXaiLL76',
    author_email='https://github.com/youngwanLEE/centermask2',
    packages=find_packages(exclude=("configs", "tests*")),
    package_data={"centermask.model_zoo": get_model_zoo_configs()},
    install_requires=['detectron2'], #external packages as dependencies
)
6577707 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Public API of the package.
__all__ = ['MODEL_DIR_NAME', 'PATH_TO_MODEL_DIR']
__version__ = '0.0.1'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
def get_version():
    """Return the package version string (``__version__``)."""
    return __version__
# Re-exported so `from <package> import MODEL_DIR_NAME` works.
from .config import MODEL_DIR_NAME, PATH_TO_MODEL_DIR
| StarcoderdataPython |
165462 | <filename>rdflib/tools/__init__.py
"""
Various commandline tools for working with RDFLib
"""
| StarcoderdataPython |
3584143 | <reponame>stivenramireza/spotify-sync-app
import time

import pandas as pd

from src.data_access import (
    get_user_saved_tracks,
    save_tracks_for_user,
    get_user_recently_played_tracks,
    get_album_tracks,
    start_user_playback,
    get_user_playlists,
    follow_playlist
)
from src.logger import logger
def filter_user_saved_tracks(oauth_token: str) -> list:
    """Fetch the ids of every track the user has saved, paging 50 at a time.

    Returns a list of {'items.track.id': ...} records; re-raises on any error.
    """
    try:
        current_tracks = [None]
        saved_tracks = []
        offset = 0
        # Keep paging until the API returns an empty page.
        while len(current_tracks) != 0:
            data = get_user_saved_tracks(oauth_token, offset)
            df = pd.DataFrame(data)
            df = df.filter(['items'])
            df = pd.json_normalize(df.to_dict('records'))
            df = df.filter(['items.track.id'])
            current_tracks = [track for track in df.to_dict('records')]
            for saved_track in current_tracks:
                saved_tracks.append(saved_track)
            offset += 50
        logger.info('User saved tracks have been filtered successfully')
        return saved_tracks
    except Exception as error:
        logger.error(f'Error to filter user saved tracks: {error}')
        raise
def filter_tracks_for_user(oauth_token_1: str, oauth_token_2: str) -> None:
    """Copy the saved tracks of user 1 into user 2's library, 50 ids per call.

    Args:
        oauth_token_1: token of the source user (tracks are read from here).
        oauth_token_2: token of the target user (tracks are saved here).
    """
    try:
        saved_tracks = filter_user_saved_tracks(oauth_token_1)
        df_saved_tracks = pd.DataFrame(saved_tracks)
        saved_tracks_list = df_saved_tracks['items.track.id'].tolist()
        offset = 0
        # The save endpoint accepts at most 50 comma-separated ids per request.
        while offset < len(saved_tracks_list):
            ids_list = [track for track in saved_tracks_list[offset:offset + 50]]
            ids = ",".join(ids_list)
            save_tracks_for_user(oauth_token_2, ids)
            offset += 50
        logger.info(f'Tracks for user have been saved successfully')
    except Exception as error:
        logger.error(f'Error to filter tracks for user: {error}')
        raise
def filter_user_recently_played_tracks(oauth_token: str) -> list:
    """Return the user's recently played tracks as records with
    album_id, track_id and track_name keys."""
    try:
        data = get_user_recently_played_tracks(oauth_token)
        df = pd.DataFrame(data)
        df = df.filter(['track'])
        df = pd.json_normalize(df.to_dict('records'))
        df = df.filter(['track.album.id', 'track.id', 'track.name'])
        # Flattened JSON columns renamed to simple snake_case keys.
        df.rename(columns={'track.album.id': 'album_id', 'track.id': 'track_id', 'track.name': 'track_name'}, inplace=True)
        logger.info('Recently played tracks have been filtered successfully')
        return [track for track in df.to_dict('records')]
    except Exception as error:
        logger.error(f'Error to filter recently played tracks: {error}')
        raise
def filter_album_tracks(oauth_token: str, recent_tracks: list) -> list:
    """Enrich each recent track with its 0-based track_number inside its album.

    `recent_tracks` is the output of filter_user_recently_played_tracks.
    """
    try:
        tracks = []
        for track in recent_tracks:
            album_tracks = get_album_tracks(oauth_token, track['album_id'])
            df_album_tracks = pd.DataFrame(album_tracks)
            df_album_tracks = df_album_tracks.filter(['items'])
            df_album_tracks = pd.json_normalize(df_album_tracks.to_dict('records'))
            df_album_tracks = df_album_tracks.filter(['items.id', 'items.track_number'])
            df_album_tracks.rename(columns={'items.id': 'track_id', 'items.track_number': 'track_number'}, inplace=True)
            # Spotify track numbers are 1-based; playback offsets are 0-based.
            df_album_tracks['track_number'] = df_album_tracks['track_number'] - 1
            # NOTE(review): [0] assumes the inner merge yields exactly one row
            # per track; duplicates in recent_tracks would be dropped — verify.
            current_track = pd.merge(pd.DataFrame(recent_tracks), df_album_tracks, on='track_id', how='inner').to_dict('records')[0]
            tracks.append(current_track)
        logger.info('Album tracks have been filtered successfully')
        return tracks
    except Exception as error:
        logger.error(f'Error to filter album tracks: {error}')
        raise
def filter_user_playback(oauth_token: str, recent_tracks: list) -> None:
    """Replay the given tracks (oldest first) on the user's active device."""
    try:
        # Reversed so the least-recently played track starts first.
        for recent_track in reversed(recent_tracks):
            status_code = start_user_playback(oauth_token, recent_track['album_id'], recent_track['track_number'])
            if status_code == 204:
                # 204 = playback accepted; let the track play before the next one.
                time.sleep(7)
        logger.info(f'Recent tracks have been played successfully')
    except Exception as error:
        logger.error(f'Error to filter user playback: {error}')
        raise
def filter_user_playlists(oauth_token: str) -> list:
    """Collect the ids of every playlist of the user, paging 50 at a time.

    Returns a list of {'items.id': ...} records; re-raises on any error.
    """
    try:
        collected = []
        page = [None]
        page_offset = 0
        # Keep requesting pages until an empty one comes back.
        while page:
            raw = get_user_playlists(oauth_token, page_offset)
            frame = pd.DataFrame(raw).filter(['items'])
            frame = pd.json_normalize(frame.to_dict('records')).filter(['items.id'])
            page = frame.to_dict('records')
            collected.extend(page)
            page_offset += 50
        logger.info('User playlists have been filtered successfully')
        return collected
    except Exception as error:
        logger.error(f'Error to filter user playlists: {error}')
        raise
def filter_playlists_for_user(oauth_token_1: str, oauth_token_2: str) -> None:
    """Make user 2 follow every playlist saved by user 1.

    Args:
        oauth_token_1: token of the source user (playlists are read from here).
        oauth_token_2: token of the target user (playlists are followed here).
    """
    try:
        saved_playlists = filter_user_playlists(oauth_token_1)
        df_saved_playlists = pd.DataFrame(saved_playlists)
        saved_playlists_list = df_saved_playlists['items.id'].tolist()
        for playlist_id in saved_playlists_list:
            # follow_playlist comes from src.data_access; it was previously
            # missing from the module imports, causing a NameError at runtime.
            status_code = follow_playlist(oauth_token_2, playlist_id)
            if status_code == 204:
                # Throttle successful follow requests to respect rate limits.
                time.sleep(2)
        logger.info(f'Playlists for user have been followed successfully')
    except Exception as error:
        # logger.error (not .info) for consistency with the other filter_* helpers.
        logger.error(f'Error to filter playlists for user: {error}')
        raise
| StarcoderdataPython |
1895756 | import os
import sys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def setup(url):
    """Launch Chrome (headless unless a replay delay is set) and open `url`.

    Stores the driver and the original URL in module-level globals so the
    other helpers in this module can use them.
    """
    global driver, orig_url
    orig_url = url
    # A non-zero USECASE_REPLAY_DELAY means a human is watching the replay:
    # show a maximised browser window instead of running headless.
    delay = float(os.getenv("USECASE_REPLAY_DELAY", "0"))
    options = webdriver.ChromeOptions()
    if delay:
        options.add_argument("--start-maximized")
    else:
        options.add_argument("headless")
    d = options.to_capabilities()
    # Capture all browser console output in the driver logs.
    d['loggingPrefs'] = {"browser": "ALL"}
    driver = webdriver.Chrome(desired_capabilities=d)
    driver.get(url)
def print_html_page(page_name):
    """Save the browser's current page source to `<page_name>.html`."""
    page_source = driver.page_source
    with open(f"{page_name}.html", "w", encoding="utf-8") as out_file:
        out_file.write(page_source)
def wait_until(condition):
    """Wait up to 30s for `condition`; on timeout quit the driver and re-raise."""
    try:
        return WebDriverWait(driver, 30).until(condition)
    except Exception as e:
        # Tear the browser down so the test run doesn't leak a hung session.
        sys.stderr.write(f"Timed out {repr(driver)}\n")
        driver.quit()
        raise
def select_garden_size(size):
    """Pick `size` in the garden-size dropdown by its visible label."""
    dropdown = driver.find_element_by_id("select-garden-size")
    Select(dropdown).select_by_visible_text(size)
def submit_garden_quizz():
    """Click the garden-quizz submit button."""
    driver.find_element_by_id("submit-garden-quizz").click()
def select_flowers(flowers):
    """Tick the checkbox for every flower name in `flowers`."""
    for flower_name in flowers:
        driver.find_element_by_id(f"checkbox_{flower_name}").click()
def wait_for_garden_quizz_response():
    """Block (max 10s) until the advice pane stops showing 'Loading...'."""
    WebDriverWait(driver, 10).until_not(
        EC.text_to_be_present_in_element((By.ID, "garden_advice"), "Loading...")
    )
def close():
    """Shut down the browser and end the WebDriver session."""
    driver.quit()
| StarcoderdataPython |
105614 | <filename>tests/basic_deployment.py<gh_stars>0
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import swiftclient
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
)
# Use DEBUG to turn on debug logging
# Shared amulet utils helper used by every test method below.
u = OpenStackAmuletUtils(DEBUG)
class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic swift-storage deployment."""

    def __init__(self, series, openstack=None, source=None, stable=False):
        """Deploy the entire test environment and wait until it is ready."""
        super(SwiftStorageBasicDeployment, self).__init__(series, openstack,
                                                          source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        u.log.info('Waiting on extended status checks...')
        exclude_services = []
        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)
        self.d.sentry.wait()
        self._initialize_tests()
    def _add_services(self):
        """Add services.

        Add the services that we're testing, where swift-storage is local,
        and the rest of the service are from lp branches that are
        compatible with the local charm (e.g. stable or next).
        """
        this_service = {'name': 'swift-storage'}
        other_services = [
            # percona-cluster needs extra memory to start reliably.
            {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
            {'name': 'keystone'},
            {'name': 'glance'},
            {'name': 'swift-proxy'}
        ]
        super(SwiftStorageBasicDeployment, self)._add_services(this_service,
                                                               other_services)
    def _add_relations(self):
        """Add all of the relations for the services under test."""
        relations = {
            'keystone:shared-db': 'percona-cluster:shared-db',
            'swift-proxy:identity-service': 'keystone:identity-service',
            'swift-storage:swift-storage': 'swift-proxy:swift-storage',
            'glance:identity-service': 'keystone:identity-service',
            'glance:shared-db': 'percona-cluster:shared-db',
            'glance:object-store': 'swift-proxy:object-store'
        }
        super(SwiftStorageBasicDeployment, self)._add_relations(relations)
    def _configure_services(self):
        """Configure all of the services with their charm options."""
        keystone_config = {
            'admin-password': '<PASSWORD>',
            'admin-token': '<PASSWORD>',
        }
        swift_proxy_config = {
            # Manual zone assignment with a single replica keeps the test
            # deployment to one storage unit.
            'zone-assignment': 'manual',
            'replicas': '1',
            'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
        }
        swift_storage_config = {
            'zone': '1',
            'block-device': 'vdb',
            'overwrite': 'true',
        }
        pxc_config = {
            'dataset-size': '25%',
            'max-connections': 1000,
            'root-password': '<PASSWORD>',
            'sst-password': '<PASSWORD>',
        }
        configs = {
            'keystone': keystone_config,
            'swift-proxy': swift_proxy_config,
            'swift-storage': swift_storage_config,
            'percona-cluster': pxc_config,
        }
        super(SwiftStorageBasicDeployment, self)._configure_services(configs)
    def _initialize_tests(self):
        """Perform final initialization before tests get run.

        Grabs unit sentries, authenticates admin/demo users with keystone
        and opens a swift client connection used by the functional tests.
        """
        # Access the sentries for inspecting service units
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.glance_sentry = self.d.sentry['glance'][0]
        self.swift_proxy_sentry = self.d.sentry['swift-proxy'][0]
        self.swift_storage_sentry = self.d.sentry['swift-storage'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))
        # Authenticate admin with keystone
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='<PASSWORD>',
                                                      tenant='admin')
        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)
        # Authenticate swift user using the service credentials published on
        # the swift-proxy identity-service relation.
        keystone_relation = self.keystone_sentry.relation(
            'identity-service', 'swift-proxy:identity-service')
        ep = self.keystone.service_catalog.url_for(service_type='identity',
                                                   endpoint_type='publicURL')
        self.swift = swiftclient.Connection(
            authurl=ep,
            user=keystone_relation['service_username'],
            key=keystone_relation['service_password'],
            tenant_name=keystone_relation['service_tenant'],
            auth_version='2.0')
        # Create a demo tenant/role/user
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='<EMAIL>')
        # Authenticate demo user with keystone
        self.keystone_demo = \
            u.authenticate_keystone_user(self.keystone, user=self.demo_user,
                                         password='password',
                                         tenant=self.demo_tenant)
    def test_100_services(self):
        """Verify the expected services are running on the corresponding
        service units."""
        u.log.debug('Checking system services...')
        swift_storage_services = ['swift-account',
                                  'swift-account-auditor',
                                  'swift-account-reaper',
                                  'swift-account-replicator',
                                  'swift-container',
                                  'swift-container-auditor',
                                  'swift-container-replicator',
                                  'swift-container-sync',
                                  'swift-container-updater',
                                  'swift-object',
                                  'swift-object-auditor',
                                  'swift-object-replicator',
                                  'swift-object-updater']
        service_names = {
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api'],
            self.swift_proxy_sentry: ['swift-proxy'],
            self.swift_storage_sentry: swift_storage_services
        }
        # From Liberty onwards keystone runs under apache2, not its own daemon.
        if self._get_openstack_release() >= self.trusty_liberty:
            service_names[self.keystone_sentry] = ['apache2']
        ret = u.validate_services_by_name(service_names)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_102_users(self):
        """Verify all expected keystone users exist with sane attributes."""
        u.log.debug('Checking keystone users...')
        user1 = {'name': 'demoUser',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': '<EMAIL>'}
        user2 = {'name': 'admin',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'}
        user3 = {'name': 'glance',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': u'juju@localhost'}
        user4 = {'name': 's3_swift',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': u'juju@localhost'}
        expected = [user1, user2, user3, user4]
        actual = self.keystone.users.list()
        ret = u.validate_user_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_104_keystone_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog...')
        endpoint_id = {'adminURL': u.valid_url,
                       'region': 'RegionOne',
                       'publicURL': u.valid_url,
                       'internalURL': u.valid_url,
                       'id': u.not_null}
        # image/object-store/identity/s3 endpoints must all be present.
        expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
                    'identity': [endpoint_id], 's3': [endpoint_id]}
        actual = self.keystone_demo.service_catalog.get_endpoints()
        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_106_swift_object_store_endpoint(self):
        """Verify the swift object-store endpoint data."""
        u.log.debug('Checking keystone endpoint for swift object store...')
        endpoints = self.keystone.endpoints.list()
        # Swift proxy listens on 8080 for all endpoint types.
        admin_port = internal_port = public_port = '8080'
        expected = {'id': u.not_null,
                    'region': 'RegionOne',
                    'adminurl': u.valid_url,
                    'internalurl': u.valid_url,
                    'publicurl': u.valid_url,
                    'service_id': u.not_null}
        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
                                       public_port, expected)
        if ret:
            message = 'object-store endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
    def test_200_swift_storage_swift_storage_relation(self):
        """Verify the swift-storage to swift-proxy swift-storage relation
        data."""
        u.log.debug('Checking swift:swift-proxy swift-storage relation...')
        unit = self.swift_storage_sentry
        relation = ['swift-storage', 'swift-proxy:swift-storage']
        expected = {
            # Ports and device must match the charm config set up earlier.
            'account_port': '6002',
            'zone': '1',
            'object_port': '6000',
            'container_port': '6001',
            'private-address': u.valid_ip,
            'device': 'vdb'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('swift-storage swift-storage', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
def test_202_swift_proxy_swift_storage_relation(self):
"""Verify the swift-proxy to swift-storage swift-storage relation
data."""
u.log.debug('Checking swift-proxy:swift swift-storage relation...')
unit = self.swift_proxy_sentry
relation = ['swift-storage', 'swift-storage:swift-storage']
expected = {
'private-address': u.valid_ip,
'trigger': u.not_null,
'rings_url': u.valid_url,
'swift_hash': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('swift-proxy swift-storage', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_300_swift_config(self):
"""Verify the data in the swift-hash section of the swift config
file."""
u.log.debug('Checking swift config...')
unit = self.swift_storage_sentry
conf = '/etc/swift/swift.conf'
swift_proxy_relation = self.swift_proxy_sentry.relation(
'swift-storage', 'swift-storage:swift-storage')
expected = {
'swift_hash_path_suffix': swift_proxy_relation['swift_hash']
}
ret = u.validate_config_data(unit, conf, 'swift-hash', expected)
if ret:
message = "swift config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_302_account_server_config(self):
"""Verify the data in the account server config file."""
u.log.debug('Checking swift account-server config...')
unit = self.swift_storage_sentry
conf = '/etc/swift/account-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6002',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon account-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:account-server': {
'use': 'egg:swift#account'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "account server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_304_container_server_config(self):
"""Verify the data in the container server config file."""
u.log.debug('Checking swift container-server config...')
unit = self.swift_storage_sentry
conf = '/etc/swift/container-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6001',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon container-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:container-server': {
'use': 'egg:swift#container',
'allow_versions': 'true'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "container server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_306_object_server_config(self):
"""Verify the data in the object server config file."""
u.log.debug('Checking swift object-server config...')
unit = self.swift_storage_sentry
conf = '/etc/swift/object-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6000',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon object-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:object-server': {
'use': 'egg:swift#object',
'threads_per_disk': '4'
},
'object-replicator': {
'concurrency': '1'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "object server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
    def test_400_swift_backed_image_create(self):
        """Create an instance in glance, which is backed by swift, and validate
        that some of the metadata for the image match in glance and swift."""
        u.log.debug('Checking swift objects and containers with a '
                    'swift-backed glance image...')
        # Create swift-backed glance image
        img_new = u.create_cirros_image(self.glance, "cirros-image-1")
        img_id = img_new.id
        img_md5 = img_new.checksum
        img_size = img_new.size
        # Validate that swift object's checksum/size match that from glance
        # NOTE(review): assumes the object store starts empty so exactly one
        # container/object exists after the upload -- TODO confirm.
        headers, containers = self.swift.get_account()
        if len(containers) != 1:
            msg = "Expected 1 swift container, found {}".format(
                len(containers))
            amulet.raise_status(amulet.FAIL, msg=msg)
        container_name = containers[0].get('name')
        headers, objects = self.swift.get_container(container_name)
        if len(objects) != 1:
            msg = "Expected 1 swift object, found {}".format(len(objects))
            amulet.raise_status(amulet.FAIL, msg=msg)
        # Size (bytes) and md5 hash must agree between glance and swift.
        swift_object_size = objects[0].get('bytes')
        swift_object_md5 = objects[0].get('hash')
        if img_size != swift_object_size:
            msg = "Glance image size {} != swift object size {}".format(
                img_size, swift_object_size)
            amulet.raise_status(amulet.FAIL, msg=msg)
        if img_md5 != swift_object_md5:
            msg = "Glance image hash {} != swift object hash {}".format(
                img_md5, swift_object_md5)
            amulet.raise_status(amulet.FAIL, msg=msg)
        # Cleanup
        u.delete_resource(self.glance.images, img_id, msg="glance image")
        u.log.info('OK')
def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
u.log.info('Checking that conf files and system services respond '
'to a charm config change...')
sentry = self.swift_storage_sentry
juju_service = 'swift-storage'
# Expected default and alternate values
set_default = {'object-server-threads-per-disk': '4'}
set_alternate = {'object-server-threads-per-disk': '2'}
# Config file affected by juju set config change, and
# services which are expected to restart upon config change
services = {'swift-object-server': 'object-server.conf',
'swift-object-auditor': 'object-server.conf',
'swift-object-replicator': 'object-server.conf',
'swift-object-updater': 'object-server.conf'}
# Make config change, check for service restarts
u.log.debug('Making config change on {}...'.format(juju_service))
mtime = u.get_sentry_time(sentry)
self.d.configure(juju_service, set_alternate)
sleep_time = 40
for s, conf_file in services.iteritems():
u.log.debug("Checking that service restarted: {}".format(s))
conf_file_abs = '/etc/swift/{}'.format(conf_file)
if not u.validate_service_config_changed(sentry, mtime, s,
conf_file_abs,
sleep_time=sleep_time,
pgrep_full=True):
self.d.configure(juju_service, set_default)
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
sleep_time = 0
self.d.configure(juju_service, set_default)
def _assert_services(self, should_run):
swift_storage_services = ['swift-account-auditor',
'swift-account-reaper',
'swift-account-replicator',
'swift-account-server',
'swift-container-auditor',
'swift-container-replicator',
'swift-container-server',
'swift-container-sync',
'swift-container-updater',
'swift-object-auditor',
'swift-object-replicator',
'swift-object-server',
'swift-object-updater']
u.get_unit_process_ids(
{self.swift_storage_sentry: swift_storage_services},
expect_success=should_run)
# No point using validate_unit_process_ids, since we don't
# care about how many PIDs, merely that they're running, so
# would populate expected with either True or False. This
# validation is already performed in get_process_id_list
def _test_pause(self):
u.log.info("Testing pause action")
self._assert_services(should_run=True)
pause_action_id = u.run_action(self.swift_storage_sentry, "pause")
assert u.wait_on_action(pause_action_id), "Pause action failed."
self._assert_services(should_run=False)
def _test_resume(self):
u.log.info("Testing resume action")
# service is left paused by _test_pause
self._assert_services(should_run=False)
resume_action_id = u.run_action(self.swift_storage_sentry, "resume")
assert u.wait_on_action(resume_action_id), "Resume action failed."
self._assert_services(should_run=True)
    def test_910_pause_resume_actions(self):
        """Pause and then resume swift-storage."""
        u.log.debug('Checking pause/resume actions...')
        # Order matters: _test_resume relies on _test_pause having left the
        # unit in the paused state.
        self._test_pause()
        self._test_resume()
def test_920_no_restart_on_config_change_when_paused(self):
"""Verify that the specified services are not restarted when the config
is changed and the unit is paused."""
u.log.info('Checking that system services do not get restarted '
'when charm config changes but unit is paused...')
sentry = self.swift_storage_sentry
juju_service = 'swift-storage'
# Expected default and alternate values
set_default = {'object-server-threads-per-disk': '4'}
set_alternate = {'object-server-threads-per-disk': '2'}
services = ['swift-account-server',
'swift-account-auditor',
'swift-account-reaper',
'swift-account-replicator',
'swift-container-server',
'swift-container-auditor',
'swift-container-replicator',
'swift-container-updater',
'swift-object-server',
'swift-object-auditor',
'swift-object-replicator',
'swift-object-updater',
'swift-container-sync']
# Pause the unit
u.log.debug('Pausing the unit...')
pause_action_id = u.run_action(sentry, "pause")
assert u.wait_on_action(pause_action_id), "Pause action failed."
# Make config change, check for service restarts
u.log.debug('Making config change on {}...'.format(juju_service))
self.d.configure(juju_service, set_alternate)
for service in services:
u.log.debug("Checking that service didn't start while "
"paused: {}".format(service))
# No explicit assert because get_process_id_list will do it for us
u.get_process_id_list(
sentry, service, expect_success=False)
self.d.configure(juju_service, set_default)
resume_action_id = u.run_action(sentry, "resume")
assert u.wait_on_action(resume_action_id), "Resume action failed."
| StarcoderdataPython |
8030891 | <filename>IEEEXtreme 11/Aeneas' cryptographic disc (4th c. B.C.).py<gh_stars>0
import re
import math

# Aeneas' cryptographic disc: letters sit on a circle of the given radius;
# the message is "written" by running a thread between consecutive letters.
radius = int(input())

# Angular position (degrees) for each of the 26 letters.
angle = {}
for _ in range(26):
    letter, degrees = input().split(' ')
    angle[letter] = float(degrees)

# Keep only the alphabetic characters of the message, upper-cased.
message = re.sub('[^a-zA-Z]', '', input().upper())

# Chord between two letters separated by delta degrees is 2*r*|sin(delta/2)|;
# one extra radius accounts for the initial thread segment.
total = radius
for prev, cur in zip(message, message[1:]):
    half_delta = math.radians(angle[cur] - angle[prev]) / 2
    total += 2 * radius * abs(math.sin(half_delta))

print(math.ceil(total))
| StarcoderdataPython |
6524785 | <filename>Assignment2/RL/deep_q_network.py
#!/usr/bin/env python
from __future__ import print_function
import tensorflow as tf
import cv2
import sys
sys.path.append("game/")
import wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque
# reference: https://github.com/yenchenlin/DeepLearningFlappyBird
GAME = 'bird' # the name of the game being played for log files
ACTIONS = 2 # number of valid actions
GAMMA = 0.99 # decay rate of past observations
OBSERVE = 10000. # timesteps to observe before training
EXPLORE = 2000000. # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 1
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (std 0.01)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01))
def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.01."""
    return tf.Variable(tf.constant(0.01, shape=shape))
def conv2d(x, W, stride):
    """2-D convolution with SAME padding and the same stride on both axes."""
    strides = [1, stride, stride, 1]
    return tf.nn.conv2d(x, W, strides=strides, padding="SAME")
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def createNetwork():
    """Build the DQN: 3 conv layers + 1 fully-connected layer over a stack
    of 4 grayscale 80x80 game frames, producing one Q-value per action.

    Returns (s, readout, h_fc1): the input placeholder, the Q-value output
    tensor, and the last hidden layer.
    """
    # network weights
    W_conv1 = weight_variable([8, 8, 4, 32])
    b_conv1 = bias_variable([32])
    W_conv2 = weight_variable([4, 4, 32, 64])
    b_conv2 = bias_variable([64])
    W_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])
    # 1600 = 5*5*64: the flattened conv output after the strides/pool below.
    W_fc1 = weight_variable([1600, 512])
    b_fc1 = bias_variable([512])
    W_fc2 = weight_variable([512, ACTIONS])
    b_fc2 = bias_variable([ACTIONS])
    # input layer
    s = tf.placeholder("float", [None, 80, 80, 4])
    # hidden layers
    h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)
    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
    #h_pool3 = max_pool_2x2(h_conv3)
    #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
    h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])
    h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
    # readout layer
    readout = tf.matmul(h_fc1, W_fc2) + b_fc2
    return s, readout, h_fc1
def trainNetwork(s, readout, h_fc1, sess):
    """Run the DQN training loop forever: epsilon-greedy play, experience
    replay, Q-learning updates, periodic checkpointing.

    Args:
        s: input state placeholder from createNetwork().
        readout: Q-value output tensor from createNetwork().
        h_fc1: last hidden layer (unused here, kept for signature parity).
        sess: the active TensorFlow session.
    """
    # define the cost function: squared error between the target y and the
    # Q-value of the chosen action (selected via the one-hot placeholder a)
    a = tf.placeholder("float", [None, ACTIONS])
    y = tf.placeholder("float", [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - readout_action))
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
    # open up a game state to communicate with emulator
    game_state = game.GameState()
    # store the previous observations in replay memory
    D = deque()
    # get the first state by doing nothing and preprocess the image to 80x80x4
    do_nothing = np.zeros(ACTIONS)
    do_nothing[0] = 1
    x_t, r_0, terminal = game_state.frame_step(do_nothing)
    x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)
    ret, x_t = cv2.threshold(x_t,1,255,cv2.THRESH_BINARY)
    s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    # saving and loading networks
    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())
    checkpoint = tf.train.get_checkpoint_state("saved_model") # if you have completed training once, the model is saved in this directory
    # Note that if you interrupt the training phase and want to continue training after restoring from the saved model,
    # only the model will be restored and the experience replay memory still is reinitialized, i.e., a null deque.
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("Successfully loaded:", checkpoint.model_checkpoint_path)
    else:
        print("Could not find saved model")
    # start training
    epsilon = INITIAL_EPSILON
    t = 0
    while 1:
        # choose an epsilon-greedy action
        readout_t = readout.eval(feed_dict={s : [s_t]})[0]
        a_t = np.zeros([ACTIONS])
        action_index = 0
        # to complete the e-greedy action selection phase
        if t % FRAME_PER_ACTION == 0:
            if random.random() <= epsilon:
                action_index = random.randrange(ACTIONS)
                a_t[action_index] = 1
            else:
                action_index = np.argmax(readout_t)
                a_t[action_index] = 1
        else:
            # off-frames always take the "do nothing" action
            a_t[0] = 1
        # scale down epsilon linearly once the observation phase is over
        if epsilon > FINAL_EPSILON and t > OBSERVE:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        # run the selected action and observe next state and reward
        x_t1_colored, r_t, terminal = game_state.frame_step(a_t)
        x_t1 = cv2.cvtColor(cv2.resize(x_t1_colored, (80, 80)), cv2.COLOR_BGR2GRAY)
        ret, x_t1 = cv2.threshold(x_t1, 1, 255, cv2.THRESH_BINARY)
        x_t1 = np.reshape(x_t1, (80, 80, 1))
        s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2) # frame stack step
        # store the transition in D
        D.append((s_t, a_t, r_t, s_t1, terminal))
        if len(D) > REPLAY_MEMORY:
            D.popleft()
        # begin training when observing phase is over
        if t > OBSERVE:
            # sample a minibatch to train on
            minibatch = random.sample(D, BATCH)
            # get the corresponding batch variables and perform q learning
            s_j_batch, a_batch, r_batch, s_j1_batch = [], [], [], []
            for d in minibatch:
                s_j_batch.append(d[0])
                a_batch.append(d[1])
                r_batch.append(d[2])
                s_j1_batch.append(d[3])
            # Bellman targets: r for terminal transitions, otherwise
            # r + gamma * max_a' Q(s', a')
            y_batch = []
            readout_s_j1 = readout.eval(feed_dict = {s : s_j1_batch})
            for i in range(len(minibatch)):
                terminal = minibatch[i][4]
                if not terminal:
                    y_batch.append(r_batch[i] + GAMMA * np.max(readout_s_j1[i]))
                else:
                    y_batch.append(r_batch[i])
            # perform gradient step and a batch of BATCH_SIZE frames are used
            train_step.run(feed_dict = {
                y : y_batch,
                a : a_batch,
                s : s_j_batch}
            )
        # update the old values
        s_t = s_t1
        t += 1
        # save progress every 10000 iterations
        if t % 10000 == 0:
            saver.save(sess, 'saved_model/' + GAME + '-dqn', global_step=t)
def playGame():
    """Create a session, build the Q-network and start the training loop."""
    session = tf.InteractiveSession()
    s, readout, h_fc1 = createNetwork()
    trainNetwork(s, readout, h_fc1, session)
def main():
    # Thin entry point; all work happens in playGame().
    playGame()
if __name__ == "__main__":
main()
| StarcoderdataPython |
6686544 | <gh_stars>10-100
#!/usr/bin/python
# write out the data in a form useful to pass to the sba (demo) program
# it appears camera poses are basically given as [ R | t ] where R is
# the same R we use throughout and t is the 'tvec'
# todo, run sba and automatically parse output ...
import sys
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import cPickle as pickle
import math
import numpy as np
sys.path.append('../lib')
import Matcher
import ProjectMgr
import SBA1
import transformations
d2r = math.pi / 180.0 # a helpful constant
# Command-line interface: only the project directory is required.
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()
# return a 3d affine transformation between current camera locations
# and original camera locations.
def get_recenter_affine(src_list, dst_list):
    """Return the 4x4 affine that best maps src_list onto dst_list.

    src_list: current (SBA-fitted) camera NED positions
    dst_list: original camera NED positions
    Solves for rotation, translation and uniform scale.
    """
    src = [[], [], [], []] # current camera locations
    dst = [[], [], [], []] # original camera locations
    # Build homogeneous 4xN coordinate arrays, one column per camera.
    for i in range(len(src_list)):
        src_ned = src_list[i]
        src[0].append(src_ned[0])
        src[1].append(src_ned[1])
        src[2].append(src_ned[2])
        src[3].append(1.0)
        dst_ned = dst_list[i]
        dst[0].append(dst_ned[0])
        dst[1].append(dst_ned[1])
        dst[2].append(dst_ned[2])
        dst[3].append(1.0)
        print "%s <-- %s" % (dst_ned, src_ned)
    A = transformations.superimposition_matrix(src, dst, scale=True)
    print "A:\n", A
    return A
# transform a point list given an affine transform matrix
def transform_points( A, pts_list ):
src = [[], [], [], []]
for p in pts_list:
src[0].append(p[0])
src[1].append(p[1])
src[2].append(p[2])
src[3].append(1.0)
dst = A.dot( np.array(src) )
result = []
for i in range(len(pts_list)):
result.append( [ float(dst[0][i]),
float(dst[1][i]),
float(dst[2][i]) ] )
return result
# Load the project, its images/features, and the raw feature matches.
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
#m = Matcher.Matcher()
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "unique features:", len(matches_direct)
# Scale the calibrated camera matrix to the actual image resolution.
image_width = proj.image_list[0].width
camw, camh = proj.cam.get_image_params()
scale = float(image_width) / float(camw)
print 'scale:', scale
# Run sparse bundle adjustment over all images and matches.
sba = SBA1.SBA1(args.project)
sba.prepair_data( proj.image_list, matches_direct, proj.cam.get_K(scale) )
cameras, features = sba.run_live()
# Convert each SBA camera (quaternion + tvec, [R|t] convention) back into
# an NED position and yaw/pitch/roll, and store it as the image's sba pose.
for i, image in enumerate(proj.image_list):
    orig = image.camera_pose
    new = cameras[i]
    newq = np.array( [ new[0], new[1], new[2], new[3] ] )
    tvec = np.array( [ new[4], new[5], new[6] ] )
    Rned2cam = transformations.quaternion_matrix(newq)[:3,:3]
    cam2body = image.get_cam2body()
    Rned2body = cam2body.dot(Rned2cam)
    Rbody2ned = np.matrix(Rned2body).T
    (yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
    #print "orig ypr =", image.camera_pose['ypr']
    #print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
    # Camera position in NED: p = -R^T * tvec
    pos = -np.matrix(Rned2cam).T * np.matrix(tvec).T
    newned = pos.T[0].tolist()[0]
    #print "orig ned =", image.camera_pose['ned']
    #print "new ned =", newned
    image.set_camera_pose_sba( ned=newned, ypr=[yaw/d2r, pitch/d2r, roll/d2r] )
# now count how many features show up in each image
for i in proj.image_list:
    i.feature_count = 0
for i, match in enumerate(matches_direct):
    for j, p in enumerate(match[1:]):
        image = proj.image_list[ p[0] ]
        image.feature_count += 1
# compare original camera locations with sba camera locations and
# derive a transform matrix to 'best fit' the new camera locations
# over the original ... trusting the original group gps solution as
# our best absolute truth for positioning the system in world
# coordinates.
# NOTE(review): the 25-feature threshold below decides which cameras count
# as "in the fitted set" -- presumably an empirical cutoff; confirm.
src_list = []
dst_list = []
for image in proj.image_list:
    if image.feature_count >= 25:
        # only consider images that are in the fitted set
        ned, ypr, quat = image.get_camera_pose_sba()
        src_list.append(ned)
        ned, ypr, quat = image.get_camera_pose()
        dst_list.append(ned)
A = get_recenter_affine(src_list, dst_list)
# extract the rotation matrix (R) from the affine transform
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)
print "R:\n", R
# update the sba camera locations based on best fit
camera_list = []
# load current sba poses
for image in proj.image_list:
    ned, ypr, quat = image.get_camera_pose_sba()
    camera_list.append( ned )
# refit
new_cams = transform_points(A, camera_list)
# update sba poses. FIXME: do we need to update orientation here as
# well? Somewhere we worked out the code, but it may not matter all
# that much ... except for later manually computing mean projection
# error.
for i, image in enumerate(proj.image_list):
    ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
    ned, ypr, quat = image.get_camera_pose_sba()
    Rbody2ned = image.get_body2ned_sba()
    # update the orientation with the same transform to keep
    # everything in proper consistent alignment
    newRbody2ned = R[:3,:3].dot(Rbody2ned)
    (yaw, pitch, roll) = transformations.euler_from_matrix(newRbody2ned, 'rzyx')
    image.set_camera_pose_sba(ned=new_cams[i],
                              ypr=[yaw/d2r, pitch/d2r, roll/d2r])
    print 'image:', image.name
    print ' orig pos:', ned_orig
    print ' fit pos:', new_cams[i]
    print ' dist moved:', np.linalg.norm( np.array(ned_orig) - np.array(new_cams[i]))
    # persist the refit pose to the image's metadata file
    image.save_meta()
# update the sba point locations based on same best fit transform
# derived from the cameras (remember that 'features' is the point
# features structure spit out by the SBA process)
feature_list = []
for f in features:
    feature_list.append( f.tolist() )
new_feats = transform_points(A, feature_list)
# create the matches_sba list (copy) and update the ned coordinate
# NOTE(review): list() is a shallow copy, so assigning match[0] below also
# rewrites the inner lists shared with matches_direct; matches_direct is
# not used again afterwards, but verify before reordering this script.
matches_sba = list(matches_direct)
for i, match in enumerate(matches_sba):
    #print type(new_feats[i])
    matches_sba[i][0] = new_feats[i]
# write out the updated match_dict
print "Writing match_sba file ...", len(matches_sba), 'features'
pickle.dump(matches_sba, open(args.project + "/matches_sba", "wb"))
# collect/group match chains that refer to the same keypoint
# Repeatedly merges matches that share an (image, keypoint) pair until no
# further merges happen (fixed point).
matches_tmp = list(matches_sba)
count = 0
done = False
while not done:
    print "Iteration:", count
    count += 1
    matches_new = []
    # maps "image-keypoint" -> index of the merged match in matches_new
    matches_lookup = {}
    for i, match in enumerate(matches_tmp):
        # scan if any of these match points have been previously seen
        # and record the match index
        index = -1
        for p in match[1:]:
            key = "%d-%d" % (p[0], p[1])
            if key in matches_lookup:
                index = matches_lookup[key]
                break
        if index < 0:
            # not found, append to the new list
            for p in match[1:]:
                key = "%d-%d" % (p[0], p[1])
                matches_lookup[key] = len(matches_new)
            matches_new.append(match)
        else:
            # found a previous reference, append these match items
            existing = matches_new[index]
            # only append items that don't already exist in the early
            # match, and only one match per image (!)
            for p in match[1:]:
                key = "%d-%d" % (p[0], p[1])
                found = False
                for e in existing[1:]:
                    if p[0] == e[0]:
                        found = True
                        break
                if not found:
                    # add
                    existing.append(p)
                    matches_lookup[key] = index
            # print "new:", existing
            # print
    # stop once a full pass produces no merges
    if len(matches_new) == len(matches_tmp):
        done = True
    else:
        matches_tmp = matches_new
matches_group = matches_tmp
# write out the updated match_dict
print "Writing match_group file ...", len(matches_group), 'features'
pickle.dump(matches_group, open(args.project + "/matches_group", "wb"))
| StarcoderdataPython |
9633351 | from functools import wraps
from closures import chain_closures
import logging
logger = logging.getLogger()
def x(a):
    """Decorator factory: log entry/exit of the wrapped call, tagged with *a*."""
    def request_logger(f):
        @wraps(f)
        def rlog(*args, **kwargs):
            logger.info("%s x entry", a)
            result = f(*args, **kwargs)
            logger.info("%s x exit", a)
            return result
        return rlog
    return request_logger
def y(b):
    """Decorator factory: log entry/exit of the wrapped call, tagged with *b*."""
    def request_logger(f):
        @wraps(f)
        def rlog(*args, **kwargs):
            logger.info("%s y entry", b)
            result = f(*args, **kwargs)
            logger.info("%s y exit", b)
            return result
        return rlog
    return request_logger
def z(a, b):
    """Decorator factory combining x(a) (outer) and y(b) (inner) logging,
    with an extra z entry/exit log around the whole chain.

    Fix: the original rebuilt the x/y wrapper chain (c1(c2(f))) on every
    invocation of the decorated function; the chain is now built once at
    decoration time, which is behaviorally identical but avoids the
    per-call re-wrapping cost.
    """
    c1 = x(a)
    c2 = y(b)
    def request_logger(f):
        q = c1(c2(f))  # build the wrapper chain once, not per call
        @wraps(f)
        def rlog(*args, **kwargs):
            logger.info("z entry")
            ret = q(*args, **kwargs)
            logger.info("z exit")
            return ret
        return rlog
    return request_logger
def w(a, b, f):
    """Return f wrapped with y(b) logging inside x(a) logging (x outermost)."""
    return x(a)(y(b)(f))
def get_w(a, b):
    """Return a decorator that chains x(a) (outer) and y(b) (inner)."""
    return chain_closures([x(a), y(b)])
def f1(a, b):
    """Demo target function: log both arguments."""
    logger.info("%s %s", a, b)
def smain():
    """Demo: wrap f1 with the chained x/y logging decorator and invoke it."""
    wrapped = get_w(10, 11)(f1)
    wrapped(12, 13)
    logger.info(wrapped.__name__)
if __name__ == "__main__":
    # Configure root logging (timestamped, DEBUG) before running the demo.
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.DEBUG)
    smain()
| StarcoderdataPython |
8091843 | import sys
sys.path.insert(0, "../")
from PINN_Survey.problems.burgers.v1 import Burgers, Burgers_Siren, Burgers_Random_Fourier
from PINN_Survey.problems.burgers.data.load import load_burgers_bounds
from PINN_Survey.problems.helmholtz.data.load import load_helmholtz_bounds
from PINN_Survey.problems.helmholtz.v1 import Helmholtz, Helmholtz_Siren, Helmholtz_Random_Fourier
from PINN_Base.util import bounds_from_data, random_choice
import numpy as np
import tensorflow as tf
def rmsprop_init_optimizers(self):
    """Attach an RMSProp (lr=0.001) minimize op for self.loss to *self*."""
    rmsprop = tf.train.RMSPropOptimizer(learning_rate=.001)
    self.optimizer_RMSProp = rmsprop.minimize(self.loss)
def rmsprop_train(self, X, U, X_df, batch_size, epochs):
    # Delegate to the model's generic stochastic-optimizer loop, feeding it
    # the RMSProp op created by rmsprop_init_optimizers.
    self._train_stochastic_optimizer(
        self.optimizer_RMSProp, X, U, X_df, batch_size, epochs)
class Burgers_Base_RMSProp(Burgers):
    """Burgers model variant whose batched trainer uses RMSProp."""

    def _init_optimizers(self):
        rmsprop_init_optimizers(self)

    def train_RMSProp_batched(self, X, U, X_df, batch_size, epochs):
        rmsprop_train(self, X, U, X_df, batch_size, epochs)
class Helmholtz_Base_RMSProp(Helmholtz):
    """Helmholtz model variant whose batched trainer uses RMSProp."""

    def _init_optimizers(self):
        rmsprop_init_optimizers(self)

    def train_RMSProp_batched(self, X, U, X_df, batch_size, epochs):
        rmsprop_train(self, X, U, X_df, batch_size, epochs)
MAX_THREADS = 32
config = tf.ConfigProto(
intra_op_parallelism_threads=MAX_THREADS
)
def burgers_big_adam():
    """Train a large baseline Burgers PINN with Adam and log its rel. L2 error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_burgers_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Burgers(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        .01 / np.pi, session_config=config, use_collocation_residual=False)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"burgers,adam,{rel_error}\n")
def burgers_big_rmsprop():
    """Train a large Burgers PINN with RMSProp and log its rel. L2 error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_burgers_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Burgers_Base_RMSProp(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        .01 / np.pi, session_config=config, use_collocation_residual=False)
    model.train_RMSProp_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"burgers,rms,{rel_error}\n")
def burgers_big_siren():
    """Train a large SIREN Burgers PINN with Adam and log its rel. L2 error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_burgers_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Burgers_Siren(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        .01 / np.pi, session_config=config, use_collocation_residual=False)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"burgers_siren,Adam,{rel_error}\n")
def burgers_big_rf():
    """Train a large random-Fourier Burgers PINN with Adam and log its error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_burgers_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Burgers_Random_Fourier(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        .01 / np.pi, session_config=config, use_collocation_residual=False)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"burgers_rf,Adam,{rel_error}\n")
def helmholtz_big_adam():
    """Train a large baseline Helmholtz PINN with Adam and log its error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_helmholtz_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Helmholtz(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        1, 4, session_config=config, use_collocation_residual=False,
        df_multiplier=1e-2)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"helmholtz,adam,{rel_error}\n")
def helmholtz_big_rmsprop():
    """Train a large Helmholtz PINN with RMSProp and log its error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_helmholtz_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Helmholtz_Base_RMSProp(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        1, 4, session_config=config, use_collocation_residual=False,
        df_multiplier=1e-2)
    model.train_RMSProp_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"helmholtz,rms,{rel_error}\n")
def helmholtz_big_siren():
    """Train a large SIREN Helmholtz PINN with Adam and log its error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_helmholtz_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Helmholtz_Siren(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        1, 4, session_config=config, use_collocation_residual=False,
        df_multiplier=1e-2)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"helmholtz_siren,Adam,{rel_error}\n")
def helmholtz_big_rf():
    """Train a large random-Fourier Helmholtz PINN with Adam and log its error."""
    X_true, U_true, X_bounds, U_bounds, _ = load_helmholtz_bounds()

    # Boundary data supervises U; collocation points come from the full grid.
    X = np.vstack(X_bounds)
    U = np.vstack(U_bounds)
    X_df = random_choice(X_true)
    lower_bound, upper_bound = bounds_from_data(X_true)

    model = Helmholtz_Random_Fourier(
        lower_bound, upper_bound, [2, 256, 256, 256, 256, 256, 1],
        1, 4, session_config=config, use_collocation_residual=False,
        df_multiplier=1e-2)
    model.train_Adam_batched(X, U, X_df, batch_size=64, epochs=50000)

    # Relative L2 error against the reference solution.
    U_hat = model.predict(X_true)
    rel_error = np.linalg.norm(U_true - U_hat, 2) / np.linalg.norm(U_true, 2)
    print(rel_error)
    with open("big_log.csv", "a+") as f:
        f.write(f"helmholtz_rf,Adam,{rel_error}\n")
if __name__ == "__main__":
    # Dispatch the experiment selected by the integer CLI argument.
    EXPERIMENTS = {
        0: burgers_big_adam,
        1: burgers_big_rmsprop,
        2: helmholtz_big_adam,
        3: helmholtz_big_rmsprop,
        4: burgers_big_siren,
        5: burgers_big_rf,
        6: helmholtz_big_siren,
        7: helmholtz_big_rf,
    }
    runner = EXPERIMENTS.get(int(sys.argv[1]))
    # Like the original if/elif chain, an unknown index is a silent no-op.
    if runner is not None:
        runner()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.