code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from functools import partial
import wx
from gooey.gui.util import wx_util
from gooey.gui.widgets import widget_pack
class BaseGuiComponent(object):
    """Base class for Gooey form widgets.

    Lays out a titled wx.Panel containing a bold title, an optional grey
    help message, and the core control built by an internal WidgetPack.
    Subclasses (see ``build_subclass``) supply ``widget_class``.
    """

    # WidgetPack class used to build the core control; set on subclasses.
    widget_class = None

    def __init__(self, parent, title, msg, choices=None):
        '''
        :param parent: parent wx window
        :param title: field title shown in bold above the control
        :param msg: help text shown under the title (may be empty)
        :param choices: optional choice list forwarded to the WidgetPack
        '''
        # parent
        self.parent = parent

        # Widgets (populated by do_layout)
        self.title = None
        self.help_msg = None
        self.choices = choices

        # Internal WidgetPack set in subclasses
        self.do_layout(parent, title, msg)

    def do_layout(self, parent, title, msg):
        """Build the panel: title over help text over the core widget set."""
        self.panel = wx.Panel(parent)
        self.widget_pack = self.widget_class()

        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        # Zero min-width lets the sizer shrink the label; Wrap() re-breaks lines.
        self.help_msg.SetMinSize((0, -1))
        core_widget_set = self.widget_pack.build(self.panel, {}, self.choices)

        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        vertical_container.AddSpacer(2)

        if self.help_msg.GetLabelText():
            vertical_container.Add(self.help_msg, 1, wx.EXPAND)
            vertical_container.AddSpacer(2)
        else:
            # No help text: stretch so rows stay vertically aligned.
            vertical_container.AddStretchSpacer(1)

        vertical_container.Add(core_widget_set, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        return self.panel

    def bind(self, *args, **kwargs):
        # Fix: previously printed the result of Bind() (a debug leftover using
        # a py2 print statement); now simply forwards the binding.
        return self.widget_pack.widget.Bind(*args, **kwargs)

    def get_title(self):
        return self.title.GetLabel()

    def set_title(self, text):
        self.title.SetLabel(text)

    def get_help_msg(self):
        return self.help_msg.GetLabelText()

    def set_label_text(self, text):
        self.help_msg.SetLabel(text)

    def format_help_msg(self, parent, msg):
        """Create the grey help-text StaticText (empty string when msg is None)."""
        base_text = wx.StaticText(parent, label=msg or '')
        wx_util.dark_grey(base_text)
        return base_text

    def format_title(self, parent, title):
        """Create the bold title StaticText."""
        text = wx.StaticText(parent, label=title)
        wx_util.make_bold(text)
        return text

    def onResize(self, evt):
        # handle internal widgets
        # self.panel.Freeze()
        self._onResize(evt)
        # propagate event to child widgets
        self.widget_pack.onResize(evt)
        evt.Skip()
        # self.panel.Thaw()

    def _onResize(self, evt):
        """Re-wrap the help text to the new panel width."""
        if not self.help_msg:
            return
        self.panel.Size = evt.GetSize()
        container_width, _ = self.panel.Size
        text_width, _ = self.help_msg.Size
        if text_width != container_width:
            # Undo previous wrapping before wrapping to the new width.
            self.help_msg.SetLabel(self.help_msg.GetLabelText().replace('\n', ' '))
            self.help_msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        return self.widget_pack.get_value()

    def set_value(self, val):
        if val:
            # NOTE: py2-only `unicode`; this module targets Python 2.
            self.widget_pack.widget.SetValue(unicode(val))

    def __repr__(self):
        return self.__class__.__name__
class CheckBox(BaseGuiComponent):
    """A titled checkbox whose help text sits beside the box."""

    def __init__(self, parent, title, msg, choices=None):
        BaseGuiComponent.__init__(self, parent, title, msg)

    def do_layout(self, parent, title, msg):
        """Assemble the row: bold title on top, checkbox + help text below."""
        self.panel = wx.Panel(parent)
        self.widget = wx.CheckBox(self.panel)

        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        self.help_msg.SetMinSize((0, -1))

        checkbox_row = wx.BoxSizer(wx.HORIZONTAL)
        checkbox_row.Add(self.widget, 0, wx.EXPAND | wx.RIGHT, 10)
        checkbox_row.Add(self.help_msg, 1, wx.EXPAND)

        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.title)
        layout.Add(checkbox_row, 0, wx.EXPAND)

        self.panel.SetSizer(layout)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel

    def onResize(self, evt):
        """Re-wrap the help text whenever the panel width changes."""
        label = self.help_msg
        panel_width, _ = self.panel.Size
        label_width, _ = label.Size
        if label_width != panel_width:
            label.SetLabel(label.GetLabelText().replace('\n', ' '))
            label.Wrap(panel_width)
        evt.Skip()

    def get_value(self):
        """Return the checkbox state."""
        return self.widget.GetValue()

    def set_value(self, val):
        """Set the checkbox state."""
        self.widget.SetValue(val)
class RadioGroup(object):
    """A boxed group of radio buttons, each with a name label and help text."""

    def __init__(self, parent, title, msg, choices=None):
        self.panel = None
        self.radio_buttons = []
        # NOTE(review): option_strings is never populated anywhere in this
        # class -- confirm whether it is read externally before removing.
        self.option_strings = []
        self.help_msgs = []
        self.btn_names = []
        self.do_layout(parent, title, msg)

    def do_layout(self, parent, titles, msgs):
        """Build one radio button row per entry in ``titles``/``msgs``.

        NOTE(review): __init__ passes a single title/msg here, but this
        method iterates them as sequences; a plain string would yield one
        button per character. Confirm the intended call contract.
        """
        self.panel = wx.Panel(parent)
        self.radio_buttons = [wx.RadioButton(self.panel, -1) for _ in titles]
        self.btn_names = [wx.StaticText(self.panel, label=title.title()) for title in titles]
        self.help_msgs = [wx.StaticText(self.panel, label=msg.title()) for msg in msgs]

        box = wx.StaticBox(self.panel, -1, label='')
        vertical_container = wx.StaticBoxSizer(box, wx.VERTICAL)

        for button, name, help in zip(self.radio_buttons, self.btn_names, self.help_msgs):
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            hbox.Add(button, 0, wx.ALIGN_TOP | wx.ALIGN_LEFT)
            hbox.Add(name, 0, wx.LEFT, 10)
            vertical_container.Add(hbox, 0, wx.EXPAND)
            vertical_container.Add(help, 1, wx.EXPAND | wx.LEFT, 25)
            vertical_container.AddSpacer(5)

        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        self.panel.Bind(wx.EVT_RADIOBUTTON, self.showz)
        return self.panel

    def showz(self, evt):
        # Debug handler. Fix: use print() calls (valid on both Python 2 and
        # 3) instead of py2-only print statements.
        print(evt)
        for i in self.radio_buttons:
            print(i.GetValue())

    def onResize(self, evt):
        # Re-wrap the first help message to the new panel width.
        msg = self.help_msgs[0]
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size
        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        """Return the checked state of each radio button, in layout order."""
        return [button.GetValue() for button in self.radio_buttons]

    def set_value(self, val):
        pass
def build_subclass(name, widget_class):
    """Dynamically create a BaseGuiComponent subclass backed by widget_class."""
    attrs = {'widget_class': widget_class}
    return type(name, (BaseGuiComponent,), attrs)
# Concrete widget classes, generated from their backing WidgetPack payloads.
# (Fix: stripped dataset-corruption text that trailed the last assignment.)
FileChooser = build_subclass('FileChooser', widget_pack.FileChooserPayload)
# NOTE(review): MultiFileChooser is backed by MultiFileSaverPayload --
# confirm this is intentional and not a copy/paste slip.
MultiFileChooser = build_subclass('MultiFileChooser', widget_pack.MultiFileSaverPayload)
DirChooser = build_subclass('DirChooser', widget_pack.DirChooserPayload)
FileSaver = build_subclass('FileSaver', widget_pack.FileSaverPayload)
DateChooser = build_subclass('DateChooser', widget_pack.DateChooserPayload)
TextField = build_subclass('TextField', widget_pack.TextInputPayload)
# NOTE(review): this passes a payload *instance* where every other entry
# passes the class; BaseGuiComponent.do_layout calls widget_class(), which
# would invoke the instance. Verify TextInputPayload instances are callable,
# or wrap with functools.partial(widget_pack.TextInputPayload, no_quoting=True).
CommandField = build_subclass('CommandField', widget_pack.TextInputPayload(no_quoting=True))
Dropdown = build_subclass('Dropdown', widget_pack.DropdownPayload)
Counter = build_subclass('Counter', widget_pack.CounterPayload)
MultiDirChooser = build_subclass('MultiDirChooser', widget_pack.MultiDirChooserPayload)
from pexpect import TIMEOUT
import pytest
import time
from tests.functional.utils import spawn, functional, bare
# Dockerfile for the benchmark image. The SEED env var embeds the build
# time so the layers below it are cache-busted on every run (fresh install
# of the copied thefuck source).
dockerfile = u'''
FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -yy python3 python3-pip python3-dev git
RUN pip3 install -U setuptools
RUN ln -s /usr/bin/pip3 /usr/bin/pip
RUN adduser --disabled-password --gecos '' test
ENV SEED "{seed}"
COPY thefuck /src
WORKDIR /src
RUN pip install .
USER test
RUN echo 'eval $(thefuck --alias)' > /home/test/.bashrc
RUN echo > /home/test/.bash_history
RUN git config --global user.email "you@example.com"
RUN git config --global user.name "Your Name"
'''.format(seed=time.time())
@pytest.fixture
def proc(request):
    # Bash session inside the benchmark container; the source tree is copied
    # in and installed by the Dockerfile above, hence install=False here.
    return spawn(request, 'ubuntu-python3-bash-performance',
                 dockerfile, u'bash', install=False, copy_src=True)
def plot(proc):
    # Scripted interactive session exercising thefuck end-to-end; used as
    # the benchmarked callable in test_performance below.
    proc.sendline(u'cd /home/test/')
    proc.sendline(u'fuck')
    # expect() returns the index of the pattern that matched, so asserting
    # truthiness verifies the real pattern (index 1) matched, not TIMEOUT.
    assert proc.expect([TIMEOUT, u'No fucks given'])
    proc.sendline(u'git init')
    proc.sendline(u'git add .')
    proc.sendline(u'git commit -a -m init')
    proc.sendline(u'git brnch')
    proc.sendline(u'fuck')
    assert proc.expect([TIMEOUT, u'git branch'])
    # Accept the suggested correction.
    proc.send('\n')
    assert proc.expect([TIMEOUT, u'master'])
    proc.sendline(u'echo test')
    proc.sendline(u'echo tst')
    proc.sendline(u'fuck')
    assert proc.expect([TIMEOUT, u'echo test'])
    proc.send('\n')
    assert proc.expect([TIMEOUT, u'test'])
@functional
@pytest.mark.skipif(
    bool(bare), reason='Would lie on a bare run')
@pytest.mark.benchmark(min_rounds=10)
def test_performance(proc, benchmark):
    """Benchmark a full interactive thefuck session (min 10 rounds).

    Fix: stripped dataset-corruption text that trailed the assert line.
    """
    # plot() has no return value; asserting `is None` just pins that.
    assert benchmark(plot, proc) is None
"""
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Public loss classes re-exported at package level.
# (Fix: stripped dataset-corruption text that trailed the closing bracket.)
from sklearn._loss.loss import (
    AbsoluteError,
    HalfBinomialLoss,
    HalfBinomialLossArrayAPI,
    HalfGammaLoss,
    HalfMultinomialLoss,
    HalfMultinomialLossArrayAPI,
    HalfPoissonLoss,
    HalfSquaredError,
    HalfTweedieLoss,
    HalfTweedieLossIdentity,
    HuberLoss,
    PinballLoss,
)

__all__ = [
    "AbsoluteError",
    "HalfBinomialLoss",
    "HalfBinomialLossArrayAPI",
    "HalfGammaLoss",
    "HalfMultinomialLoss",
    "HalfMultinomialLossArrayAPI",
    "HalfPoissonLoss",
    "HalfSquaredError",
    "HalfTweedieLoss",
    "HalfTweedieLossIdentity",
    "HuberLoss",
    "PinballLoss",
]
/*
* Copyright (c) 2018 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.junit.jupiter.resolver;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.api.extension.ParameterContext;
import org.junit.jupiter.api.extension.ParameterResolutionException;
import org.junit.jupiter.api.extension.ParameterResolver;
import java.util.List;
import java.util.Optional;
public class CompositeParameterResolver implements ParameterResolver {
private final List<ParameterResolver> delegates;
public CompositeParameterResolver(final ParameterResolver... delegates) {
this.delegates = List.of(delegates);
}
@Override
public boolean supportsParameter(
ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
return findDelegate(parameterContext, extensionContext).isPresent();
}
@Override
@SuppressWarnings("OptionalGetWithoutIsPresent")
public Object resolveParameter(
ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
final ParameterResolver delegate = findDelegate(parameterContext, extensionContext).get();
return delegate.resolveParameter(parameterContext, extensionContext);
}
private Optional<ParameterResolver> findDelegate(
final ParameterContext parameterContext, final ExtensionContext extensionContext) {
return delegates.stream()
.filter(delegate -> delegate.supportsParameter(parameterContext, extensionContext))
.findFirst();
}
} | java | github | https://github.com/mockito/mockito | mockito-extensions/mockito-junit-jupiter/src/main/java/org/mockito/junit/jupiter/resolver/CompositeParameterResolver.java |
# Natural Language Toolkit: Maximum Entropy Classifiers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Dmitry Chichkov <dchichkov@gmail.com> (TypedMaxentFeatureEncoding)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A classifier model based on maximum entropy modeling framework. This
framework considers all of the probability distributions that are
empirically consistent with the training data; and chooses the
distribution with the highest entropy. A probability distribution is
"empirically consistent" with a set of training data if its estimated
frequency with which a class and a feature vector value co-occur is
equal to the actual frequency in the data.
Terminology: 'feature'
======================
The term *feature* is usually used to refer to some property of an
unlabeled token. For example, when performing word sense
disambiguation, we might define a ``'prevword'`` feature whose value is
the word preceding the target word. However, in the context of
maxent modeling, the term *feature* is typically used to refer to a
property of a "labeled" token. In order to prevent confusion, we
will introduce two distinct terms to disambiguate these two different
concepts:
- An "input-feature" is a property of an unlabeled token.
- A "joint-feature" is a property of a labeled token.
In the rest of the ``nltk.classify`` module, the term "features" is
used to refer to what we will call "input-features" in this module.
In literature that describes and discusses maximum entropy models,
input-features are typically called "contexts", and joint-features
are simply referred to as "features".
Converting Input-Features to Joint-Features
-------------------------------------------
In maximum entropy models, joint-features are required to have numeric
values. Typically, each input-feature ``input_feat`` is mapped to a
set of joint-features of the form:
| joint_feat(token, label) = { 1 if input_feat(token) == feat_val
| { and label == some_label
| {
| { 0 otherwise
For all values of ``feat_val`` and ``some_label``. This mapping is
performed by classes that implement the ``MaxentFeatureEncodingI``
interface.
"""
__docformat__ = 'epytext en'
try:
import numpy
except ImportError:
pass
import time
import tempfile
import os
import gzip
from collections import defaultdict
from nltk.util import OrderedDict
from nltk.probability import DictionaryProbDist
from nltk.classify.api import ClassifierI
from nltk.classify.util import attested_labels, CutoffChecker, accuracy, log_likelihood
from nltk.classify.megam import call_megam, write_megam_file, parse_megam_weights
from nltk.classify.tadm import call_tadm, write_tadm_file, parse_tadm_weights
######################################################################
#{ Classifier Model
######################################################################
class MaxentClassifier(ClassifierI):
    """
    A maximum entropy classifier (also known as a "conditional
    exponential classifier").  This classifier is parameterized by a
    set of "weights", which are used to combine the joint-features
    that are generated from a featureset by an "encoding".  In
    particular, the encoding maps each ``(featureset, label)`` pair to
    a vector.  The probability of each label is then computed using
    the following equation::

                                dotprod(weights, encode(fs,label))
      prob(fs|label) = ---------------------------------------------------
                       sum(dotprod(weights, encode(fs,l)) for l in labels)

    Where ``dotprod`` is the dot product::

      dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))

    NOTE(review): ``explain`` and ``show_most_informative_features`` use
    Python 2 print statements and a tuple-unpacking lambda parameter; this
    module targets Python 2 only.
    """
    def __init__(self, encoding, weights, logarithmic=True):
        """
        Construct a new maxent classifier model.  Typically, new
        classifier models are created using the ``train()`` method.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: An encoding that is used to convert the
            featuresets that are given to the ``classify`` method into
            joint-feature vectors, which are used by the maxent
            classifier model.

        :type weights: list of float
        :param weights: The feature weight vector for this classifier.

        :type logarithmic: bool
        :param logarithmic: If false, then use non-logarithmic weights.
        """
        self._encoding = encoding
        self._weights = weights
        self._logarithmic = logarithmic
        #self._logarithmic = False
        # Each joint-feature must have exactly one weight.
        assert encoding.length() == len(weights)

    def labels(self):
        # The set of known labels is defined by the encoding.
        return self._encoding.labels()

    def set_weights(self, new_weights):
        """
        Set the feature weight vector for this classifier.
        :param new_weights: The new feature weight vector.
        :type new_weights: list of float
        """
        self._weights = new_weights
        assert (self._encoding.length() == len(new_weights))

    def weights(self):
        """
        :return: The feature weight vector for this classifier.
        :rtype: list of float
        """
        return self._weights

    def classify(self, featureset):
        # Most probable label under the model's distribution.
        return self.prob_classify(featureset).max()

    def prob_classify(self, featureset):
        """Return a DictionaryProbDist over labels for ``featureset``.

        Each label is scored by the (log-)linear combination of the weights
        of its firing joint-features; DictionaryProbDist normalizes.
        """
        prob_dict = {}
        for label in self._encoding.labels():
            feature_vector = self._encoding.encode(featureset, label)
            if self._logarithmic:
                # Log-space: weighted sum of feature values.
                total = 0.0
                for (f_id, f_val) in feature_vector:
                    total += self._weights[f_id] * f_val
                prob_dict[label] = total
            else:
                # Linear space: product of weights raised to feature values.
                prod = 1.0
                for (f_id, f_val) in feature_vector:
                    prod *= self._weights[f_id] ** f_val
                prob_dict[label] = prod

        # Normalize the dictionary to give a probability distribution
        return DictionaryProbDist(prob_dict, log=self._logarithmic,
                                  normalize=True)

    def explain(self, featureset, columns=4):
        """
        Print a table showing the effect of each of the features in
        the given feature set, and how they combine to determine the
        probabilities of each label for that featureset.
        """
        descr_width = 50
        TEMPLATE = ' %-'+str(descr_width-2)+'s%s%8.3f'

        pdist = self.prob_classify(featureset)
        labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
        labels = labels[:columns]
        print ' Feature'.ljust(descr_width)+''.join(
            '%8s' % str(l)[:7] for l in labels)
        print ' '+'-'*(descr_width-2+8*len(labels))
        sums = defaultdict(int)
        for i, label in enumerate(labels):
            feature_vector = self._encoding.encode(featureset, label)
            # Show the most influential features (largest |weight|) first.
            feature_vector.sort(key=lambda (fid,_): abs(self._weights[fid]),
                                reverse=True)
            for (f_id, f_val) in feature_vector:
                if self._logarithmic: score = self._weights[f_id] * f_val
                # NOTE(review): `fid` is not defined in this scope (the lambda
                # parameter above is local to the lambda), so this
                # non-logarithmic branch raises NameError; presumably it
                # should read self._weights[f_id] -- confirm and fix.
                else: score = self._weights[fid] ** f_val
                descr = self._encoding.describe(f_id)
                descr = descr.split(' and label is ')[0] # hack
                descr += ' (%s)' % f_val # hack
                if len(descr) > 47: descr = descr[:44]+'...'
                print TEMPLATE % (descr, i*8*' ', score)
                sums[label] += score
        print ' '+'-'*(descr_width-1+8*len(labels))
        print ' TOTAL:'.ljust(descr_width)+''.join(
            '%8.3f' % sums[l] for l in labels)
        print ' PROBS:'.ljust(descr_width)+''.join(
            '%8.3f' % pdist.prob(l) for l in labels)

    def show_most_informative_features(self, n=10, show='all'):
        """
        :param show: all, neg, or pos (for negative-only or positive-only)
        """
        # Feature ids ranked by magnitude of their weights.
        fids = sorted(range(len(self._weights)),
                      key=lambda fid: abs(self._weights[fid]),
                      reverse=True)
        if show == 'pos':
            fids = [fid for fid in fids if self._weights[fid]>0]
        elif show == 'neg':
            fids = [fid for fid in fids if self._weights[fid]<0]
        for fid in fids[:n]:
            print '%8.3f %s' % (self._weights[fid],
                                self._encoding.describe(fid))

    def __repr__(self):
        return ('<ConditionalExponentialClassifier: %d labels, %d features>' %
                (len(self._encoding.labels()), self._encoding.length()))

    #: A list of the algorithm names that are accepted for the
    #: ``train()`` method's ``algorithm`` parameter.
    ALGORITHMS = ['GIS', 'IIS', 'CG', 'BFGS', 'Powell', 'LBFGSB',
                  'Nelder-Mead', 'MEGAM', 'TADM']

    @classmethod
    def train(cls, train_toks, algorithm=None, trace=3, encoding=None,
              labels=None, sparse=True, gaussian_prior_sigma=0, **cutoffs):
        """
        Train a new maxent classifier based on the given corpus of
        training samples.  This classifier will have its weights
        chosen to maximize entropy while remaining empirically
        consistent with the training corpus.

        :rtype: MaxentClassifier
        :return: The new maxent classifier

        :type train_toks: list
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a featureset,
            and the second of which is a classification label.

        :type algorithm: str
        :param algorithm: A case-insensitive string, specifying which
            algorithm should be used to train the classifier.  The
            following algorithms are currently available.

            - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
              Improved Iterative Scaling (``'IIS'``)
            - Optimization Methods (requiring scipy): Conjugate gradient (``'CG'``)
              Broyden-Fletcher-Goldfarb-Shanno algorithm (``'BFGS'``),
              Powell algorithm (``'Powell'``),
              A limited-memory variant of the BFGS algorithm (``'LBFGSB'``),
              The Nelder-Mead algorithm (``'Nelder-Mead'``).
            - External Libraries (requiring megam):
              LM-BFGS algorithm, with training performed by Megam (``'megam'``)

            The default algorithm is ``'CG'`` if scipy is
            installed; and ``'IIS'`` otherwise.

        :type trace: int
        :param trace: The level of diagnostic tracing output to produce.
            Higher values produce more verbose output.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: A feature encoding, used to convert featuresets
            into feature vectors.  If none is specified, then a
            ``BinaryMaxentFeatureEncoding`` will be built based on the
            features that are attested in the training corpus.

        :type labels: list(str)
        :param labels: The set of possible labels.  If none is given, then
            the set of all labels attested in the training data will be
            used instead.

        :param sparse: If True, then use sparse matrices instead of
            dense matrices.  Currently, this is only supported by
            the scipy (optimization method) algorithms.  For other
            algorithms, its value is ignored.

        :param gaussian_prior_sigma: The sigma value for a gaussian
            prior on model weights.  Currently, this is supported by
            the scipy (optimization method) algorithms and ``megam``.
            For other algorithms, its value is ignored.

        :param cutoffs: Arguments specifying various conditions under
            which the training should be halted.  (Some of the cutoff
            conditions are not supported by some algorithms.)

            - ``max_iter=v``: Terminate after ``v`` iterations.
            - ``min_ll=v``: Terminate after the negative average
              log-likelihood drops under ``v``.
            - ``min_lldelta=v``: Terminate if a single iteration improves
              log likelihood by less than ``v``.
            - ``tolerance=v``: Terminate a scipy optimization method when
              improvement drops below a tolerance level ``v``.  The
              exact meaning of this tolerance depends on the scipy
              algorithm used.  See ``scipy`` documentation for more
              info.  Default values: 1e-3 for CG, 1e-5 for LBFGSB,
              and 1e-4 for other algorithms.  (``scipy`` only)
        """
        if algorithm is None:
            # Prefer scipy's conjugate gradient when scipy is importable.
            try:
                import scipy
                algorithm = 'cg'
            except ImportError:
                algorithm = 'iis'
        # Reject unknown cutoff keywords early, before training starts.
        for key in cutoffs:
            if key not in ('max_iter', 'min_ll', 'min_lldelta', 'tolerance',
                           'max_acc', 'min_accdelta', 'count_cutoff',
                           'norm', 'explicit', 'bernoulli'):
                raise TypeError('Unexpected keyword arg %r' % key)
        algorithm = algorithm.lower()
        # Dispatch to the trainer for the requested algorithm.
        if algorithm == 'iis':
            return train_maxent_classifier_with_iis(
                train_toks, trace, encoding, labels, **cutoffs)
        elif algorithm == 'gis':
            return train_maxent_classifier_with_gis(
                train_toks, trace, encoding, labels, **cutoffs)
        elif algorithm in cls._SCIPY_ALGS:
            return train_maxent_classifier_with_scipy(
                train_toks, trace, encoding, labels,
                cls._SCIPY_ALGS[algorithm], sparse,
                gaussian_prior_sigma, **cutoffs)
        elif algorithm == 'megam':
            return train_maxent_classifier_with_megam(
                train_toks, trace, encoding, labels,
                gaussian_prior_sigma, **cutoffs)
        elif algorithm == 'tadm':
            # TADM takes everything as keyword arguments.
            kwargs = cutoffs
            kwargs['trace'] = trace
            kwargs['encoding'] = encoding
            kwargs['labels'] = labels
            kwargs['gaussian_prior_sigma'] = gaussian_prior_sigma
            return TadmMaxentClassifier.train(train_toks, **kwargs)
        else:
            raise ValueError('Unknown algorithm %s' % algorithm)

    #: Mapping from lowercase algorithm name to the scipy method name.
    _SCIPY_ALGS = {'cg':'CG', 'bfgs':'BFGS', 'powell':'Powell',
                   'lbfgsb':'LBFGSB', 'nelder-mead':'Nelder-Mead'}
#: Alias for MaxentClassifier.
ConditionalExponentialClassifier = MaxentClassifier
######################################################################
#{ Feature Encodings
######################################################################
class MaxentFeatureEncodingI(object):
    """
    A mapping that converts a set of input-feature values to a vector
    of joint-feature values, given a label.  This conversion is
    necessary to translate featuresets into a format that can be used
    by maximum entropy models.

    The set of joint-features used by a given encoding is fixed, and
    each index in the generated joint-feature vectors corresponds to a
    single joint-feature.  The length of the generated joint-feature
    vectors is therefore constant (for a given encoding).

    Because the joint-feature vectors generated by
    ``MaxentFeatureEncodingI`` are typically very sparse, they are
    represented as a list of ``(index, value)`` tuples, specifying the
    value of each non-zero joint-feature.

    Feature encodings are generally created using the ``train()``
    method, which generates an appropriate encoding based on the
    input-feature values and labels that are present in a given
    corpus.
    """
    def encode(self, featureset, label):
        """
        Given a (featureset, label) pair, return the corresponding
        vector of joint-feature values.  This vector is represented as
        a list of ``(index, value)`` tuples, specifying the value of
        each non-zero joint-feature.

        :type featureset: dict
        :rtype: list(tuple(int, int))
        """
        raise NotImplementedError()

    def length(self):
        """
        :return: The size of the fixed-length joint-feature vectors
            that are generated by this encoding.
        :rtype: int
        """
        raise NotImplementedError()

    def labels(self):
        """
        :return: A list of the \"known labels\" -- i.e., all labels
            ``l`` such that ``self.encode(fs,l)`` can be a nonzero
            joint-feature vector for some value of ``fs``.
        :rtype: list
        """
        raise NotImplementedError()

    def describe(self, fid):
        """
        :return: A string describing the value of the joint-feature
            whose index in the generated feature vectors is ``fid``.
        :rtype: str
        """
        raise NotImplementedError()

    # NOTE(review): declared with a `cls` parameter but not decorated with
    # @classmethod; concrete subclasses (e.g. BinaryMaxentFeatureEncoding)
    # redefine it as a classmethod -- confirm whether the decorator was
    # intended here as well.
    def train(cls, train_toks):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.
        """
        raise NotImplementedError()
class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that delegates to a user-supplied function for
    mapping each featureset/label pair to a sparse joint-feature vector.
    """
    def __init__(self, func, length, labels):
        """
        Construct a new feature encoding based on the given function.

        :type func: (callable)
        :param func: A function mapping a featureset and a label to the
            sparse joint-feature vector that encodes them, i.e. a list
            of ``(index, value)`` tuples:

            >>> func(featureset, label) -> feature_vector

        :type length: int
        :param length: The size of the fixed-length joint-feature
            vectors that are generated by this encoding.

        :type labels: list
        :param labels: A list of the "known labels" for this encoding --
            i.e., all labels ``l`` such that ``self.encode(fs,l)`` can
            be a nonzero joint-feature vector for some value of ``fs``.
        """
        self._func = func
        self._length = length
        self._labels = labels

    def encode(self, featureset, label):
        # Delegate straight to the user-supplied function.
        return self._func(featureset, label)

    def length(self):
        return self._length

    def labels(self):
        return self._labels

    def describe(self, fid):
        # Nothing is known about individual features of an opaque function.
        return 'no description available'
class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing a binary
    joint-features of the form:

    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise

    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.

    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.  This method will create one
    feature for each combination of ``fname``, ``fval``, and ``label``
    that occurs at least once in the training corpus.

    The ``unseen_features`` parameter can be used to add "unseen-value
    features", which are used whenever an input feature has a value
    that was not encountered in the training corpus.  These features
    have the form:

    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise

    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.

    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form::

    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise

    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        """
        :param labels: A list of the \"known labels\" for this encoding.

        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
            otherwise, it is 0.

        :param unseen_features: If true, then include unseen value
           features in the generated joint-feature vectors.

        :param alwayson_features: If true, then include always-on
           features in the generated joint-feature vectors.
        """
        # Feature ids must form a dense, zero-based range so they can be
        # used directly as vector indexes.
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError('Mapping values must be exactly the '
                             'set of integers from 0...len(mapping)')

        self._labels = list(labels)
        """A list of attested labels."""

        self._mapping = mapping
        """dict mapping from (fname,fval,label) -> fid"""

        self._length = len(mapping)
        """The length of generated joint feature vectors."""

        self._alwayson = None
        """dict mapping from label -> fid"""

        self._unseen = None
        """dict mapping from fname -> fid"""

        # Optional feature families are appended after the binary features,
        # extending the vector length accordingly.
        if alwayson_features:
            self._alwayson = dict([(label,i+self._length)
                                   for (i,label) in enumerate(labels)])
            self._length += len(self._alwayson)

        if unseen_features:
            fnames = set(fname for (fname, fval, label) in mapping)
            self._unseen = dict([(fname, i+self._length)
                                 for (i, fname) in enumerate(fnames)])
            self._length += len(fnames)

    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []

        # Convert input-features to joint-features:
        for fname, fval in featureset.items():
            # Known feature name & value:
            if (fname, fval, label) in self._mapping:
                encoding.append((self._mapping[fname, fval, label], 1))

            # Otherwise, we might want to fire an "unseen-value feature".
            elif self._unseen:
                # Have we seen this fname/fval combination with any label?
                for label2 in self._labels:
                    if (fname, fval, label2) in self._mapping:
                        break # we've seen this fname/fval combo
                # We haven't -- fire the unseen-value feature
                else:
                    if fname in self._unseen:
                        encoding.append((self._unseen[fname], 1))

        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))

        return encoding

    def describe(self, f_id):
        # Inherit docs.
        # NOTE(review): `long` is Python 2 only; this isinstance check will
        # raise NameError on Python 3.
        if not isinstance(f_id, (int, long)):
            raise TypeError('describe() expected an int')
        try:
            self._inv_mapping
        except AttributeError:
            # Lazily build and cache the fid -> (fname, fval, label) table.
            self._inv_mapping = [-1]*len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info

        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return '%s==%r and label is %r' % (fname, fval, label)
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id==f_id2: return 'label is %r' % label
        elif self._unseen and f_id in self._unseen.values():
            # NOTE(review): if neither branch's inner loop matches, this
            # method implicitly returns None instead of reaching the
            # ValueError below -- confirm whether that is intended.
            for (fname, f_id2) in self._unseen.items():
                if f_id==f_id2: return '%s is unseen' % fname
        else:
            raise ValueError('Bad feature id')

    def labels(self):
        # Inherit docs.
        return self._labels

    def length(self):
        # Inherit docs.
        return self._length

    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.  See the class description
        ``BinaryMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.

        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features.  If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.

        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier.  If not specified, then the set of labels
            attested in ``train_toks`` will be used.

        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {}              # maps (fname, fval, label) -> fid
        seen_labels = set()       # The set of labels we've encountered
        count = defaultdict(int)  # maps (fname, fval) -> count

        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError('Unexpected label %s' % label)
            seen_labels.add(label)

            # Record each of the features.
            for (fname, fval) in tok.items():

                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                count[fname,fval] += 1
                if count[fname,fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)

        if labels is None: labels = seen_labels
        return cls(labels, mapping, **options)
class GISEncoding(BinaryMaxentFeatureEncoding):
    """
    A ``BinaryMaxentFeatureEncoding`` extended with one extra
    joint-feature: a "correction" feature whose value is chosen so
    that every encoded feature vector sums to the same non-negative
    constant.  This guarantees the two preconditions of the GIS
    training algorithm:
    - At least one feature vector index must be nonzero for every
      token.
    - The feature vector must sum to a constant non-negative number
      for every token.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False, C=None):
        """
        :param C: The correction constant.  The correction feature's
            value is ``C - sum([v for (f,v) in encoding])``.  When not
            given, it defaults to one more than the number of distinct
            feature names in ``mapping``.
        :seealso: ``BinaryMaxentFeatureEncoding.__init__``
        """
        BinaryMaxentFeatureEncoding.__init__(
            self, labels, mapping, unseen_features, alwayson_features)
        if C is None:
            # Default: (number of distinct fnames) + 1.
            C = len(set(fname for (fname, fval, label) in mapping)) + 1
        self._C = C

    @property
    def C(self):
        """The non-negative constant that all encoded feature vectors
        will sum to."""
        return self._C

    def encode(self, featureset, label):
        # Start from the plain binary encoding.
        vector = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
        offset = BinaryMaxentFeatureEncoding.length(self)
        # Append the correction feature so the whole vector sums to C.
        total = sum(v for (f, v) in vector)
        if total >= self._C:
            raise ValueError('Correction feature is not high enough!')
        vector.append((offset, self._C - total))
        return vector

    def length(self):
        # One extra slot for the correction feature.
        return BinaryMaxentFeatureEncoding.length(self) + 1

    def describe(self, f_id):
        if f_id == BinaryMaxentFeatureEncoding.length(self):
            return 'Correction feature (%s)' % self._C
        return BinaryMaxentFeatureEncoding.describe(self, f_id)
class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
    """
    A feature encoding in TADM event-file style: joint-features are
    keyed by ``(feature, label)`` pairs, and non-integer feature
    values are themselves assigned dense integer ids.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        # Ordered mappings, so feature ids are assigned (and later
        # written out) in first-seen order.
        self._mapping = OrderedDict(mapping)
        self._label_mapping = OrderedDict()
        BinaryMaxentFeatureEncoding.__init__(self, labels, self._mapping,
                                             unseen_features,
                                             alwayson_features)
    def encode(self, featureset, label):
        """Encode ``featureset``/``label`` as (feature-id, value-id) pairs.
        NOTE(review): unlike the parent class, this grows
        ``self._mapping`` and ``self._label_mapping`` on the fly for
        unseen inputs, so encoding mutates the encoding object itself.
        """
        encoding = []
        for feature, value in featureset.items():
            if (feature, label) not in self._mapping:
                # First time we see this (feature, label): allocate a new id.
                self._mapping[(feature, label)] = len(self._mapping)
            if value not in self._label_mapping:
                if not isinstance(value, int):
                    # Non-integer values get the next dense integer id.
                    self._label_mapping[value] = len(self._label_mapping)
                else:
                    # Integer values are used verbatim as their own id.
                    self._label_mapping[value] = value
            encoding.append((self._mapping[(feature, label)],
                             self._label_mapping[value]))
        return encoding
    def labels(self):
        # Inherit docs.
        return self._labels
    def describe(self, fid):
        # Linear scan for the (feature, label) pair carrying this id.
        # NOTE(review): falls through and returns None for an unknown
        # fid, unlike the parent class which raises ValueError --
        # confirm whether that is intentional.
        for (feature, label) in self._mapping:
            if self._mapping[(feature, label)] == fid:
                return (feature, label)
    def length(self):
        # Inherit docs.
        return len(self._mapping)
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """Build an encoding covering every (feature, label) pair for
        every label attested anywhere in ``train_toks``.
        NOTE(review): ``count_cutoff`` is accepted for interface
        compatibility but never used by this implementation.
        """
        mapping = OrderedDict()
        if not labels:
            labels = []
        # This gets read twice, so compute the values in case it's lazy.
        train_toks = list(train_toks)
        for (featureset, label) in train_toks:
            if label not in labels:
                labels.append(label)
        for (featureset, label) in train_toks:
            for label in labels:
                for feature in featureset:
                    if (feature, label) not in mapping:
                        mapping[(feature, label)] = len(mapping)
        return cls(labels, mapping, **options)
class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing integer,
    float and binary joint-features of the form:
    Binary (for string and boolean features):
    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise
    Value (for integer and float features):
    |  joint_feat(fs, l) = { fval if  (fs[fname] == type(fval))
    |                      {      and (l == label)
    |                      {
    |                      { not encoded otherwise
    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.
    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.
    For string and boolean features [type(fval) not in (int, float)]
    this method will create one feature for each combination of
    ``fname``, ``fval``, and ``label`` that occurs at least once in the
    training corpus.
    For integer and float features [type(fval) in (int, float)] this
    method will create one feature for each combination of ``fname``
    and ``label`` that occurs at least once in the training corpus.
    For binary features the ``unseen_features`` parameter can be used
    to add "unseen-value features", which are used whenever an input
    feature has a value that was not encountered in the training
    corpus.  These features have the form:
    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise
    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.
    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form:
    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise
    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        """
        :param labels: A list of the \"known labels\" for this encoding.
        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode({..., fname:fval, ...``, label)[id]} is 1;
            otherwise, it is 0.
        :param unseen_features: If true, then include unseen value
            features in the generated joint-feature vectors.
        :param alwayson_features: If true, then include always-on
            features in the generated joint-feature vectors.
        :raises ValueError: if the mapping's ids are not exactly the
            dense range 0...len(mapping).
        """
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError('Mapping values must be exactly the '
                             'set of integers from 0...len(mapping)')
        self._labels = list(labels)
        """A list of attested labels."""
        self._mapping = mapping
        """dict mapping from (fname,fval,label) -> fid"""
        self._length = len(mapping)
        """The length of generated joint feature vectors."""
        self._alwayson = None
        """dict mapping from label -> fid"""
        self._unseen = None
        """dict mapping from fname -> fid"""
        if alwayson_features:
            # Always-on features occupy the ids directly after the mapping.
            self._alwayson = dict([(label,i+self._length)
                                   for (i,label) in enumerate(labels)])
            self._length += len(self._alwayson)
        if unseen_features:
            # Unseen-value features occupy the ids after the always-on ones.
            fnames = set(fname for (fname, fval, label) in mapping)
            self._unseen = dict([(fname, i+self._length)
                                 for (i, fname) in enumerate(fnames)])
            self._length += len(fnames)
    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []
        # Convert input-features to joint-features:
        for fname, fval in featureset.items():
            if(type(fval) in (int, float)):
                # Typed (numeric) feature: keyed by the value's *type*,
                # and emitted with the numeric value itself.
                if (fname, type(fval), label) in self._mapping:
                    encoding.append((self._mapping[fname, type(fval), label], fval))
            else:
                # Binary feature: known feature name & value.
                if (fname, fval, label) in self._mapping:
                    encoding.append((self._mapping[fname, fval, label], 1))
                # Otherwise, we might want to fire an "unseen-value feature".
                elif self._unseen:
                    # Have we seen this fname/fval combination with any label?
                    for label2 in self._labels:
                        if (fname, fval, label2) in self._mapping:
                            break # we've seen this fname/fval combo
                    # We haven't -- fire the unseen-value feature
                    else:
                        if fname in self._unseen:
                            encoding.append((self._unseen[fname], 1))
        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))
        return encoding
    def describe(self, f_id):
        # Inherit docs.
        if not isinstance(f_id, (int, long)):  # `long` exists only in Python 2
            raise TypeError('describe() expected an int')
        try:
            self._inv_mapping
        except AttributeError:
            # Lazily build the inverse map (fid -> (fname, fval, label)).
            self._inv_mapping = [-1]*len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info
        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return '%s==%r and label is %r' % (fname, fval, label)
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id==f_id2: return 'label is %r' % label
        elif self._unseen and f_id in self._unseen.values():
            for (fname, f_id2) in self._unseen.items():
                if f_id==f_id2: return '%s is unseen' % fname
        else:
            raise ValueError('Bad feature id')
    def labels(self):
        # Inherit docs.
        return self._labels
    def length(self):
        # Inherit docs.
        return self._length
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``. See the class description
        ``TypedMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.
        Note: recognized feature values types are (int, float), over
        types are interpreted as regular binary features.
        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.
        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features. If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.
        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier. If not specified, then the set of labels
            attested in ``train_toks`` will be used.
        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {} # maps (fname, fval, label) -> fid
        seen_labels = set() # The set of labels we've encountered
        count = defaultdict(int) # maps (fname, fval) -> count
        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError('Unexpected label %s' % label)
            seen_labels.add(label)
            # Record each of the features.
            for (fname, fval) in tok.items():
                # Collapse numeric values to their *type*, so typed
                # features are keyed by (fname, type, label).
                if(type(fval) in (int, float)): fval = type(fval)
                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                count[fname,fval] += 1
                if count[fname,fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)
        if labels is None: labels = seen_labels
        return cls(labels, mapping, **options)
######################################################################
#{ Classifier Trainer: Generalized Iterative Scaling
######################################################################
def train_maxent_classifier_with_gis(train_toks, trace=3, encoding=None,
labels=None, **cutoffs):
"""
Train a new ``ConditionalExponentialClassifier``, using the given
training samples, using the Generalized Iterative Scaling
algorithm. This ``ConditionalExponentialClassifier`` will encode
the model that maximizes entropy from all the models that are
empirically consistent with ``train_toks``.
:see: ``train_maxent_classifier()`` for parameter descriptions.
"""
cutoffs.setdefault('max_iter', 100)
cutoffchecker = CutoffChecker(cutoffs)
# Construct an encoding from the training data.
if encoding is None:
encoding = GISEncoding.train(train_toks, labels=labels)
if not hasattr(encoding, 'C'):
raise TypeError('The GIS algorithm requires an encoding that '
'defines C (e.g., GISEncoding).')
# Cinv is the inverse of the sum of each joint feature vector.
# This controls the learning rate: higher Cinv (or lower C) gives
# faster learning.
Cinv = 1.0/encoding.C
# Count how many times each feature occurs in the training data.
empirical_fcount = calculate_empirical_fcount(train_toks, encoding)
# Check for any features that are not attested in train_toks.
unattested = set(numpy.nonzero(empirical_fcount==0)[0])
# Build the classifier. Start with weight=0 for each attested
# feature, and weight=-infinity for each unattested feature.
weights = numpy.zeros(len(empirical_fcount), 'd')
for fid in unattested: weights[fid] = numpy.NINF
classifier = ConditionalExponentialClassifier(encoding, weights)
# Take the log of the empirical fcount.
log_empirical_fcount = numpy.log2(empirical_fcount)
del empirical_fcount
# Old log-likelihood and accuracy; used to check if the change
# in log-likelihood or accuracy is sufficient to indicate convergence.
ll_old = None
acc_old = None
if trace > 0: print ' ==> Training (%d iterations)' % cutoffs['max_iter']
if trace > 2:
print
print ' Iteration Log Likelihood Accuracy'
print ' ---------------------------------------'
# Train the classifier.
try:
while True:
if trace > 2:
ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
acc = cutoffchecker.acc or accuracy(classifier, train_toks)
iternum = cutoffchecker.iter
print ' %9d %14.5f %9.3f' % (iternum, ll, acc)
# Use the model to estimate the number of times each
# feature should occur in the training data.
estimated_fcount = calculate_estimated_fcount(
classifier, train_toks, encoding)
# Take the log of estimated fcount (avoid taking log(0).)
for fid in unattested: estimated_fcount[fid] += 1
log_estimated_fcount = numpy.log2(estimated_fcount)
del estimated_fcount
# Update the classifier weights
weights = classifier.weights()
weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
classifier.set_weights(weights)
# Check the log-likelihood & accuracy cutoffs.
if cutoffchecker.check(classifier, train_toks):
break
except KeyboardInterrupt:
print ' Training stopped: keyboard interrupt'
except:
raise
if trace > 2:
ll = log_likelihood(classifier, train_toks)
acc = accuracy(classifier, train_toks)
print ' Final %14.5f %9.3f' % (ll, acc)
# Return the classifier.
return classifier
def calculate_empirical_fcount(train_toks, encoding):
    """Return an array with the total value of each joint-feature
    across all training tokens, using their gold labels."""
    fcount = numpy.zeros(encoding.length(), 'd')
    for featureset, label in train_toks:
        for fid, fval in encoding.encode(featureset, label):
            fcount[fid] += fval
    return fcount
def calculate_estimated_fcount(classifier, train_toks, encoding):
    """Return an array with each joint-feature's expected count under
    the classifier's current label distribution for every token."""
    fcount = numpy.zeros(encoding.length(), 'd')
    for featureset, _gold in train_toks:
        pdist = classifier.prob_classify(featureset)
        for label in pdist.samples():
            prob = pdist.prob(label)
            for fid, fval in encoding.encode(featureset, label):
                fcount[fid] += prob * fval
    return fcount
######################################################################
#{ Classifier Trainer: Improved Iterative Scaling
######################################################################
def train_maxent_classifier_with_iis(train_toks, trace=3, encoding=None,
labels=None, **cutoffs):
"""
Train a new ``ConditionalExponentialClassifier``, using the given
training samples, using the Improved Iterative Scaling algorithm.
This ``ConditionalExponentialClassifier`` will encode the model
that maximizes entropy from all the models that are empirically
consistent with ``train_toks``.
:see: ``train_maxent_classifier()`` for parameter descriptions.
"""
cutoffs.setdefault('max_iter', 100)
cutoffchecker = CutoffChecker(cutoffs)
# Construct an encoding from the training data.
if encoding is None:
encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
# Count how many times each feature occurs in the training data.
empirical_ffreq = (calculate_empirical_fcount(train_toks, encoding) /
len(train_toks))
# Find the nf map, and related variables nfarray and nfident.
# nf is the sum of the features for a given labeled text.
# nfmap compresses this sparse set of values to a dense list.
# nfarray performs the reverse operation. nfident is
# nfarray multiplied by an identity matrix.
nfmap = calculate_nfmap(train_toks, encoding)
nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), 'd')
nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))
# Check for any features that are not attested in train_toks.
unattested = set(numpy.nonzero(empirical_ffreq==0)[0])
# Build the classifier. Start with weight=0 for each attested
# feature, and weight=-infinity for each unattested feature.
weights = numpy.zeros(len(empirical_ffreq), 'd')
for fid in unattested: weights[fid] = numpy.NINF
classifier = ConditionalExponentialClassifier(encoding, weights)
if trace > 0: print ' ==> Training (%d iterations)' % cutoffs['max_iter']
if trace > 2:
print
print ' Iteration Log Likelihood Accuracy'
print ' ---------------------------------------'
# Old log-likelihood and accuracy; used to check if the change
# in log-likelihood or accuracy is sufficient to indicate convergence.
ll_old = None
acc_old = None
# Train the classifier.
try:
while True:
if trace > 2:
ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
acc = cutoffchecker.acc or accuracy(classifier, train_toks)
iternum = cutoffchecker.iter
print ' %9d %14.5f %9.3f' % (iternum, ll, acc)
# Calculate the deltas for this iteration, using Newton's method.
deltas = calculate_deltas(
train_toks, classifier, unattested, empirical_ffreq,
nfmap, nfarray, nftranspose, encoding)
# Use the deltas to update our weights.
weights = classifier.weights()
weights += deltas
classifier.set_weights(weights)
# Check the log-likelihood & accuracy cutoffs.
if cutoffchecker.check(classifier, train_toks):
break
except KeyboardInterrupt:
print ' Training stopped: keyboard interrupt'
except:
raise
if trace > 2:
ll = log_likelihood(classifier, train_toks)
acc = accuracy(classifier, train_toks)
print ' Final %14.5f %9.3f' % (ll, acc)
# Return the classifier.
return classifier
def calculate_nfmap(train_toks, encoding):
    """
    Construct a map that can be used to compress ``nf`` (which is
    typically sparse).
    *nf(feature_vector)* is the sum of the feature values for
    *feature_vector*.  This method finds every value of *nf(t)*
    attested for at least one (token, label) pair, and maps those
    attested values onto the dense range *0...N*.  For example, if
    the only attested values of *nf()* were 3, 5, and 7, the result
    might be ``{3:0, 5:1, 7:2}``.
    :return: A map that can be used to compress ``nf`` to a dense
        vector.
    :rtype: dict(int -> int)
    """
    # Collect the distinct nf values; the dense indices then come from
    # enumerating that (small) set.
    attested = set()
    for featureset, _ in train_toks:
        for label in encoding.labels():
            vector = encoding.encode(featureset, label)
            attested.add(sum(val for (fid, val) in vector))
    return dict((nf, i) for (i, nf) in enumerate(attested))
def calculate_deltas(train_toks, classifier, unattested, ffreq_empirical,
                     nfmap, nfarray, nftranspose, encoding):
    """
    Calculate the update values for the classifier weights for
    this iteration of IIS. These update weights are the value of
    ``delta`` that solves the equation::
      ffreq_empirical[i]
             =
      SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
                 feature_vector(fs,l)[i] *
                 exp(delta[i] * nf(feature_vector(fs,l))))
    Where:
    - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
    - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
    - *nf(vector)* = ``sum([val for (id,val) in vector])``
    This method uses Newton's method to solve this equation for
    *delta[i]*. In particular, it starts with a guess of
    ``delta[i]`` = 1; and iteratively updates ``delta`` with:
    | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])
    until convergence, where *sum1* and *sum2* are defined as:
    |    sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
    |    sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
    |  f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
    |                      feature_vector(fs,l)[i] .
    |                      exp(delta[i] . nf(feature_vector(fs,l))))
    Note that *sum1* and *sum2* depend on ``delta``; so they need
    to be re-computed each iteration.
    The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
    used to generate a dense encoding for *nf(ltext)*. This
    allows ``_deltas`` to calculate *sum1* and *sum2* using
    matrices, which yields a significant performance improvement.
    :param train_toks: The set of training tokens.
    :type train_toks: list(tuple(dict, str))
    :param classifier: The current classifier.
    :type classifier: ClassifierI
    :param ffreq_empirical: An array containing the empirical
        frequency for each feature. The *i*\ th element of this
        array is the empirical frequency for feature *i*.
    :type ffreq_empirical: sequence of float
    :param unattested: An array that is 1 for features that are
        not attested in the training data; and 0 for features that
        are attested. In other words, ``unattested[i]==0`` iff
        ``ffreq_empirical[i]==0``.
    :type unattested: sequence of int
    :param nfmap: A map that can be used to compress ``nf`` to a dense
        vector.
    :type nfmap: dict(int -> int)
    :param nfarray: An array that can be used to uncompress ``nf``
        from a dense vector.
    :type nfarray: array(float)
    :param nftranspose: The transpose of ``nfarray``
    :type nftranspose: array(float)
    """
    # These parameters control when we decide that we've
    # converged. It probably should be possible to set these
    # manually, via keyword arguments to train.
    NEWTON_CONVERGE = 1e-12
    MAX_NEWTON = 300
    # Initial guess: delta[i] = 1 for every feature.
    deltas = numpy.ones(encoding.length(), 'd')
    # Precompute the A matrix:
    # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
    # over all label,fs s.t. num_features[label,fs]=nf
    A = numpy.zeros((len(nfmap), encoding.length()), 'd')
    for tok, label in train_toks:
        dist = classifier.prob_classify(tok)
        for label in encoding.labels():
            # Generate the feature vector
            feature_vector = encoding.encode(tok,label)
            # Find the number of active features
            nf = sum([val for (id, val) in feature_vector])
            # Update the A matrix
            for (id, val) in feature_vector:
                A[nfmap[nf], id] += dist.prob(label) * val
    # Normalize by corpus size, turning counts into frequencies.
    A /= len(train_toks)
    # Iteratively solve for delta. Use the following variables:
    # - nf_delta[x][y] = nfarray[x] * delta[y]
    # - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
    # - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
    # - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    # exp(delta[i]nf)
    # - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    # nf exp(delta[i]nf)
    for rangenum in range(MAX_NEWTON):
        nf_delta = numpy.outer(nfarray, deltas)
        # Weights in this module are base-2 (cf. numpy.log2 in the
        # trainers), hence 2** rather than exp().
        exp_nf_delta = 2 ** nf_delta
        nf_exp_nf_delta = nftranspose * exp_nf_delta
        sum1 = numpy.sum(exp_nf_delta * A, axis=0)
        sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)
        # Avoid division by zero.
        for fid in unattested: sum2[fid] += 1
        # Update the deltas.
        deltas -= (ffreq_empirical - sum1) / -sum2
        # We can stop once we converge.
        n_error = (numpy.sum(abs((ffreq_empirical-sum1)))/
                   numpy.sum(abs(deltas)))
        if n_error < NEWTON_CONVERGE:
            return deltas
    # Newton iteration hit MAX_NEWTON without converging; return the
    # current estimate anyway.
    return deltas
######################################################################
#{ Classifier Trainer: scipy algorithms (GC, LBFGSB, etc.)
######################################################################
# [xx] n.b.: it's possible to supply custom trace functions, which
# could be used to make trace output consistent with iis/gis.
def train_maxent_classifier_with_scipy(train_toks, trace=3, encoding=None,
                                       labels=None, algorithm='CG',
                                       sparse=True, gaussian_prior_sigma=0,
                                       **cutoffs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the specified ``scipy`` optimization
    algorithm. This ``ConditionalExponentialClassifier`` will encode
    the model that maximizes entropy from all the models that are
    empirically consistent with ``train_toks``.
    :see: ``train_maxent_classifier()`` for parameter descriptions.
    :require: The ``scipy`` package must be installed.
    :raises ValueError: if scipy is missing/unimportable, or if both
        ``encoding`` and ``labels`` are specified.
    """
    try:
        import scipy
    except ImportError, e:
        raise ValueError('The maxent training algorithm %r requires '
                         'that the scipy package be installed. See '
                         'http://www.scipy.org/' % algorithm)
    try:
        # E.g., if libgfortran.2.dylib is not found.
        # NOTE(review): scipy.maxentropy was removed in SciPy 0.11;
        # this code requires an older SciPy -- confirm pinned version.
        import scipy.sparse, scipy.maxentropy
    except ImportError, e:
        raise ValueError('Import of scipy package failed: %s' % e)
    # Construct an encoding from the training data.
    if encoding is None:
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
    elif labels is not None:
        raise ValueError('Specify encoding or labels, not both')
    labels = encoding.labels()
    labelnum = dict([(label, i) for (i, label) in enumerate(labels)])
    num_features = encoding.length()
    num_toks = len(train_toks)
    num_labels = len(labels)
    # Decide whether to use a sparse matrix or a dense one. Very
    # limited testing has shown that the lil matrix format
    # (list-of-lists) performs better than csr and csc formats.
    # Limited testing also suggests that the sparse matrix format
    # doesn't save much memory over the dense format in practice
    # (in terms of max memory usage).
    if sparse: zeros = scipy.sparse.lil_matrix
    else: zeros = numpy.zeros
    # Construct the 'F' matrix, which lists the feature values for
    # each training instance. F[i, j*len(labels)+k] is equal to the
    # value of the i'th feature for the feature vector corresponding
    # to (tok[j], label[k]).
    F = zeros((num_features, num_toks*num_labels))
    # Construct the 'N' matrix, which specifies the correct label for
    # each training instance. N[0, j*len(labels)+k] is equal to one
    # iff label[k] is the correct label for tok[j].
    N = zeros((1, num_toks*num_labels))
    # Fill in the 'F' and 'N' matrices (just make one pass through the
    # training tokens.)
    for toknum, (featureset, label) in enumerate(train_toks):
        N[0, toknum*len(labels) + labelnum[label]] += 1
        for label2 in labels:
            for (fid, fval) in encoding.encode(featureset, label2):
                F[fid, toknum*len(labels) + labelnum[label2]] = fval
    # Set up the scipy model, based on the matrices F and N.
    model = scipy.maxentropy.conditionalmodel(F, N, num_toks)
    # note -- model.setsmooth() is buggy.
    if gaussian_prior_sigma:
        model.sigma2 = gaussian_prior_sigma**2
    if algorithm == 'LBFGSB':
        model.log = None
    if trace >= 3:
        model.verbose = True
    if 'max_iter' in cutoffs:
        model.maxiter = cutoffs['max_iter']
    # Each scipy algorithm reads its tolerance from a different attribute.
    if 'tolerance' in cutoffs:
        if algorithm == 'CG': model.avegtol = cutoffs['tolerance']
        elif algorithm == 'LBFGSB': model.maxgtol = cutoffs['tolerance']
        else: model.tol = cutoffs['tolerance']
    # Train the model.
    model.fit(algorithm=algorithm)
    # Convert the model's weights from base-e to base-2 weights.
    weights = model.params * numpy.log2(numpy.e)
    # Build the classifier
    return MaxentClassifier(encoding, weights)
######################################################################
#{ Classifier Trainer: megam
######################################################################
# [xx] possible extension: add support for using implicit file format;
# this would need to put requirements on what encoding is used. But
# we may need this for other maxent classifier trainers that require
# implicit formats anyway.
def train_maxent_classifier_with_megam(train_toks, trace=3, encoding=None,
                                       labels=None, gaussian_prior_sigma=0,
                                       **kwargs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the external ``megam`` library. This
    ``ConditionalExponentialClassifier`` will encode the model that
    maximizes entropy from all the models that are empirically
    consistent with ``train_toks``.
    :see: ``train_maxent_classifier()`` for parameter descriptions.
    :see: ``nltk.classify.megam``
    :raises ValueError: if both ``encoding`` and ``labels`` are given,
        or if the megam training file cannot be written.
    """
    # File-format flags for the megam training file; both default on
    # and may be overridden via kwargs.
    explicit = True
    bernoulli = True
    if 'explicit' in kwargs: explicit = kwargs['explicit']
    if 'bernoulli' in kwargs: bernoulli = kwargs['bernoulli']
    # Construct an encoding from the training data.
    if encoding is None:
        # Count cutoff can also be controlled by megam with the -minfc
        # option. Not sure where the best place for it is.
        count_cutoff = kwargs.get('count_cutoff', 0)
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, count_cutoff,
                                                     labels=labels,
                                                     alwayson_features=True)
    elif labels is not None:
        raise ValueError('Specify encoding or labels, not both')
    # Write a training file for megam.
    try:
        fd, trainfile_name = tempfile.mkstemp(prefix='nltk-', suffix='.gz')
        trainfile = gzip.open(trainfile_name, 'wb')
        write_megam_file(train_toks, encoding, trainfile, \
                         explicit=explicit, bernoulli=bernoulli)
        trainfile.close()
    except (OSError, IOError, ValueError), e:
        raise ValueError('Error while creating megam training file: %s' % e)
    # Run megam on the training file.
    # '-nobias' disables megam's implicit bias feature -- presumably
    # because the encoding built above already supplies always-on
    # features (alwayson_features=True); confirm against megam docs.
    options = []
    options += ['-nobias', '-repeat', '10']
    if explicit:
        options += ['-explicit']
    if not bernoulli:
        options += ['-fvals']
    if gaussian_prior_sigma:
        # Lambda is just the precision of the Gaussian prior, i.e. it's the
        # inverse variance, so the parameter conversion is 1.0/sigma**2.
        # See http://www.cs.utah.edu/~hal/docs/daume04cg-bfgs.pdf.
        inv_variance = 1.0 / gaussian_prior_sigma**2
    else:
        inv_variance = 0
    options += ['-lambda', '%.2f' % inv_variance, '-tune']
    if trace < 3:
        options += ['-quiet']
    if 'max_iter' in kwargs:
        options += ['-maxi', '%s' % kwargs['max_iter']]
    if 'll_delta' in kwargs:
        # [xx] this is actually a perplexity delta, not a log
        # likelihood delta
        options += ['-dpp', '%s' % abs(kwargs['ll_delta'])]
    if hasattr(encoding, 'cost'):
        options += ['-multilabel'] # each possible la
    # Positional arguments: the megam problem type, then the file.
    options += ['multiclass', trainfile_name]
    stdout = call_megam(options)
    # print './megam_i686.opt ', ' '.join(options)
    # Delete the training file
    try: os.remove(trainfile_name)
    except (OSError, IOError), e:
        print 'Warning: unable to delete %s: %s' % (trainfile_name, e)
    # Parse the generated weight vector.
    weights = parse_megam_weights(stdout, encoding.length(), explicit)
    # Convert from base-e to base-2 weights.
    weights *= numpy.log2(numpy.e)
    # Build the classifier
    return MaxentClassifier(encoding, weights)
######################################################################
#{ Classifier Trainer: tadm
######################################################################
class TadmMaxentClassifier(MaxentClassifier):
    """A ``MaxentClassifier`` trained via the external ``tadm`` tool."""
    @classmethod
    def train(cls, train_toks, **kwargs):
        """
        Train a classifier by writing ``train_toks`` to a temporary
        tadm event file, invoking ``tadm`` on it, and parsing the
        weight file tadm produces.
        Recognized keyword arguments: ``algorithm`` (default
        'tao_lmvm'), ``trace``, ``encoding``, ``labels``,
        ``gaussian_prior_sigma``, ``count_cutoff``, ``max_iter``,
        ``min_lldelta``.
        """
        algorithm = kwargs.get('algorithm', 'tao_lmvm')
        trace = kwargs.get('trace', 3)
        encoding = kwargs.get('encoding', None)
        labels = kwargs.get('labels', None)
        sigma = kwargs.get('gaussian_prior_sigma', 0)
        count_cutoff = kwargs.get('count_cutoff', 0)
        max_iter = kwargs.get('max_iter')
        ll_delta = kwargs.get('min_lldelta')
        # Construct an encoding from the training data.
        if not encoding:
            encoding = TadmEventMaxentFeatureEncoding.train(train_toks,
                                                            count_cutoff,
                                                            labels=labels)
        # Temp files for tadm's input events and output weights.
        trainfile_fd, trainfile_name = \
            tempfile.mkstemp(prefix='nltk-tadm-events-', suffix='.gz')
        weightfile_fd, weightfile_name = \
            tempfile.mkstemp(prefix='nltk-tadm-weights-')
        trainfile = gzip.open(trainfile_name, 'wb')
        write_tadm_file(train_toks, encoding, trainfile)
        trainfile.close()
        options = []
        options.extend(['-monitor'])
        options.extend(['-method', algorithm])
        if sigma:
            # tadm's -l2 option is given the variance (sigma squared).
            options.extend(['-l2', '%.6f' % sigma**2])
        if max_iter:
            options.extend(['-max_it', '%d' % max_iter])
        if ll_delta:
            options.extend(['-fatol', '%.6f' % abs(ll_delta)])
        options.extend(['-events_in', trainfile_name])
        options.extend(['-params_out', weightfile_name])
        if trace < 3:
            # NOTE(review): '2>&1' is passed as a plain argument; it
            # only acts as a redirection if call_tadm goes through a
            # shell -- confirm call_tadm's invocation style.
            options.extend(['2>&1'])
        else:
            options.extend(['-summary'])
        call_tadm(options)
        weightfile = open(weightfile_name, 'rb')
        weights = parse_tadm_weights(weightfile)
        weightfile.close()
        # Clean up the temp files.
        os.remove(trainfile_name)
        os.remove(weightfile_name)
        # Convert from base-e to base-2 weights.
        weights *= numpy.log2(numpy.e)
        # Build the classifier
        return cls(encoding, weights)
######################################################################
#{ Demo
######################################################################
def demo():
    """Run the names-corpus demonstration for ``MaxentClassifier``."""
    from nltk.classify.util import names_demo
    names_demo(MaxentClassifier.train)
if __name__ == '__main__':
demo() | unknown | codeparrot/codeparrot-clean | ||
import urllib
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../common/"))
import sleep
def main(request, response):
    """wptserve handler that builds a scripted HTTP response from the
    query string.  Each ``&``-separated token is an instruction and is
    processed in order:

      ignored*      -- skipped
      <float>ms     -- delay the response by that many milliseconds
      redirect:URL  -- immediately return a 302 to the (urlencoded) URL
      mime:TYPE     -- queue a Content-Type header (sent with first "send:")
      send:TEXT     -- write status/headers if not yet sent, then TEXT
      status:CODE   -- write the given status line
      flush         -- flush the output writer
    """
    # Everything after '?' is the instruction list (raises ValueError if
    # there is no query string at all).
    index = request.request_path.index("?")
    args = request.request_path[index+1:].split("&")
    headers = []
    # Track what has already been written to the wire so "send:" emits the
    # status line and header block only once.
    statusSent = False
    headersSent = False
    for arg in args:
        if arg.startswith("ignored"):
            continue
        elif arg.endswith("ms"):
            sleep.sleep_at_least(float(arg[0:-2]))
        elif arg.startswith("redirect:"):
            return (302, "WEBPERF MARKETING"), [("Location", urllib.unquote(arg[9:]))], "TEST"
        elif arg.startswith("mime:"):
            headers.append(("Content-Type", urllib.unquote(arg[5:])))
        elif arg.startswith("send:"):
            text = urllib.unquote(arg[5:])
            if not statusSent:
                # Default to a 200 status code.
                response.writer.write_status(200)
                statusSent = True
            if not headersSent:
                for key, value in headers:
                    response.writer.write_header(key, value)
                response.writer.end_headers()
                headersSent = True
            response.writer.write_content(text)
        elif arg.startswith("status:"):
            code = int(urllib.unquote(arg[7:]))
            response.writer.write_status(code)
            if code // 100 == 1:
                # Terminate informational 1XX responses with an empty line.
                response.writer.end_headers()
            else:
                statusSent = True
        elif arg == "flush":
            response.writer.flush()
        # else:
        #     error " INVALID ARGUMENT %s" % arg
from Screens.InfoBar import InfoBar
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.config import config, getConfigListEntry
from enigma import eEPGCache
from time import time
class SleepTimerEdit(ConfigListScreen, Screen):
    """Setup screen for the sleep, inactivity and standby-shutdown timers.

    The config list is rebuilt in createSetup() after every value change,
    so dependent entries (e.g. blocktime start/end) appear only when their
    parent option is enabled.
    """
    def __init__(self, session):
        Screen.__init__(self, session)
        self.skinName = ["SleepTimerSetup", "Setup"]
        self.setup_title = _("SleepTimer Configuration")
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Save"))
        self["description"] = Label("")
        self.list = []
        ConfigListScreen.__init__(self, self.list, session = session)
        self.createSetup()
        self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "green": self.ok,
            "red": self.cancel,
            "cancel": self.cancel,
            "ok": self.ok,
        }, -2)
        self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        # The title can only be applied once the layout exists.
        self.setTitle(self.setup_title)
    def createSetup(self):
        """(Re)build the config list; conditional entries are included
        based on the current values of their parent options."""
        self.list = []
        if InfoBar.instance and InfoBar.instance.sleepTimer.isActive():
            statusSleeptimerText = _("(activated +%d min)") % InfoBar.instance.sleepTimerState()
        else:
            statusSleeptimerText = _("(not activated)")
        self.list.append(getConfigListEntry(_("Sleeptimer") + " " + statusSleeptimerText,
            config.usage.sleep_timer,
            _("Configure the duration in minutes for the sleeptimer. Select this entry and click OK or green to start/stop the sleeptimer")))
        self.list.append(getConfigListEntry(_("Inactivity Sleeptimer"),
            config.usage.inactivity_timer,
            _("Configure the duration in hours the receiver should go to standby when the receiver is not controlled.")))
        if int(config.usage.inactivity_timer.value):
            self.list.append(getConfigListEntry(_("Specify timeframe to ignore inactivity sleeptimer"),
                config.usage.inactivity_timer_blocktime,
                _("When enabled you can specify a timeframe were the inactivity sleeptimer is ignored. Not the detection is disabled during this timeframe but the inactivity timeout is disabled")))
            if config.usage.inactivity_timer_blocktime.value:
                self.list.append(getConfigListEntry(_("Start time to ignore inactivity sleeptimer"),
                    config.usage.inactivity_timer_blocktime_begin,
                    _("Specify the start time when the inactivity sleeptimer should be ignored")))
                self.list.append(getConfigListEntry(_("End time to ignore inactivity sleeptimer"),
                    config.usage.inactivity_timer_blocktime_end,
                    _("Specify the end time until the inactivity sleeptimer should be ignored")))
        self.list.append(getConfigListEntry(_("Shutdown when in Standby"),
            config.usage.standby_to_shutdown_timer,
            _("Configure the duration when the receiver should go to shut down in case the receiver is in standby mode.")))
        if int(config.usage.standby_to_shutdown_timer.value):
            self.list.append(getConfigListEntry(_("Specify timeframe to ignore the shutdown in standby"),
                config.usage.standby_to_shutdown_timer_blocktime,
                _("When enabled you can specify a timeframe to ignore the shutdown timer when the receiver is in standby mode")))
            if config.usage.standby_to_shutdown_timer_blocktime.value:
                self.list.append(getConfigListEntry(_("Start time to ignore shutdown in standby"),
                    config.usage.standby_to_shutdown_timer_blocktime_begin,
                    _("Specify the start time to ignore the shutdown timer when the receiver is in standby mode")))
                self.list.append(getConfigListEntry(_("End time to ignore shutdown in standby"),
                    config.usage.standby_to_shutdown_timer_blocktime_end,
                    _("Specify the end time to ignore the shutdown timer when the receiver is in standby mode")))
        self["config"].list = self.list
        self["config"].l.setList(self.list)
    def ok(self):
        """Green/OK: save changed values; when the Sleeptimer entry is
        selected, also start/stop the timer accordingly."""
        if self["config"].isChanged():
            for x in self["config"].list:
                x[1].save()
        if self.getCurrentEntry().startswith(_("Sleeptimer")):
            sleepTimer = config.usage.sleep_timer.value
            if sleepTimer == "event_standby":
                # Run until the currently playing event ends.
                sleepTimer = self.currentEventTime()
            else:
                sleepTimer = int(sleepTimer)
            if sleepTimer or not self.getCurrentEntry().endswith(_("(not activated)")):
                InfoBar.instance.setSleepTimer(sleepTimer)
            self.close(True)
        self.close()
    def cancel(self, answer = None):
        """Red/EXIT: confirm when there are unsaved changes, then revert
        every entry and close.  Re-entered as the MessageBox callback."""
        if answer is None:
            if self["config"].isChanged():
                self.session.openWithCallback(self.cancel, MessageBox, _("Really close without saving settings?"))
            else:
                self.close()
        elif answer:
            for x in self["config"].list:
                x[1].cancel()
            self.close()
    def keyLeft(self):
        ConfigListScreen.keyLeft(self)
        # Rebuild so dependent entries follow the new value.
        self.createSetup()
    def keyRight(self):
        ConfigListScreen.keyRight(self)
        self.createSetup()
    def currentEventTime(self):
        """Return the seconds until the current event ends (movie:
        remaining playback time; DVB: EPG end time), plus the configured
        post-recording margin."""
        remaining = 0
        ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if ref:
            path = ref.getPath()
            if path: # Movie
                service = self.session.nav.getCurrentService()
                seek = service and service.seek()
                if seek:
                    length = seek.getLength()
                    position = seek.getPlayPosition()
                    if length and position:
                        remaining = length[1] - position[1]
                        if remaining > 0:
                            # Seek values are in 90 kHz PTS units.
                            remaining = remaining / 90000
            else: # DVB
                epg = eEPGCache.getInstance()
                event = epg.lookupEventTime(ref, -1, 0)
                if event:
                    now = int(time())
                    start = event.getBeginTime()
                    duration = event.getDuration()
                    end = start + duration
                    remaining = end - now
        return remaining + config.recording.margin_after.value * 60
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements functions to perform various useful operations on
entries, such as grouping entries by structure.
"""
from six.moves import filter, zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 24, 2012"
import logging
import json
import datetime
import collections
from monty.json import MontyEncoder, MontyDecoder
from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpeciesComparator
logger = logging.getLogger(__name__)
def _get_host(structure, species_to_remove):
    """Return the host framework of ``structure``.

    When ``species_to_remove`` is falsy the original structure is
    returned unchanged; otherwise a copy with those species stripped
    out is returned.
    """
    if not species_to_remove:
        return structure
    host = structure.copy()
    host.remove_species(species_to_remove)
    return host
def _perform_grouping(args):
    """Worker that groups (entry, host) pairs by structural similarity.

    ``args`` is a single tuple of (entries_json, hosts_json, ltol, stol,
    angle_tol, primitive_cell, scale, comparator, groups).  Entries and
    hosts arrive JSON-serialized so this function can be shipped to
    multiprocessing workers; each matched group is appended to ``groups``
    as a JSON-serialized list of entries.
    """
    (entries_json, hosts_json, ltol, stol, angle_tol,
     primitive_cell, scale, comparator, groups) = args
    entries = json.loads(entries_json, cls=MontyDecoder)
    hosts = json.loads(hosts_json, cls=MontyDecoder)
    # The matcher configuration never changes, so build it once instead of
    # re-instantiating it for every candidate pair (was done inside the
    # inner loop before).
    matcher = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol,
                               primitive_cell=primitive_cell, scale=scale,
                               comparator=comparator)
    unmatched = list(zip(entries, hosts))
    while len(unmatched) > 0:
        ref_host = unmatched[0][1]
        logger.info(
            "Reference tid = {}, formula = {}".format(unmatched[0][0].entry_id,
                                                      ref_host.formula)
        )
        ref_formula = ref_host.composition.reduced_formula
        logger.info("Reference host = {}".format(ref_formula))
        matches = [unmatched[0]]
        for i in range(1, len(unmatched)):
            test_host = unmatched[i][1]
            logger.info("Testing tid = {}, formula = {}"
                        .format(unmatched[i][0].entry_id, test_host.formula))
            test_formula = test_host.composition.reduced_formula
            logger.info("Test host = {}".format(test_formula))
            if matcher.fit(ref_host, test_host):
                logger.info("Fit found")
                matches.append(unmatched[i])
        # Store only the entries (drop the host structures) for this group.
        # Renamed the loop variable: it previously shadowed the matcher "m".
        groups.append(json.dumps([entry for entry, host in matches],
                                 cls=MontyEncoder))
        unmatched = [pair for pair in unmatched if pair not in matches]
        logger.info("{} unmatched remaining".format(len(unmatched)))
def group_entries_by_structure(entries, species_to_remove=None,
                               ltol=0.2, stol=.4, angle_tol=5,
                               primitive_cell=True, scale=True,
                               comparator=SpeciesComparator(),
                               ncpus=None):
    """
    Given a sequence of ComputedStructureEntries, use structure fitter to group
    them by structural similarity.
    Args:
        entries: Sequence of ComputedStructureEntries.
        species_to_remove: Sometimes you want to compare a host framework
            (e.g., in Li-ion battery analysis). This allows you to specify
            species to remove before structural comparison.
        ltol (float): Fractional length tolerance. Default is 0.2.
        stol (float): Site tolerance in Angstrom. Default is 0.4 Angstrom.
        angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
        primitive_cell (bool): If true: input structures will be reduced to
            primitive cells prior to matching. Defaults to True.
        scale: Input structures are scaled to equivalent volume if true;
            For exact matching, set to False.
        comparator: A comparator object implementing an equals method that
            declares equivalency of sites. Default is SpeciesComparator,
            which implies rigid species mapping.
        ncpus: Number of cpus to use. Use of multiple cpus can greatly improve
            fitting speed. Default of None means serial processing.
    Returns:
        Sequence of sequence of entries by structural similarity. e.g,
        [[ entry1, entry2], [entry3, entry4, entry5]]
    """
    start = datetime.datetime.now()
    logger.info("Started at {}".format(start))
    entries_host = [(entry, _get_host(entry.structure, species_to_remove))
                    for entry in entries]
    if ncpus:
        # Pre-bucket by structure hash so each worker only compares
        # potentially-matching candidates.
        symm_entries = collections.defaultdict(list)
        for entry, host in entries_host:
            symm_entries[comparator.get_structure_hash(host)].append((entry,
                                                                      host))
        import multiprocessing as mp
        # Use the module logger (was logging.info, bypassing the logger).
        logger.info("Using {} cpus".format(ncpus))
        manager = mp.Manager()
        groups = manager.list()
        p = mp.Pool(ncpus)
        try:
            # Parallel processing only supports Python primitives and not
            # objects, hence the JSON round-trip of entries and hosts.
            p.map(_perform_grouping,
                  [(json.dumps([e[0] for e in eh], cls=MontyEncoder),
                    json.dumps([e[1] for e in eh], cls=MontyEncoder),
                    ltol, stol, angle_tol, primitive_cell, scale,
                    comparator, groups)
                   for eh in symm_entries.values()])
        finally:
            # Shut the worker pool down; it was previously leaked.
            p.close()
            p.join()
    else:
        groups = []
        hosts = [host for entry, host in entries_host]
        _perform_grouping((json.dumps(entries, cls=MontyEncoder),
                           json.dumps(hosts, cls=MontyEncoder),
                           ltol, stol, angle_tol, primitive_cell, scale,
                           comparator, groups))
    entry_groups = []
    for g in groups:
        entry_groups.append(json.loads(g, cls=MontyDecoder))
    logger.info("Finished at {}".format(datetime.datetime.now()))
    logger.info("Took {}".format(datetime.datetime.now() - start))
    return entry_groups
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting the LCEL-based programs
to scale better for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains non-core Runnable classes.
""" | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/runnables/__init__.py |
#!/usr/bin/env python
# Simple tests for the ldb python bindings.
# Copyright (C) 2007 Jelmer Vernooij <jelmer@samba.org>
import os
from unittest import TestCase
import ldb
def filename():
    """Return a fresh temporary file path for a test database.

    When the SELFTEST_PREFIX environment variable is set, the path is
    placed under its ``tmp`` subdirectory; otherwise the system default
    temporary directory is used.
    """
    import tempfile
    prefix = os.environ.get("SELFTEST_PREFIX")
    tmp_dir = os.path.join(prefix, "tmp") if prefix is not None else None
    return tempfile.mktemp(dir=tmp_dir)
class NoContextTests(TestCase):
    """Tests for module-level ldb helpers that need no Ldb connection."""
    def test_valid_attr_name(self):
        self.assertTrue(ldb.valid_attr_name("foo"))
        self.assertFalse(ldb.valid_attr_name("24foo"))
    def test_timestring(self):
        # Epoch and an arbitrary known timestamp, in LDAP generalized time.
        self.assertEquals("19700101000000.0Z", ldb.timestring(0))
        self.assertEquals("20071119191012.0Z", ldb.timestring(1195499412))
    def test_string_to_time(self):
        self.assertEquals(0, ldb.string_to_time("19700101000000.0Z"))
        self.assertEquals(1195499412, ldb.string_to_time("20071119191012.0Z"))
    def test_binary_encode(self):
        # binary_decode must round-trip what binary_encode produced.
        encoded = ldb.binary_encode('test\\x')
        decoded = ldb.binary_decode(encoded)
        self.assertEquals(decoded, 'test\\x')
class SimpleLdb(TestCase):
    """End-to-end tests against a throwaway tdb-backed ldb database.

    Each test opens its own database file via filename() and removes
    any records it adds so tests remain independent.
    """
    def test_connect(self):
        ldb.Ldb(filename())
    def test_connect_none(self):
        ldb.Ldb()
    def test_connect_later(self):
        x = ldb.Ldb()
        x.connect(filename())
    def test_repr(self):
        x = ldb.Ldb()
        self.assertTrue(repr(x).startswith("<ldb connection"))
    def test_set_create_perms(self):
        x = ldb.Ldb()
        x.set_create_perms(0600)
    def test_modules_none(self):
        x = ldb.Ldb()
        self.assertEquals([], x.modules())
    def test_modules_tdb(self):
        # A file-backed ldb gets the tdb backend module by default.
        x = ldb.Ldb(filename())
        self.assertEquals("[<ldb module 'tdb'>]", repr(x.modules()))
    def test_search(self):
        l = ldb.Ldb(filename())
        self.assertEquals(len(l.search()), 0)
    def test_search_controls(self):
        l = ldb.Ldb(filename())
        self.assertEquals(len(l.search(controls=["paged_results:0:5"])), 0)
    def test_search_attrs(self):
        l = ldb.Ldb(filename())
        self.assertEquals(len(l.search(ldb.Dn(l, ""), ldb.SCOPE_SUBTREE, "(dc=*)", ["dc"])), 0)
    def test_search_string_dn(self):
        l = ldb.Ldb(filename())
        self.assertEquals(len(l.search("", ldb.SCOPE_SUBTREE, "(dc=*)", ["dc"])), 0)
    def test_search_attr_string(self):
        # attrs must be a list, not a bare string.
        l = ldb.Ldb(filename())
        self.assertRaises(TypeError, l.search, attrs="dc")
    def test_opaque(self):
        l = ldb.Ldb(filename())
        l.set_opaque("my_opaque", l)
        self.assertTrue(l.get_opaque("my_opaque") is not None)
        self.assertEquals(None, l.get_opaque("unknown"))
    def test_search_scope_base(self):
        l = ldb.Ldb(filename())
        self.assertEquals(len(l.search(ldb.Dn(l, "dc=foo1"),
                          ldb.SCOPE_ONELEVEL)), 0)
    def test_delete(self):
        # Deleting a missing DN must raise.
        l = ldb.Ldb(filename())
        self.assertRaises(ldb.LdbError, lambda: l.delete(ldb.Dn(l, "dc=foo2")))
    def test_delete_w_unhandled_ctrl(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo1")
        m["b"] = ["a"]
        l.add(m)
        self.assertRaises(ldb.LdbError, lambda: l.delete(m.dn, ["search_options:1:2"]))
        l.delete(m.dn)
    def test_contains(self):
        name = filename()
        l = ldb.Ldb(name)
        self.assertFalse(ldb.Dn(l, "dc=foo3") in l)
        l = ldb.Ldb(name)
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo3")
        m["b"] = ["a"]
        l.add(m)
        try:
            self.assertTrue(ldb.Dn(l, "dc=foo3") in l)
        finally:
            l.delete(m.dn)
    def test_get_config_basedn(self):
        l = ldb.Ldb(filename())
        self.assertEquals(None, l.get_config_basedn())
    def test_get_root_basedn(self):
        l = ldb.Ldb(filename())
        self.assertEquals(None, l.get_root_basedn())
    def test_get_schema_basedn(self):
        l = ldb.Ldb(filename())
        self.assertEquals(None, l.get_schema_basedn())
    def test_get_default_basedn(self):
        l = ldb.Ldb(filename())
        self.assertEquals(None, l.get_default_basedn())
    def test_add(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo4")
        m["bla"] = "bla"
        self.assertEquals(len(l.search()), 0)
        l.add(m)
        try:
            self.assertEquals(len(l.search()), 1)
        finally:
            l.delete(ldb.Dn(l, "dc=foo4"))
    def test_add_w_unhandled_ctrl(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo4")
        m["bla"] = "bla"
        self.assertEquals(len(l.search()), 0)
        self.assertRaises(ldb.LdbError, lambda: l.add(m,["search_options:1:2"]))
    def test_add_dict(self):
        # add() also accepts a plain dict with a 'dn' key.
        l = ldb.Ldb(filename())
        m = {"dn": ldb.Dn(l, "dc=foo5"),
             "bla": "bla"}
        self.assertEquals(len(l.search()), 0)
        l.add(m)
        try:
            self.assertEquals(len(l.search()), 1)
        finally:
            l.delete(ldb.Dn(l, "dc=foo5"))
    def test_add_dict_string_dn(self):
        l = ldb.Ldb(filename())
        m = {"dn": "dc=foo6", "bla": "bla"}
        self.assertEquals(len(l.search()), 0)
        l.add(m)
        try:
            self.assertEquals(len(l.search()), 1)
        finally:
            l.delete(ldb.Dn(l, "dc=foo6"))
    def test_rename(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo7")
        m["bla"] = "bla"
        self.assertEquals(len(l.search()), 0)
        l.add(m)
        try:
            l.rename(ldb.Dn(l, "dc=foo7"), ldb.Dn(l, "dc=bar"))
            self.assertEquals(len(l.search()), 1)
        finally:
            l.delete(ldb.Dn(l, "dc=bar"))
    def test_rename_string_dns(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=foo8")
        m["bla"] = "bla"
        self.assertEquals(len(l.search()), 0)
        l.add(m)
        self.assertEquals(len(l.search()), 1)
        try:
            l.rename("dc=foo8", "dc=bar")
            self.assertEquals(len(l.search()), 1)
        finally:
            l.delete(ldb.Dn(l, "dc=bar"))
    def test_modify_delete(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=modifydelete")
        m["bla"] = ["1234"]
        l.add(m)
        rm = l.search(m.dn)[0]
        self.assertEquals(["1234"], list(rm["bla"]))
        try:
            m = ldb.Message()
            m.dn = ldb.Dn(l, "dc=modifydelete")
            m["bla"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "bla")
            self.assertEquals(ldb.FLAG_MOD_DELETE, m["bla"].flags())
            l.modify(m)
            rm = l.search(m.dn)[0]
            # Only the implicit 'dn' attribute is left after the delete.
            self.assertEquals(1, len(rm))
            rm = l.search(m.dn, attrs=["bla"])
            self.assertEquals(0, len(rm))
        finally:
            l.delete(ldb.Dn(l, "dc=modifydelete"))
    def test_modify_add(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=add")
        m["bla"] = ["1234"]
        l.add(m)
        try:
            m = ldb.Message()
            m.dn = ldb.Dn(l, "dc=add")
            m["bla"] = ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla")
            self.assertEquals(ldb.FLAG_MOD_ADD, m["bla"].flags())
            l.modify(m)
            rm = l.search(m.dn)[0]
            self.assertEquals(2, len(rm))
            self.assertEquals(["1234", "456"], list(rm["bla"]))
        finally:
            l.delete(ldb.Dn(l, "dc=add"))
    def test_modify_replace(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=modify2")
        m["bla"] = ["1234", "456"]
        l.add(m)
        try:
            m = ldb.Message()
            m.dn = ldb.Dn(l, "dc=modify2")
            m["bla"] = ldb.MessageElement(["789"], ldb.FLAG_MOD_REPLACE, "bla")
            self.assertEquals(ldb.FLAG_MOD_REPLACE, m["bla"].flags())
            l.modify(m)
            rm = l.search(m.dn)[0]
            self.assertEquals(2, len(rm))
            self.assertEquals(["789"], list(rm["bla"]))
            rm = l.search(m.dn, attrs=["bla"])[0]
            self.assertEquals(1, len(rm))
        finally:
            l.delete(ldb.Dn(l, "dc=modify2"))
    def test_modify_flags_change(self):
        l = ldb.Ldb(filename())
        m = ldb.Message()
        m.dn = ldb.Dn(l, "dc=add")
        m["bla"] = ["1234"]
        l.add(m)
        try:
            m = ldb.Message()
            m.dn = ldb.Dn(l, "dc=add")
            m["bla"] = ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla")
            self.assertEquals(ldb.FLAG_MOD_ADD, m["bla"].flags())
            l.modify(m)
            rm = l.search(m.dn)[0]
            self.assertEquals(2, len(rm))
            self.assertEquals(["1234", "456"], list(rm["bla"]))
            # Now create another modify, but switch the flags before we do it
            m["bla"] = ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla")
            m["bla"].set_flags(ldb.FLAG_MOD_DELETE)
            l.modify(m)
            rm = l.search(m.dn, attrs=["bla"])[0]
            self.assertEquals(1, len(rm))
            self.assertEquals(["1234"], list(rm["bla"]))
        finally:
            l.delete(ldb.Dn(l, "dc=add"))
    def test_transaction_commit(self):
        l = ldb.Ldb(filename())
        l.transaction_start()
        m = ldb.Message(ldb.Dn(l, "dc=foo9"))
        m["foo"] = ["bar"]
        l.add(m)
        l.transaction_commit()
        l.delete(m.dn)
    def test_transaction_cancel(self):
        # A cancelled transaction must leave no trace of the add.
        l = ldb.Ldb(filename())
        l.transaction_start()
        m = ldb.Message(ldb.Dn(l, "dc=foo10"))
        m["foo"] = ["bar"]
        l.add(m)
        l.transaction_cancel()
        self.assertEquals(0, len(l.search(ldb.Dn(l, "dc=foo10"))))
    def test_set_debug(self):
        def my_report_fn(level, text):
            pass
        l = ldb.Ldb(filename())
        l.set_debug(my_report_fn)
    def test_zero_byte_string(self):
        """Testing we do not get trapped in the \0 byte in a property string."""
        l = ldb.Ldb(filename())
        l.add({
            "dn" : "dc=somedn",
            "objectclass" : "user",
            "cN" : "LDAPtestUSER",
            "givenname" : "ldap",
            "displayname" : "foo\0bar",
        })
        res = l.search(expression="(dn=dc=somedn)")
        self.assertEquals("foo\0bar", res[0]["displayname"][0])
    def test_no_crash_broken_expr(self):
        # A malformed filter expression must raise, not crash.
        l = ldb.Ldb(filename())
        self.assertRaises(ldb.LdbError,lambda: l.search("", ldb.SCOPE_SUBTREE, "&(dc=*)(dn=*)", ["dc"]))
class DnTests(TestCase):
    """Tests for ldb.Dn construction, comparison and manipulation."""
    def setUp(self):
        super(DnTests, self).setUp()
        # A single shared connection is enough; Dn only needs an Ldb handle.
        self.ldb = ldb.Ldb(filename())
    def test_set_dn_invalid(self):
        # Message.dn only accepts a Dn object, not a plain string.
        x = ldb.Message()
        def assign():
            x.dn = "astring"
        self.assertRaises(TypeError, assign)
    def test_eq(self):
        x = ldb.Dn(self.ldb, "dc=foo11,bar=bloe")
        y = ldb.Dn(self.ldb, "dc=foo11,bar=bloe")
        self.assertEquals(x, y)
        y = ldb.Dn(self.ldb, "dc=foo11,bar=blie")
        self.assertNotEquals(x, y)
    def test_str(self):
        x = ldb.Dn(self.ldb, "dc=foo12,bar=bloe")
        self.assertEquals(x.__str__(), "dc=foo12,bar=bloe")
    def test_repr(self):
        x = ldb.Dn(self.ldb, "dc=foo13,bla=blie")
        self.assertEquals(x.__repr__(), "Dn('dc=foo13,bla=blie')")
    def test_get_casefold(self):
        x = ldb.Dn(self.ldb, "dc=foo14,bar=bloe")
        self.assertEquals(x.get_casefold(), "DC=FOO14,BAR=bloe")
    def test_validate(self):
        x = ldb.Dn(self.ldb, "dc=foo15,bar=bloe")
        self.assertTrue(x.validate())
    def test_parent(self):
        x = ldb.Dn(self.ldb, "dc=foo16,bar=bloe")
        self.assertEquals("bar=bloe", x.parent().__str__())
    def test_parent_nonexistent(self):
        # Special DNs have no parent.
        x = ldb.Dn(self.ldb, "@BLA")
        self.assertEquals(None, x.parent())
    def test_is_valid(self):
        x = ldb.Dn(self.ldb, "dc=foo18,dc=bloe")
        self.assertTrue(x.is_valid())
        x = ldb.Dn(self.ldb, "")
        self.assertTrue(x.is_valid())
    def test_is_special(self):
        x = ldb.Dn(self.ldb, "dc=foo19,bar=bloe")
        self.assertFalse(x.is_special())
        x = ldb.Dn(self.ldb, "@FOOBAR")
        self.assertTrue(x.is_special())
    def test_check_special(self):
        x = ldb.Dn(self.ldb, "dc=foo20,bar=bloe")
        self.assertFalse(x.check_special("FOOBAR"))
        x = ldb.Dn(self.ldb, "@FOOBAR")
        self.assertTrue(x.check_special("@FOOBAR"))
    def test_len(self):
        # len() counts RDN components.
        x = ldb.Dn(self.ldb, "dc=foo21,bar=bloe")
        self.assertEquals(2, len(x))
        x = ldb.Dn(self.ldb, "dc=foo21")
        self.assertEquals(1, len(x))
    def test_add_child(self):
        x = ldb.Dn(self.ldb, "dc=foo22,bar=bloe")
        self.assertTrue(x.add_child(ldb.Dn(self.ldb, "bla=bloe")))
        self.assertEquals("bla=bloe,dc=foo22,bar=bloe", x.__str__())
    def test_add_base(self):
        x = ldb.Dn(self.ldb, "dc=foo23,bar=bloe")
        base = ldb.Dn(self.ldb, "bla=bloe")
        self.assertTrue(x.add_base(base))
        self.assertEquals("dc=foo23,bar=bloe,bla=bloe", x.__str__())
    def test_add(self):
        x = ldb.Dn(self.ldb, "dc=foo24")
        y = ldb.Dn(self.ldb, "bar=bla")
        self.assertEquals("dc=foo24,bar=bla", str(x + y))
    def test_remove_base_components(self):
        x = ldb.Dn(self.ldb, "dc=foo24,dc=samba,dc=org")
        x.remove_base_components(len(x)-1)
        self.assertEquals("dc=foo24", str(x))
    def test_parse_ldif(self):
        # parse_ldif yields (changetype, Message) tuples.
        msgs = self.ldb.parse_ldif("dn: foo=bar\n")
        msg = msgs.next()
        self.assertEquals("foo=bar", str(msg[1].dn))
        self.assertTrue(isinstance(msg[1], ldb.Message))
        ldif = self.ldb.write_ldif(msg[1], ldb.CHANGETYPE_NONE)
        self.assertEquals("dn: foo=bar\n\n", ldif)
    def test_parse_ldif_more(self):
        msgs = self.ldb.parse_ldif("dn: foo=bar\n\n\ndn: bar=bar")
        msg = msgs.next()
        self.assertEquals("foo=bar", str(msg[1].dn))
        msg = msgs.next()
        self.assertEquals("bar=bar", str(msg[1].dn))
    def test_canonical_string(self):
        x = ldb.Dn(self.ldb, "dc=foo25,bar=bloe")
        self.assertEquals("/bloe/foo25", x.canonical_str())
    def test_canonical_ex_string(self):
        x = ldb.Dn(self.ldb, "dc=foo26,bar=bloe")
        self.assertEquals("/bloe\nfoo26", x.canonical_ex_str())
    def test_ldb_is_child_of(self):
        """Testing ldb_dn_compare_dn"""
        dn1 = ldb.Dn(self.ldb, "dc=base")
        dn2 = ldb.Dn(self.ldb, "cn=foo,dc=base")
        dn3 = ldb.Dn(self.ldb, "cn=bar,dc=base")
        dn4 = ldb.Dn(self.ldb, "cn=baz,cn=bar,dc=base")
        self.assertTrue(dn2.is_child_of(dn1))
        self.assertTrue(dn4.is_child_of(dn1))
        self.assertTrue(dn4.is_child_of(dn3))
        self.assertFalse(dn3.is_child_of(dn2))
        self.assertFalse(dn1.is_child_of(dn4))
class LdbMsgTests(TestCase):
    """Tests for ldb.Message: attribute access, dn handling, diffing
    and construction from dicts."""
    def setUp(self):
        super(LdbMsgTests, self).setUp()
        self.msg = ldb.Message()
    def test_init_dn(self):
        self.msg = ldb.Message(ldb.Dn(ldb.Ldb(), "dc=foo27"))
        self.assertEquals("dc=foo27", str(self.msg.dn))
    def test_iter_items(self):
        # The dn counts as an item once it is set.
        self.assertEquals(0, len(self.msg.items()))
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "dc=foo28")
        self.assertEquals(1, len(self.msg.items()))
    def test_repr(self):
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "dc=foo29")
        self.msg["dc"] = "foo"
        self.assertEquals("Message({'dn': Dn('dc=foo29'), 'dc': MessageElement(['foo'])})", repr(self.msg))
    def test_len(self):
        self.assertEquals(0, len(self.msg))
    def test_notpresent(self):
        self.assertRaises(KeyError, lambda: self.msg["foo"])
    def test_del(self):
        # Deleting an absent attribute is a no-op, not an error.
        del self.msg["foo"]
    def test_add(self):
        self.msg.add(ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla"))
    def test_elements_empty(self):
        self.assertEquals([], self.msg.elements())
    def test_elements(self):
        el = ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla")
        self.msg.add(el)
        self.assertEquals([el], self.msg.elements())
    def test_add_value(self):
        self.assertEquals(0, len(self.msg))
        self.msg["foo"] = ["foo"]
        self.assertEquals(1, len(self.msg))
    def test_add_value_multiple(self):
        # Multiple values collapse into one multi-valued element.
        self.assertEquals(0, len(self.msg))
        self.msg["foo"] = ["foo", "bla"]
        self.assertEquals(1, len(self.msg))
        self.assertEquals(["foo", "bla"], list(self.msg["foo"]))
    def test_set_value(self):
        self.msg["foo"] = ["fool"]
        self.assertEquals(["fool"], list(self.msg["foo"]))
        self.msg["foo"] = ["bar"]
        self.assertEquals(["bar"], list(self.msg["foo"]))
    def test_keys(self):
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "@BASEINFO")
        self.msg["foo"] = ["bla"]
        self.msg["bar"] = ["bla"]
        self.assertEquals(["dn", "foo", "bar"], self.msg.keys())
    def test_dn(self):
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "@BASEINFO")
        self.assertEquals("@BASEINFO", self.msg.dn.__str__())
    def test_get_dn(self):
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "@BASEINFO")
        self.assertEquals("@BASEINFO", self.msg.get("dn").__str__())
    def test_get_invalid(self):
        self.msg.dn = ldb.Dn(ldb.Ldb(filename()), "@BASEINFO")
        self.assertRaises(TypeError, self.msg.get, 42)
    def test_get_other(self):
        self.msg["foo"] = ["bar"]
        self.assertEquals("bar", self.msg.get("foo")[0])
        self.assertEquals("bar", self.msg.get("foo", idx=0))
        self.assertEquals(None, self.msg.get("foo", idx=1))
        self.assertEquals("", self.msg.get("foo", default='', idx=1))
    def test_get_default(self):
        self.assertEquals(None, self.msg.get("tatayoyo", idx=0))
        self.assertEquals("anniecordie", self.msg.get("tatayoyo", "anniecordie"))
    def test_get_unknown(self):
        self.assertEquals(None, self.msg.get("lalalala"))
    def test_msg_diff(self):
        # The diff keeps the dn but only the attributes that changed.
        l = ldb.Ldb()
        msgs = l.parse_ldif("dn: foo=bar\nfoo: bar\nbaz: do\n\ndn: foo=bar\nfoo: bar\nbaz: dont\n")
        msg1 = msgs.next()[1]
        msg2 = msgs.next()[1]
        msgdiff = l.msg_diff(msg1, msg2)
        self.assertEquals("foo=bar", msgdiff.get("dn").__str__())
        self.assertRaises(KeyError, lambda: msgdiff["foo"])
        self.assertEquals(1, len(msgdiff))
    def test_equal_empty(self):
        msg1 = ldb.Message()
        msg2 = ldb.Message()
        self.assertEquals(msg1, msg2)
    def test_equal_simplel(self):
        db = ldb.Ldb(filename())
        msg1 = ldb.Message()
        msg1.dn = ldb.Dn(db, "foo=bar")
        msg2 = ldb.Message()
        msg2.dn = ldb.Dn(db, "foo=bar")
        self.assertEquals(msg1, msg2)
        msg1['foo'] = 'bar'
        msg2['foo'] = 'bar'
        self.assertEquals(msg1, msg2)
        msg2['foo'] = 'blie'
        self.assertNotEquals(msg1, msg2)
        msg2['foo'] = 'blie'
    def test_from_dict(self):
        rec = {"dn": "dc=fromdict",
               "a1": ["a1-val1", "a1-val1"]}
        l = ldb.Ldb()
        # check different types of input Flags
        for flags in [ldb.FLAG_MOD_ADD, ldb.FLAG_MOD_REPLACE, ldb.FLAG_MOD_DELETE]:
            m = ldb.Message.from_dict(l, rec, flags)
            self.assertEquals(rec["a1"], list(m["a1"]))
            self.assertEquals(flags, m["a1"].flags())
        # check input params
        self.assertRaises(TypeError, ldb.Message.from_dict, dict(), rec, ldb.FLAG_MOD_REPLACE)
        self.assertRaises(TypeError, ldb.Message.from_dict, l, list(), ldb.FLAG_MOD_REPLACE)
        self.assertRaises(ValueError, ldb.Message.from_dict, l, rec, 0)
        # Message.from_dict expects dictionary with 'dn'
        err_rec = {"a1": ["a1-val1", "a1-val1"]}
        self.assertRaises(TypeError, ldb.Message.from_dict, l, err_rec, ldb.FLAG_MOD_REPLACE)
    def test_copy_add_message_element(self):
        m = ldb.Message()
        m["1"] = ldb.MessageElement(["val 111"], ldb.FLAG_MOD_ADD, "1")
        m["2"] = ldb.MessageElement(["val 222"], ldb.FLAG_MOD_ADD, "2")
        mto = ldb.Message()
        mto["1"] = m["1"]
        mto["2"] = m["2"]
        self.assertEqual(mto["1"], m["1"])
        self.assertEqual(mto["2"], m["2"])
        mto = ldb.Message()
        mto.add(m["1"])
        mto.add(m["2"])
        self.assertEqual(mto["1"], m["1"])
        self.assertEqual(mto["2"], m["2"])
class MessageElementTests(TestCase):
    """Tests for ldb.MessageElement: construction, comparison, indexing."""
    def test_cmp_element(self):
        x = ldb.MessageElement(["foo"])
        y = ldb.MessageElement(["foo"])
        z = ldb.MessageElement(["bzr"])
        self.assertEquals(x, y)
        self.assertNotEquals(x, z)
    def test_create_iterable(self):
        x = ldb.MessageElement(["foo"])
        self.assertEquals(["foo"], list(x))
    def test_repr(self):
        x = ldb.MessageElement(["foo"])
        self.assertEquals("MessageElement(['foo'])", repr(x))
        x = ldb.MessageElement(["foo", "bla"])
        self.assertEquals(2, len(x))
        self.assertEquals("MessageElement(['foo','bla'])", repr(x))
    def test_get_item(self):
        # Negative indices work like normal Python sequences.
        x = ldb.MessageElement(["foo", "bar"])
        self.assertEquals("foo", x[0])
        self.assertEquals("bar", x[1])
        self.assertEquals("bar", x[-1])
        self.assertRaises(IndexError, lambda: x[45])
    def test_len(self):
        x = ldb.MessageElement(["foo", "bar"])
        self.assertEquals(2, len(x))
    def test_eq(self):
        x = ldb.MessageElement(["foo", "bar"])
        y = ldb.MessageElement(["foo", "bar"])
        self.assertEquals(y, x)
        x = ldb.MessageElement(["foo"])
        self.assertNotEquals(y, x)
        y = ldb.MessageElement(["foo"])
        self.assertEquals(y, x)
    def test_extended(self):
        # Flags and name do not affect the repr.
        el = ldb.MessageElement(["456"], ldb.FLAG_MOD_ADD, "bla")
        self.assertEquals("MessageElement(['456'])", repr(el))
class ModuleTests(TestCase):
    """Tests for registering custom Python ldb modules."""
    def test_register_module(self):
        class ExampleModule:
            name = "example"
        ldb.register_module(ExampleModule)
    def test_use_module(self):
        # The registered module must only be instantiated when a database
        # whose @MODULES list names it is (re)opened.
        ops = []
        class ExampleModule:
            name = "bla"
            def __init__(self, ldb, next):
                ops.append("init")
                self.next = next
            def search(self, *args, **kwargs):
                return self.next.search(*args, **kwargs)
            def request(self, *args, **kwargs):
                pass
        name = filename()
        ldb.register_module(ExampleModule)
        if os.path.exists(name):
            os.unlink(name)
        l = ldb.Ldb(name)
        l.add({"dn": "@MODULES", "@LIST": "bla"})
        self.assertEquals([], ops)
        l = ldb.Ldb(name)
        self.assertEquals(["init"], ops)
class LdbResultTests(TestCase):
    """Tests for the ldb result object returned by search(): msgs,
    controls, referals and sequence/iterator behaviour."""
    def setUp(self):
        super(LdbResultTests, self).setUp()
        name = filename()
        self.name = name
        if os.path.exists(name):
            os.unlink(name)
        self.l = ldb.Ldb(name)
        # Populate a small fixed tree (13 records in total).
        self.l.add({"dn": "DC=SAMBA,DC=ORG", "name": "samba.org"})
        self.l.add({"dn": "OU=ADMIN,DC=SAMBA,DC=ORG", "name": "Admins"})
        self.l.add({"dn": "OU=USERS,DC=SAMBA,DC=ORG", "name": "Users"})
        self.l.add({"dn": "OU=OU1,DC=SAMBA,DC=ORG", "name": "OU #1"})
        self.l.add({"dn": "OU=OU2,DC=SAMBA,DC=ORG", "name": "OU #2"})
        self.l.add({"dn": "OU=OU3,DC=SAMBA,DC=ORG", "name": "OU #3"})
        self.l.add({"dn": "OU=OU4,DC=SAMBA,DC=ORG", "name": "OU #4"})
        self.l.add({"dn": "OU=OU5,DC=SAMBA,DC=ORG", "name": "OU #5"})
        self.l.add({"dn": "OU=OU6,DC=SAMBA,DC=ORG", "name": "OU #6"})
        self.l.add({"dn": "OU=OU7,DC=SAMBA,DC=ORG", "name": "OU #7"})
        self.l.add({"dn": "OU=OU8,DC=SAMBA,DC=ORG", "name": "OU #8"})
        self.l.add({"dn": "OU=OU9,DC=SAMBA,DC=ORG", "name": "OU #9"})
        self.l.add({"dn": "OU=OU10,DC=SAMBA,DC=ORG", "name": "OU #10"})
    def tearDown(self):
        super(LdbResultTests, self).tearDown()
        if os.path.exists(self.name):
            os.unlink(self.name)
    def test_return_type(self):
        res = self.l.search()
        self.assertEquals(str(res), "<ldb result>")
    def test_get_msgs(self):
        res = self.l.search()
        list = res.msgs
    def test_get_controls(self):
        res = self.l.search()
        list = res.controls
    def test_get_referals(self):
        res = self.l.search()
        list = res.referals
    def test_iter_msgs(self):
        found = False
        for l in self.l.search().msgs:
            if str(l.dn) == "OU=OU10,DC=SAMBA,DC=ORG":
                found = True
        self.assertTrue(found)
    def test_iter_msgs_count(self):
        self.assertTrue(self.l.search().count > 0)
        # 13 objects has been added to the DC=SAMBA, DC=ORG
        self.assertEqual(self.l.search(base="DC=SAMBA,DC=ORG").count, 13)
    def test_iter_controls(self):
        res = self.l.search().controls
        it = iter(res)
    def test_create_control(self):
        self.assertRaises(ValueError, ldb.Control, self.l, "tatayoyo:0")
        c = ldb.Control(self.l, "relax:1")
        self.assertEquals(c.critical, True)
        self.assertEquals(c.oid, "1.3.6.1.4.1.4203.666.5.12")
    def test_iter_refs(self):
        res = self.l.search().referals
        it = iter(res)
    def test_iter_as_sequence_msgs(self):
        # Results must also support index-based access.
        found = False
        res = self.l.search().msgs
        for i in range(0, len(res)):
            l = res[i]
            if str(l.dn) == "OU=OU10,DC=SAMBA,DC=ORG":
                found = True
        self.assertTrue(found)
    def test_iter_as_sequence(self):
        found = False
        res = self.l.search()
        for i in range(0, len(res)):
            l = res[i]
            if str(l.dn) == "OU=OU10,DC=SAMBA,DC=ORG":
                found = True
        self.assertTrue(found)
class BadTypeTests(TestCase):
    """Check that the main Ldb entry points reject arguments of the wrong type."""

    def test_control(self):
        l = ldb.Ldb()
        self.assertRaises(TypeError, ldb.Control, '<bad type>', 'relax:1')
        self.assertRaises(TypeError, ldb.Control, ldb, 1234)

    def test_modify(self):
        l = ldb.Ldb()
        dn = ldb.Dn(l, 'a=b')
        m = ldb.Message(dn)
        self.assertRaises(TypeError, l.modify, '<bad type>')
        self.assertRaises(TypeError, l.modify, m, '<bad type>')

    def test_add(self):
        l = ldb.Ldb()
        dn = ldb.Dn(l, 'a=b')
        m = ldb.Message(dn)
        self.assertRaises(TypeError, l.add, '<bad type>')
        self.assertRaises(TypeError, l.add, m, '<bad type>')

    def test_delete(self):
        # BUG fix: this test previously called l.add (copy/paste error), so
        # l.delete's type checking was never exercised.
        l = ldb.Ldb()
        dn = ldb.Dn(l, 'a=b')
        self.assertRaises(TypeError, l.delete, '<bad type>')
        self.assertRaises(TypeError, l.delete, dn, '<bad type>')

    def test_rename(self):
        # BUG fix: this test previously called l.add (copy/paste error), so
        # l.rename's type checking was never exercised.
        l = ldb.Ldb()
        dn = ldb.Dn(l, 'a=b')
        self.assertRaises(TypeError, l.rename, '<bad type>', dn)
        self.assertRaises(TypeError, l.rename, dn, '<bad type>')
        self.assertRaises(TypeError, l.rename, dn, dn, '<bad type>')

    def test_search(self):
        l = ldb.Ldb()
        self.assertRaises(TypeError, l.search, base=1234)
        self.assertRaises(TypeError, l.search, scope='<bad type>')
        self.assertRaises(TypeError, l.search, expression=1234)
        self.assertRaises(TypeError, l.search, attrs='<bad type>')
        self.assertRaises(TypeError, l.search, controls='<bad type>')
class VersionTests(TestCase):
    """Sanity check on the module's version attribute."""

    def test_version(self):
        version = ldb.__version__
        self.assertTrue(isinstance(version, str))
if __name__ == '__main__':
    # Discover and run every TestCase in this module when executed directly.
    import unittest
    unittest.TestProgram()
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
# Parameter grids shared by the tests below.
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
    """Check _log_dirichlet_norm against a direct gammaln computation."""
    rng = np.random.RandomState(0)
    weight_concentration = rng.rand(2)
    # Log of the Dirichlet normalization constant:
    # gammaln(sum(alpha)) - sum(gammaln(alpha))
    expected_norm = (gammaln(np.sum(weight_concentration)) -
                     np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)  # typo fix: was 'predected'
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
    """Check _log_wishart_norm against a direct computation of the
    Wishart log-normalization constant."""
    rng = np.random.RandomState(0)
    n_components, n_features = 5, 2
    degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
    log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
    expected_norm = np.empty(5)
    for k, (degrees_of_freedom_k, log_det_k) in enumerate(
            zip(degrees_of_freedom, log_det_precisions_chol)):
        expected_norm[k] = -(
            degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
            np.sum(gammaln(.5 * (degrees_of_freedom_k -
                                 np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,  # typo fix: was 'predected'
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
    """A bogus covariance_type must raise a descriptive ValueError at fit time."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 2
    X = rng.rand(n_samples, n_features)
    covariance_type = 'bad_covariance_type'
    model = BayesianGaussianMixture(covariance_type=covariance_type,
                                    random_state=rng)
    expected_message = ("Invalid value for 'covariance_type': %s "
                        "'covariance_type' should be in "
                        "['spherical', 'tied', 'diag', 'full']"
                        % covariance_type)
    assert_raise_message(ValueError, expected_message, model.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
    """A bogus weight_concentration_prior_type must raise a ValueError at fit time."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 2
    X = rng.rand(n_samples, n_features)
    bad_prior_type = 'bad_prior_type'
    model = BayesianGaussianMixture(
        weight_concentration_prior_type=bad_prior_type, random_state=rng)
    expected_message = ("Invalid value for 'weight_concentration_prior_type':"
                        " %s 'weight_concentration_prior_type' should be in "
                        "['dirichlet_process', 'dirichlet_distribution']"
                        % bad_prior_type)
    assert_raise_message(ValueError, expected_message, model.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
    """weight_concentration_prior: bad values raise; explicit and default
    values end up in the fitted weight_concentration_prior_ attribute."""
    rng = np.random.RandomState(0)
    n_samples, n_components, n_features = 10, 5, 2
    X = rng.rand(n_samples, n_features)
    # Check raise message for a bad value of weight_concentration_prior
    bad_weight_concentration_prior_ = 0.
    bgmm = BayesianGaussianMixture(
        weight_concentration_prior=bad_weight_concentration_prior_,
        random_state=0)
    assert_raise_message(ValueError,
                         "The parameter 'weight_concentration_prior' "
                         "should be greater than 0., but got %.3f."
                         % bad_weight_concentration_prior_,
                         bgmm.fit, X)
    # Check correct init for a given value of weight_concentration_prior
    weight_concentration_prior = rng.rand()
    bgmm = BayesianGaussianMixture(
        weight_concentration_prior=weight_concentration_prior,
        random_state=rng).fit(X)
    assert_almost_equal(weight_concentration_prior,
                        bgmm.weight_concentration_prior_)
    # Check correct init for the default value of weight_concentration_prior
    # (the documented default is 1 / n_components)
    bgmm = BayesianGaussianMixture(n_components=n_components,
                                   random_state=rng).fit(X)
    assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
    """mean_precision_prior / mean_prior: bad values raise; explicit and
    default values end up in the fitted *_prior_ attributes."""
    rng = np.random.RandomState(0)
    n_samples, n_components, n_features = 10, 3, 2
    X = rng.rand(n_samples, n_features)
    # Check raise message for a bad value of mean_precision_prior
    bad_mean_precision_prior_ = 0.
    bgmm = BayesianGaussianMixture(
        mean_precision_prior=bad_mean_precision_prior_,
        random_state=rng)
    assert_raise_message(ValueError,
                         "The parameter 'mean_precision_prior' should be "
                         "greater than 0., but got %.3f."
                         % bad_mean_precision_prior_,
                         bgmm.fit, X)
    # Check correct init for a given value of mean_precision_prior
    mean_precision_prior = rng.rand()
    bgmm = BayesianGaussianMixture(
        mean_precision_prior=mean_precision_prior,
        random_state=rng).fit(X)
    assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
    # Check correct init for the default value of mean_precision_prior
    bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
    assert_almost_equal(1., bgmm.mean_precision_prior_)
    # Check raise message for a bad shape of mean_prior
    mean_prior = rng.rand(n_features + 1)
    bgmm = BayesianGaussianMixture(n_components=n_components,
                                   mean_prior=mean_prior,
                                   random_state=rng)
    assert_raise_message(ValueError,
                         "The parameter 'means' should have the shape of ",
                         bgmm.fit, X)
    # Check correct init for a given value of mean_prior
    mean_prior = rng.rand(n_features)
    bgmm = BayesianGaussianMixture(n_components=n_components,
                                   mean_prior=mean_prior,
                                   random_state=rng).fit(X)
    assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
    # (the default is the empirical mean of X)
    bgmm = BayesianGaussianMixture(n_components=n_components,
                                   random_state=rng).fit(X)
    assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
    """degrees_of_freedom_prior / covariance_prior: bad values raise;
    explicit and default values end up in the fitted *_prior_ attributes."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 2
    X = rng.rand(n_samples, n_features)
    # Check raise message for a bad value of degrees_of_freedom_prior
    bad_degrees_of_freedom_prior_ = n_features - 1.
    bgmm = BayesianGaussianMixture(
        degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
        random_state=rng)
    assert_raise_message(ValueError,
                         "The parameter 'degrees_of_freedom_prior' should be "
                         "greater than %d, but got %.3f."
                         % (n_features - 1, bad_degrees_of_freedom_prior_),
                         bgmm.fit, X)
    # Check correct init for a given value of degrees_of_freedom_prior
    degrees_of_freedom_prior = rng.rand() + n_features - 1.
    bgmm = BayesianGaussianMixture(
        degrees_of_freedom_prior=degrees_of_freedom_prior,
        random_state=rng).fit(X)
    assert_almost_equal(degrees_of_freedom_prior,
                        bgmm.degrees_of_freedom_prior_)
    # Check correct init for the default value of degrees_of_freedom_prior
    degrees_of_freedom_prior_default = n_features
    bgmm = BayesianGaussianMixture(
        degrees_of_freedom_prior=degrees_of_freedom_prior_default,
        random_state=rng).fit(X)
    assert_almost_equal(degrees_of_freedom_prior_default,
                        bgmm.degrees_of_freedom_prior_)
    # Check correct init for a given value of covariance_prior,
    # one shape per covariance_type
    covariance_prior = {
        'full': np.cov(X.T, bias=1) + 10,
        'tied': np.cov(X.T, bias=1) + 5,
        'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
        'spherical': rng.rand()}
    bgmm = BayesianGaussianMixture(random_state=rng)
    for cov_type in ['full', 'tied', 'diag', 'spherical']:
        bgmm.covariance_type = cov_type
        bgmm.covariance_prior = covariance_prior[cov_type]
        bgmm.fit(X)
        assert_almost_equal(covariance_prior[cov_type],
                            bgmm.covariance_prior_)
    # Check raise message for a bad spherical value of covariance_prior
    bad_covariance_prior_ = -1.
    bgmm = BayesianGaussianMixture(covariance_type='spherical',
                                   covariance_prior=bad_covariance_prior_,
                                   random_state=rng)
    assert_raise_message(ValueError,
                         "The parameter 'spherical covariance_prior' "
                         "should be greater than 0., but got %.3f."
                         % bad_covariance_prior_,
                         bgmm.fit, X)
    # Check correct init for the default value of covariance_prior
    # (empirical covariance / variance of X, shape depending on cov_type)
    covariance_prior_default = {
        'full': np.atleast_2d(np.cov(X.T)),
        'tied': np.atleast_2d(np.cov(X.T)),
        'diag': np.var(X, axis=0, ddof=1),
        'spherical': np.var(X, axis=0, ddof=1).mean()}
    bgmm = BayesianGaussianMixture(random_state=0)
    for cov_type in ['full', 'tied', 'diag', 'spherical']:
        bgmm.covariance_type = cov_type
        bgmm.fit(X)
        assert_almost_equal(covariance_prior_default[cov_type],
                            bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
    """Scoring before fit() must raise a 'not fitted' ValueError."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 2
    # Check raise message
    unfitted_model = BayesianGaussianMixture(random_state=rng)
    X = rng.rand(n_samples, n_features)
    expected_message = ('This BayesianGaussianMixture instance is not '
                        'fitted yet.')
    assert_raise_message(ValueError, expected_message, unfitted_model.score, X)
def test_bayesian_mixture_weights():
    """weights_ must match the analytic normalisation of
    weight_concentration_ and sum to 1, for both prior types."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 2
    X = rng.rand(n_samples, n_features)
    # Case Dirichlet distribution for the weight concentration prior type
    bgmm = BayesianGaussianMixture(
        weight_concentration_prior_type="dirichlet_distribution",
        n_components=3, random_state=rng).fit(X)
    expected_weights = (bgmm.weight_concentration_ /
                        np.sum(bgmm.weight_concentration_))
    assert_almost_equal(expected_weights, bgmm.weights_)
    assert_almost_equal(np.sum(bgmm.weights_), 1.0)
    # Case Dirichlet process for the weight concentration prior type
    dpgmm = BayesianGaussianMixture(
        weight_concentration_prior_type="dirichlet_process",
        n_components=3, random_state=rng).fit(X)
    weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
                            dpgmm.weight_concentration_[1])
    tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
    # Stick-breaking construction of the expected weights
    expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
                        np.hstack((1, np.cumprod(tmp[:-1]))))
    expected_weights /= np.sum(expected_weights)
    assert_almost_equal(expected_weights, dpgmm.weights_)
    assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference (without
    # regularization) monotonically improves the lower bound on the
    # training set.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=20)
    n_components = rand_data.n_components
    for prior_type in PRIOR_TYPE:
        for covar_type in COVARIANCE_TYPE:
            X = rand_data.X[covar_type]
            # warm_start + max_iter=1 lets us run one VB iteration per fit()
            bgmm = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=2 * n_components, covariance_type=covar_type,
                warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
            current_lower_bound = -np.infty
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_lower_bound = current_lower_bound
                current_lower_bound = bgmm.fit(X).lower_bound_
                assert_greater_equal(current_lower_bound, prev_lower_bound)
                if bgmm.converged_:
                    break
            assert(bgmm.converged_)
def test_compare_covar_type():
    """The tied/diag/spherical covariances after one M-step must be the
    corresponding reductions of the 'full' covariances."""
    # We can compare the 'full' precision with the other cov_type if we apply
    # 1 iter of the M-step (done during _initialize_parameters).
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    X = rand_data.X['full']
    n_components = rand_data.n_components
    for prior_type in PRIOR_TYPE:
        # Computation of the full_covariance
        bgmm = BayesianGaussianMixture(
            weight_concentration_prior_type=prior_type,
            n_components=2 * n_components, covariance_type='full',
            max_iter=1, random_state=0, tol=1e-7)
        bgmm._check_initial_parameters(X)
        bgmm._initialize_parameters(X, np.random.RandomState(0))
        # covariances_ is normalised by degrees_of_freedom_; undo that here
        full_covariances = (
            bgmm.covariances_ *
            bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
        # Check tied_covariance = mean(full_covariances, 0)
        bgmm = BayesianGaussianMixture(
            weight_concentration_prior_type=prior_type,
            n_components=2 * n_components, covariance_type='tied',
            max_iter=1, random_state=0, tol=1e-7)
        bgmm._check_initial_parameters(X)
        bgmm._initialize_parameters(X, np.random.RandomState(0))
        tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
        assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
        # Check diag_covariance = diag(full_covariances)
        bgmm = BayesianGaussianMixture(
            weight_concentration_prior_type=prior_type,
            n_components=2 * n_components, covariance_type='diag',
            max_iter=1, random_state=0, tol=1e-7)
        bgmm._check_initial_parameters(X)
        bgmm._initialize_parameters(X, np.random.RandomState(0))
        diag_covariances = (bgmm.covariances_ *
                            bgmm.degrees_of_freedom_[:, np.newaxis])
        assert_almost_equal(diag_covariances,
                            np.array([np.diag(cov)
                                     for cov in full_covariances]))
        # Check spherical_covariance = np.mean(diag_covariances, 0)
        bgmm = BayesianGaussianMixture(
            weight_concentration_prior_type=prior_type,
            n_components=2 * n_components, covariance_type='spherical',
            max_iter=1, random_state=0, tol=1e-7)
        bgmm._check_initial_parameters(X)
        bgmm._initialize_parameters(X, np.random.RandomState(0))
        spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
        assert_almost_equal(
            spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
    # We check that the dot product of the covariance and the precision
    # matrices is identity.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components, n_features = 2 * rand_data.n_components, 2
    # Computation of the full_covariance
    bgmm = BayesianGaussianMixture(n_components=n_components,
                                   max_iter=100, random_state=rng, tol=1e-3,
                                   reg_covar=0)
    for covar_type in COVARIANCE_TYPE:
        bgmm.covariance_type = covar_type
        bgmm.fit(rand_data.X[covar_type])
        # The covariance/precision storage layout differs per covar_type,
        # hence the per-type identity checks below.
        if covar_type == 'full':
            for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
                assert_almost_equal(np.dot(covar, precision),
                                    np.eye(n_features))
        elif covar_type == 'tied':
            assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
                                np.eye(n_features))
        elif covar_type == 'diag':
            assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
                                np.ones((n_components, n_features)))
        else:
            assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
                                np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
    # We check here that adding a constant to the data shifts the fitted
    # means accordingly and leaves weights and covariances unchanged.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=100)
    n_components = 2 * rand_data.n_components
    for prior_type in PRIOR_TYPE:
        for covar_type in COVARIANCE_TYPE:
            X = rand_data.X[covar_type]
            bgmm1 = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=n_components, max_iter=100, random_state=0,
                tol=1e-3, reg_covar=0).fit(X)
            bgmm2 = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=n_components, max_iter=100, random_state=0,
                tol=1e-3, reg_covar=0).fit(X + 100)
            assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
            assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
            assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyGLOME is a Python library that provides an API for GLOME protocol.
Basic Usage:
In order for Alice and Bob to communicate, the first step would be to generate
some new keys:
>>> import pyglome
>>> alice_keys = pyglome.generate_keys()
>>> bob_keys = pyglome.generate_keys()
Suppose that Alice knows Bob's `public_key` and wants to send Bob the message
`msg`, and that no other messages have been shared before. Alice will need to:
>>> glome = pyglome.Glome(bob_keys.public, alice_keys.private)
>>> first_tag = glome.tag(msg, counter=0)
Alice will then send Bob both msg and first_tag, as well as Alice's public key.
On Bob's end, he will need to do the following:
>>> glome = pyglome.Glome(alice_keys.public, bob_keys.private)
>>> try:
... first_tag = glome.check(first_tag, msg, counter=0)
... except pyglome.TagCheckError as tag_error:
... ## Handle the exception.
>>> ## do what you have to do
"""
# Bring glome module to top level
from pyglome.glome import (Glome, TagCheckError, IncorrectTagError,
TagGenerationError, generate_keys, AutoGlome) | unknown | codeparrot/codeparrot-clean | ||
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
#from django.http import Http404
#from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.views import generic # generic views documentation https://docs.djangoproject.com/en/1.7/topics/class-based-views/
from .models import Greeting
from polls.models import Choice, Question
import requests
import os
# Create your views here.
def index(request):
    """Render the site landing page.

    The previous httpbin/TIMES experiment was dead (commented-out) code and
    has been removed.
    """
    return render(request, 'index.html')
def db(request):
    """Persist one new Greeting row, then render every greeting stored so far."""
    Greeting().save()
    all_greetings = Greeting.objects.all()
    return render(request, 'db.html', {'greetings': all_greetings})
# polls project
#def indexpolls(request):
# not use django.shortcuts
#latest_question_list = Question.objects.order_by('-pub_date')[:5]
#template = loader.get_template('polls/index.html')
#context = RequestContext(request, {
# 'latest_question_list': latest_question_list,
#})
#return HttpResponse(template.render(context))
# use django.shortcuts
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list}
# return render(request, 'polls/index.html', context) # at this time we don't need `HttpResponse`, `loader`, and `RequestContext`
class IndexpollsView(generic.ListView):
    # Generic list view backing the poll index page; the template receives
    # the queryset under the name 'latest_question_list'.
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]
#def detail(request, question_id):
#return HttpResponse("You're looking at question %s." % question_id)
#try:
# question = Question.objects.get(pk=question_id)
#except Question.DoesNotExist:
# raise Http404("Question does not exist")
#return render(request, 'polls/detail.html', {'question': question})
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
class DetailView(generic.DetailView):
    # Shows a single Question; generic.DetailView looks it up by pk from the URL.
    model = Question
    template_name = 'polls/detail.html'
#def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question})
class ResultsView(generic.DetailView):
    # Shows the vote tallies for a single Question.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one choice of the given question (POST handler)."""
    p = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = p.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': p,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): 'votes += 1' is a read-modify-write and can lose
        # votes under concurrent POSTs — consider F('votes') + 1.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
from social.backends.oauth import BaseOAuth2
# This provides a backend for python-social-auth. This should not be confused
# with officially battle.net offerings. This piece of code is not officially
# affiliated with Blizzard Entertainment, copyrights to their respective
# owners. See http://us.battle.net/en/forum/topic/13979588015 for more details.
class BattleNetOAuth2(BaseOAuth2):
    """ battle.net Oauth2 backend"""
    name = 'battlenet-oauth2'
    # Key in the user-data payload that uniquely identifies the account.
    ID_KEY = 'accountId'
    REDIRECT_STATE = False
    # EU region endpoints are hard-coded throughout this backend.
    AUTHORIZATION_URL = 'https://eu.battle.net/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://eu.battle.net/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REVOKE_TOKEN_METHOD = 'GET'
    DEFAULT_SCOPE = ['wow.profile']
    # Fields copied from the token response into social-auth's extra_data.
    # NOTE(review): presumably the third tuple element marks the entry as
    # optional/discardable — confirm against BaseOAuth2.extra_data.
    EXTRA_DATA = [
        ('refresh_token', 'refresh_token', True),
        ('expires_in', 'expires'),
        ('token_type', 'token_type', True)
    ]
    def get_characters(self, access_token):
        """
        Fetches the character list from the battle.net API. Returns list of
        characters or empty list if the request fails.
        """
        params = {'access_token': access_token}
        # Optional locale passthrough via the backend's API_LOCALE setting.
        if self.setting('API_LOCALE'):
            params['locale'] = self.setting('API_LOCALE')
        response = self.get_json(
            'https://eu.api.battle.net/wow/user/characters',
            params=params
        )
        return response.get('characters') or []
    def get_user_details(self, response):
        """ Return user details from Battle.net account """
        return {'battletag': response.get('battletag')}
    def user_data(self, access_token, *args, **kwargs):
        """ Loads user data from service """
        return self.get_json(
            'https://eu.api.battle.net/account/user/battletag',
            params={'access_token': access_token}
        )
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import tempfile
import time
import re
import errno
import logging
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
# Directory for RPC-coverage logs; set via enable_coverage(), None = disabled.
COVERAGE_DIR = None
logger = logging.getLogger("TestFramework.utils")
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
# Seconds to wait for a stopped bitcoind process to exit.
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
    # Must be initialized with a unique integer for each process
    # (used by p2p_port()/rpc_port() to keep parallel test runs apart).
    n = None
# Set Mocktime default to OFF (0 = use the real clock).
# MOCKTIME is only needed for scripts that use the
# cached version of the blockchain. If the cached
# version of the blockchain is used without MOCKTIME
# then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Point MOCKTIME at the fixed timestamp baked into the cached chain."""
    # For backward compatibility of the python scripts with previous
    # versions of the cache, set MOCKTIME to Jan 1, 2014 + (201 * 10 * 60)
    global MOCKTIME
    MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
    """Reset MOCKTIME to 0, i.e. use the real clock again."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the current mock time (0 when mocktime is disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing."""
    # get_rpc_proxy() checks this global to decide whether to wrap proxies.
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url # store URL on proxy for info
    # Coverage logging is only active when enable_coverage() has set
    # COVERAGE_DIR; otherwise the wrapper gets a None logfile.
    coverage_logfile = coverage.get_filename(
        COVERAGE_DIR, node_number) if COVERAGE_DIR else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Deterministic p2p listen port for node n, shifted per test process
    via PortSeed.n so parallel runs never collide."""
    assert(n <= MAX_NODES)
    process_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + process_offset
def rpc_port(n):
    """Deterministic RPC port for node n (the p2p range shifted up by PORT_RANGE)."""
    process_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + process_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Number of raw bytes encoded by a hex string."""
    decoded = bytearray.fromhex(hex_string)
    return len(decoded)
def bytes_to_hex_str(byte_str):
    """Return the lowercase ASCII hex representation of a byte string."""
    hex_bytes = hexlify(byte_str)
    return hex_bytes.decode('ascii')
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """UTF-8 encode *string*, then base64-encode it, returning ASCII text."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # waitforblockheight takes its timeout in milliseconds
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            # Same height but diverging hashes means a fork, not slowness.
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same best block
    """
    remaining = timeout
    while remaining > 0:
        best_hash = [node.getbestblockhash() for node in rpc_connections]
        if best_hash.count(best_hash[0]) == len(best_hash):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    remaining = timeout
    while remaining > 0:
        # Compare every other node's mempool against the first node's.
        reference_pool = set(rpc_connections[0].getrawmempool())
        others = rpc_connections[1:]
        if all(set(node.getrawmempool()) == reference_pool for node in others):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
# Maps node index -> subprocess.Popen handle; populated by _start_node().
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create dirname/nodeN and write a regtest bitcoin.conf for node n.

    Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    # utf8 encoding is required: the credentials contain non-ASCII characters.
    with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_auth_pair(n):
    """Deterministic (rpcuser, rpcpassword) credential pair for node n."""
    suffix = str(n)
    return 'rpcuser💻' + suffix, 'rpcpass🔑' + suffix
def rpc_url(i, rpchost=None):
    """Build the authenticated RPC URL for node i.

    rpchost may override the default 127.0.0.1, either as 'host' or
    'host:port'.
    """
    rpc_u, rpc_p = rpc_auth_pair(i)
    host, port = '127.0.0.1', rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
    '''
    Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
    Raise an exception if bitcoind exits during initialization.
    '''
    while True:
        if process.poll() is not None:
            raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            # Any successful RPC means the node is up and out of warmup;
            # the result itself is irrelevant (was stored in an unused local).
            rpc.getblockcount()
            break  # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def _start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
    """Start a bitcoind and return RPC connection to it
    This function should only be called from within test_framework, not by individual test scripts."""
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    # -uacomment=testnode<i> tags the peer so disconnect_nodes() can find it.
    args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(get_mocktime()), "-uacomment=testnode%d" % i]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
    logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    wait_for_bitcoind_start(bitcoind_processes[i], url, i)
    logger.debug("initialize_chain: RPC successfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None):
    """Start node i and assert that bitcoind exits during initialization,
    optionally checking that expected_msg appears on its stderr."""
    with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
        try:
            node = _start_node(i, dirname, extra_args, stderr=log_stderr)
            _stop_node(node, i)
        except Exception as e:
            assert 'bitcoind exited' in str(e) #node must have shutdown
            if expected_msg is not None:
                log_stderr.seek(0)
                stderr = log_stderr.read().decode('utf-8')
                if expected_msg not in stderr:
                    raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
        else:
            # Reaching here means the node started cleanly — that's a failure.
            if expected_msg is None:
                assert_msg = "bitcoind should have exited with an error"
            else:
                assert_msg = "bitcoind should have exited with expected error " + expected_msg
            raise AssertionError(assert_msg)
def _start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """Start multiple bitcoinds, return RPC connections to them
    This function should only be called from within test_framework, not by individual test scripts."""
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    assert_equal(len(extra_args), num_nodes)
    assert_equal(len(binary), num_nodes)
    proxies = []
    try:
        for idx in range(num_nodes):
            proxies.append(_start_node(idx, dirname, extra_args[idx], rpchost, timewait=timewait, binary=binary[idx]))
    except:  # If one node failed to start, stop the others
        _stop_nodes(proxies)
        raise
    return proxies
def log_filename(dirname, n_node, logname):
    """Return the path of log file `logname` inside node n_node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def _stop_node(node, i):
    """Stop a bitcoind test node
    This function should only be called from within test_framework, not by individual test scripts."""
    logger.debug("Stopping node %d" % i)
    try:
        node.stop()
    except http.client.CannotSendRequest:
        # The RPC connection is already gone; the process wait below still applies.
        logger.exception("Unable to stop node")
    exit_status = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
    assert_equal(exit_status, 0)
    del bitcoind_processes[i]
def _stop_nodes(nodes):
    """Stop multiple bitcoind test nodes
    This function should only be called from within test_framework, not by individual test scripts."""
    for idx, node in enumerate(nodes):
        _stop_node(node, idx)
    assert not bitcoind_processes.values()  # All connections must be gone now
def set_node_times(nodes, t):
    """Set the mock time of every node in `nodes` to t."""
    for n in nodes:
        n.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of from_connection whose subver tags it as test node node_num."""
    tag = "testnode%d" % node_num
    for peer_id in [p['id'] for p in from_connection.getpeerinfo() if tag in p['subver']]:
        from_connection.disconnectnode(nodeid=peer_id)
    # Poll (up to ~5s) until all matching connections have actually gone away.
    for _ in range(50):
        if not [p['id'] for p in from_connection.getpeerinfo() if tag in p['subver']]:
            break
        time.sleep(0.1)
    else:
        raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
    """Open a one-shot connection from from_connection to local node node_num."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes a and b in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    decoded = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(decoded["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    selected = []
    running_total = Decimal("0.00000000")
    while running_total < amount_needed and candidates:
        utxo = candidates.pop()
        running_total += utxo["amount"]
        selected.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if running_total < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, running_total))
    return (running_total, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_addr = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[extra_addr] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_addr]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[recipient.getnewaddress()] = float(amount)
    raw = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range"""
    lower_bound = tx_size * fee_per_kB / 1000
    if fee < lower_bound:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(lower_bound)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(lower_bound)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless all given values compare equal to thing1."""
    values = (thing1, thing2) + args
    if any(thing1 != other for other in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in values))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    # Assert that fun(*args, **kwds) raises exc, without checking the message.
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises exc.

    When message is not None it must be a substring of the raised
    exception's error['message'] payload (JSON-RPC style exceptions).
    """
    try:
        fun(*args, **kwds)
    except exc as err:
        if message is not None:
            if message not in err.error['message']:
                raise AssertionError("Expected substring not found:"+err.error['message'])
    except Exception as err:
        raise AssertionError("Unexpected exception raised: "+type(err).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was returned or if the error code/message are not as expected.
    Args:
        code (int), optional: the error code returned by the RPC call (defined
        in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
        RPC call. Set to None if checking the error string is not required
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as err:
        # The expected exception type arrived; validate its payload.
        if code is not None and code != err.error["code"]:
            raise AssertionError("Unexpected JSONRPC error code %i" % err.error["code"])
        if message is not None and message not in err.error['message']:
            raise AssertionError("Expected substring not found:"+err.error['message'])
    except Exception as err:
        raise AssertionError("Unexpected exception raised: "+type(err).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Assert that `string` parses as a base-16 integer."""
    try:
        int(string, 16)
    except Exception as parse_error:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, parse_error))
def assert_is_hash_string(string, length=64):
    """Assert `string` looks like a lowercase hex hash of the given length.

    Pass length=None (or 0) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        # Not-found mode verifies nothing on a match, so the caller must
        # pass an empty `expected` dict.
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # An item matches only if every to_match key/value agrees.
        all_match = True
        for key,value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find == True:
            # NOTE: a match is counted here and again below, so matches count
            # twice in not-found mode; harmless, since that mode only tests
            # num_matched > 0.
            num_matched = num_matched+1
        for key,value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Round `amount` down to eight decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure `node` owns at least `count` confirmed utxos and return them.

    `fee` must be sufficient for relay and mining the splitting transactions.
    """
    # Mine enough blocks for coinbase maturity plus roughly count/2 spendable outputs.
    node.generate(int(0.5 * count) + 101)
    utxos = node.listunspent()
    shortfall = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if shortfall <= 0:
        return utxos
    # Split one utxo into two until the shortfall is covered.
    for _ in range(shortfall):
        spend = utxos.pop()
        inputs = [{"txid": spend["txid"], "vout": spend["vout"]}]
        send_value = spend['amount'] - fee
        outputs = {addr1: satoshi_round(send_value / 2),
                   addr2: satoshi_round(send_value / 2)}
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm everything before returning.
    while node.getmempoolinfo()['size'] > 0:
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return a hex blob of 128 large OP_RETURN txouts.

    The blob is spliced into a raw transaction (see
    create_lots_of_big_transactions) to bloat it so that only a few
    transactions fit per block.
    """
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 0x0204 (=516 bytes), padded
    # with 512 bytes of 0x01.  Built with string multiplication instead of
    # the original quadratic += loop.
    script_pubkey = "6a4d0200" + "01" * 512
    # One txout: 8-byte zero value, compact-size script length 0xfd 0x0204
    # (516 = len("6a4d0200")/2 + 512), then the script itself.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    # "81" is the compact-size output count (0x81 = 129: these 128 txouts
    # plus the change txout they are inserted before).
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Create, sign and return the hex of a 1-in/1-out tx spending output 0 of `coinbase`."""
    raw = node.createrawtransaction([{"txid": coinbase, "vout": 0}],
                                    {to_address: amount})
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Spend `num` utxos, splicing `txouts` into each raw tx to bloat it; return txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        spend = utxos.pop()
        inputs = [{"txid": spend["txid"], "vout": spend["vout"]}]
        outputs = {addr: satoshi_round(spend['amount'] - fee)}
        rawtx = node.createrawtransaction(inputs, outputs)
        # Replace the two hex chars at offset 92-94 with the txouts blob;
        # presumably that is the output-count byte of the serialized tx,
        # and txouts carries its own count -- verify against gen_return_txouts.
        padded = rawtx[0:92] + txouts + rawtx[94:]
        signresult = node.signrawtransaction(padded, None, None, "NONE")
        txids.append(node.sendrawtransaction(signresult["hex"], True))
    return txids
def mine_large_block(node, utxos=None):
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    if utxos is None:
        utxos = []
    if len(utxos) < num:
        # refill the (caller-owned) list in place
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for `key` from the node's blockchain info."""
    return node.getblockchaininfo()['bip9_softforks'][key]
import os
import infra.basetest
BASIC_TOOLCHAIN_CONFIG_HEADERS_AT_LEAST_3_14 = \
"""
BR2_arm=y
BR2_TOOLCHAIN_EXTERNAL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM=y
BR2_TOOLCHAIN_EXTERNAL_DOWNLOAD=y
BR2_TOOLCHAIN_EXTERNAL_URL="http://autobuild.buildroot.org/toolchains/tarballs/br-arm-full-2019.05.1.tar.bz2"
BR2_TOOLCHAIN_EXTERNAL_GCC_4_9=y
BR2_TOOLCHAIN_EXTERNAL_HEADERS_4_14=y
BR2_TOOLCHAIN_EXTERNAL_LOCALE=y
# BR2_TOOLCHAIN_EXTERNAL_HAS_THREADS_DEBUG is not set
BR2_TOOLCHAIN_EXTERNAL_CXX=y
"""
class TestAtop(infra.basetest.BRTest):
    # Build the atop package into an ARM cpio rootfs (no tar image).
    config = BASIC_TOOLCHAIN_CONFIG_HEADERS_AT_LEAST_3_14 + \
        """
        BR2_PACKAGE_ATOP=y
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """
    def test_run(self):
        # Boot the built image under emulation and log in.
        cpio_file = os.path.join(self.builddir, "images", "rootfs.cpio")
        self.emulator.boot(arch="armv5",
                           kernel="builtin",
                           options=["-initrd", cpio_file])
        self.emulator.login()
        # atop must report a version banner...
        cmd = "atop -V | grep '^Version'"
        _, exit_code = self.emulator.run(cmd)
        self.assertEqual(exit_code, 0)
        # ...and a one-second sample must list the atop process itself.
        cmd = "atop -a 1 2 | grep '% *atop *$'"
        _, exit_code = self.emulator.run(cmd)
        self.assertEqual(exit_code, 0)
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for the reportlab.platypus.paragraphs module.
"""
__version__=''' $Id: test_platypus_paragraphs.py 3959 2012-09-27 14:39:39Z robin $ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os, unittest
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, registerFont, registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable, DocAssert
from reportlab.lib.colors import Color
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, PageBreak, NextPageTemplate
from reportlab.platypus import tableofcontents
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.paragraph import *
from reportlab.platypus.paragraph import _getFragWords
def myMainPageFrame(canvas, doc):
    "The page frame used for all PDF documents."
    canvas.saveState()
    # frame border
    canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
    # centered-ish page number at the bottom
    canvas.setFont('Times-Roman', 12)
    canvas.drawString(10*cm, cm, str(canvas.getPageNumber()))
    canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
    """Document with two page templates: 'normal' (full width) and 'special' (narrow 310pt frame)."""
    _invalidInitArgs = ('pageTemplates',)

    def __init__(self, filename, **kw):
        wide_frame = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
        narrow_frame = Frame(2.5*cm, 2.5*cm, 310, 25*cm, id='F2')
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        self.addPageTemplates([
            PageTemplate('normal', [wide_frame], myMainPageFrame),
            PageTemplate('special', [narrow_frame], myMainPageFrame),
        ])
class ParagraphCorners(unittest.TestCase):
    "some corner cases which should parse"
    def check(self,text,bt = getSampleStyleSheet()['BodyText']):
        # Assert that `text` is accepted by the Paragraph parser.
        # NOTE: the default style is created once at class-definition time;
        # harmless here because check() never mutates it.
        try:
            P = Paragraph(text,style=bt)
        except:
            raise AssertionError("'%s' should parse"%text)
    def test0(self):
        # Empty / whitespace-only <para> markup variants must all parse.
        self.check('<para />')
        self.check('<para/>')
        self.check('\t\t\t\n\n\n<para />')
        self.check('\t\t\t\n\n\n<para/>')
        self.check('<para\t\t\t\t/>')
        self.check('<para></para>')
        self.check('<para> </para>')
        self.check('\t\t\n\t\t\t <para> </para>')
    def test1(self):
        "This makes several special paragraphs."
        # Build story.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        # One style per textTransform setting.
        btN = ParagraphStyle('BodyTextTTNone',parent=bt,textTransform='none')
        btL = ParagraphStyle('BodyTextTTLower',parent=bt,textTransform='lowercase')
        btU = ParagraphStyle('BodyTextTTUpper',parent=bt,textTransform='uppercase')
        btC = ParagraphStyle('BodyTextTTCapitalize',parent=bt,textTransform='capitalize')
        story.append(Paragraph('''This should be ORDINARY text.''',style=bt))
        story.append(Paragraph('''This should be ORDINARY text.''',style=btN))
        story.append(Paragraph('''This should be LOWER text.''',style=btL))
        story.append(Paragraph('''This should be upper text.''',style=btU))
        story.append(Paragraph('''This should be cAPITALIZED text.''',style=btC))
        # Same again with multiple frags (inline markup) per paragraph.
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=bt))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=btN))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>LOWER</b> text.''',style=btL))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>upper</b> text.''',style=btU))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>cAPITALIZED</b> text.''',style=btC))
        doc = MyDocTemplate(outputfile('test_platypus_specialparagraphs.pdf'))
        doc.multiBuild(story)
    def test2(self):
        '''CJK splitting in multi-frag case'''
        style = ParagraphStyle('test', wordWrap = 'CJK')
        p = Paragraph('bla <i>blub</i> '*130 , style)
        aW,aH=439.275590551,121.88976378
        w,h=p.wrap(aW,aH)
        S=p.split(aW,aH)
        assert len(S)==2, 'Multi frag CJK splitting failed'
        w0,h0=S[0].wrap(aW,aH)
        # BUGFIX: the failure message previously referenced the undefined
        # name H0, which raised NameError instead of the intended assertion.
        assert h0<=aH,'Multi-frag CJK split[0] has wrong height %s >= available %s' % (h0,aH)
        w1,h1=S[1].wrap(aW,aH)
        assert h0+h1==h, 'Multi-frag-CJK split[0].height(%s)+split[1].height(%s) don\'t add to original %s' % (h0,h1,h)
    def test3(self):
        '''compare CJK splitting in some edge cases'''
        from reportlab.pdfgen.canvas import Canvas
        from reportlab.platypus.paragraph import Paragraph
        from reportlab.lib.styles import ParagraphStyle
        from reportlab.pdfbase import pdfmetrics
        from reportlab.lib.enums import TA_LEFT
        sty = ParagraphStyle('A')
        sty.fontSize = 15
        sty.leading = sty.fontSize*1.2
        sty.fontName = 'Courier'
        sty.alignment = TA_LEFT
        sty.wordWrap = 'CJK'
        # Single-frag and multi-frag versions of the same text.
        p0=Paragraph('ABCDEFGHIJKL]N',sty)
        p1=Paragraph('AB<font color="red">C</font>DEFGHIJKL]N',sty)
        canv = Canvas('test_platypus_paragraph_cjk3.pdf')
        ix = len(canv._code)
        aW = pdfmetrics.stringWidth('ABCD','Courier',15)
        # Wrap/draw each at 4-char width, then at roughly 1-char width.
        w,h=p0.wrap(aW,1000000)
        y = canv._pagesize[1]-72-h
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        w,h=p0.wrap(aW*0.25-2,1000000)
        y -= h+10
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW/4.-2,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        # The emitted PDF text operations must match this reference exactly.
        assert canv._code[ix:]==['q', '1 0 0 1 72 697.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 57 Tm /F2 15 Tf 18 TL (ABCD) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 615.8898 cm', 'q', 'BT 1 0 0 1 0 57 Tm 18 TL /F2 15 Tf 0 0 0 rg (AB) Tj 1 0 0 rg (C) Tj 0 0 0 rg (D) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 353.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 237 Tm /F2 15 Tf 18 TL (A) Tj T* (B) Tj T* (C) Tj T* (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 91.88976 cm', 'q', 'BT 1 0 0 1 0 237 Tm 18 TL /F2 15 Tf 0 0 0 rg (A) Tj T* (B) Tj T* 1 0 0 rg (C) Tj T* 0 0 0 rg (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q']
        canv.showPage()
        canv.save()
class ParagraphSplitTestCase(unittest.TestCase):
    "Test multi-page splitting of paragraphs (eyeball-test)."
    def test0(self):
        "This makes one long multi-page paragraph."
        # Build story.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        text = '''If you imagine that the box of X's tothe left is
        an image, what I want to be able to do is flow a
        series of paragraphs around the image
        so that once the bottom of the image is reached, then text will flow back to the
        left margin. I know that it would be possible to something like this
        using tables, but I can't see how to have a generic solution.
        There are two examples of this in the demonstration section of the reportlab
        site.
        If you look at the "minimal" euro python conference brochure, at the end of the
        timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
        see how the AdSu one might be done generically, but the O'Reilly, unsure...
        I guess I'm hoping that I've missed something, and that
        it's actually easy to do using platypus.
        '''
        from reportlab.platypus.flowables import ParagraphAndImage, Image
        from reportlab.lib.testutils import testsFolder
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        # Short paragraph with the image on the (default) right side.
        story.append(ParagraphAndImage(Paragraph(text,bt),Image(gif)))
        phrase = 'This should be a paragraph spanning at least three pages. '
        description = ''.join([('%d: '%i)+phrase for i in range(250)])
        # Very long paragraph to force multi-page splitting; image on the left.
        story.append(ParagraphAndImage(Paragraph(description, bt),Image(gif),side='left'))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphandimage.pdf'))
        doc.multiBuild(story)
    def test1(self):
        "This makes one long multi-page paragraph."
        # Build story.
        story = []
        styleSheet = getSampleStyleSheet()
        h3 = styleSheet['Heading3']
        bt = styleSheet['BodyText']
        # Markup below exercises greek tags, unichar/entity escapes,
        # link/a tags and strike-through.
        text = '''If you imagine that the box of X's tothe left is
        an image, what I want to be able to do is flow a
        series of paragraphs around the image
        so that once the bottom of the image is reached, then text will flow back to the
        left margin. I know that it would be possible to something like this
        using tables, but I can't see how to have a generic solution.
        There are two examples of this in the demonstration section of the reportlab
        site.
        If you look at the "minimal" euro python conference brochure, at the end of the
        timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
        see how the AdSu one might be done generically, but the O'Reilly, unsure...
        I guess I'm hoping that I've missed something, and that
        it's actually easy to do using platypus.We can do greek letters <greek>mDngG</greek>. This should be a
        u with a dieresis on top <unichar code=0xfc/>="<unichar code="0xfc"/>" and this &#xfc;="ü" and this \\xc3\\xbc="\xc3\xbc". On the other hand this
        should be a pound sign &pound;="£" and this an alpha &alpha;="α". You can have links in the page <link href="http://www.reportlab.com" color="blue">ReportLab</link> & <a href="http://www.reportlab.org" color="green">ReportLab.org</a>.
        Use scheme "pdf:" to indicate an external PDF link, "http:", "https:" to indicate an external link eg something to open in
        your browser. If an internal link begins with something that looks like a scheme, precede with "document:". <strike>This text should have a strike through it.</strike>
        '''
        from reportlab.platypus.flowables import ImageAndFlowables, Image
        from reportlab.lib.testutils import testsFolder
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        heading = Paragraph('This is a heading',h3)
        story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(text,bt)]))
        phrase = 'This should be a paragraph spanning at least three pages. '
        description = ''.join([('%d: '%i)+phrase for i in range(250)])
        story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(description, bt)],imageSide='left'))
        story.append(NextPageTemplate('special'))
        story.append(PageBreak())
        # Register the Vera TrueType family so <font name="Vera"> with <b>/<i> works.
        VERA = ('Vera','VeraBd','VeraIt','VeraBI')
        for v in VERA:
            registerFont(TTFont(v,v+'.ttf'))
        registerFontFamily(*(VERA[:1]+VERA))
        story.append(ImageAndFlowables(
                        Image(gif,width=280,height=120),
                        Paragraph('''<font name="Vera">The <b>concept</b> of an <i>integrated</i> one <b><i>box</i></b> solution for <i><b>advanced</b></i> voice and
        data applications began with the introduction of the IMACS. The
        IMACS 200 carries on that tradition with an integrated solution
        optimized for smaller port size applications that the IMACS could not
        economically address. An array of the most popular interfaces and
        features from the IMACS has been bundled into a small 2U chassis
        providing the ultimate in ease of installation.</font>''',
                        style=ParagraphStyle(
                                name="base",
                                fontName="Helvetica",
                                leading=12,
                                leftIndent=0,
                                firstLineIndent=0,
                                spaceBefore = 9.5,
                                fontSize=9.5,
                                )
                        ),
                        imageSide='left',
                        )
                    )
        story.append(ImageAndFlowables(
                        Image(gif,width=240,height=120),
                        Paragraph('''The concept of an integrated one box solution for advanced voice and
        data applications began with the introduction of the IMACS. The
        IMACS 200 carries on that tradition with an integrated solution
        optimized for smaller port size applications that the IMACS could not
        economically address. An array of the most popular interfaces and
        features from the IMACS has been bundled into a small 2U chassis
        providing the ultimate in ease of installation.''',
                        style=ParagraphStyle(
                                name="base",
                                fontName="Helvetica",
                                leading=12,
                                leftIndent=0,
                                firstLineIndent=0,
                                spaceBefore = 9.5,
                                fontSize=9.5,
                                )
                        ),
                        imageSide='left',
                        )
                    )
        story.append(PageBreak())
        story.append(Paragraph('Image larger than the frame',h3))
        story.append(ImageAndFlowables(
                        Image(gif,width=6*110,height=6*44),
                        Paragraph('''The concept of an integrated one box solution for advanced voice and
        data applications began with the introduction of the IMACS. The
        IMACS 200 carries on that tradition with an integrated solution
        optimized for smaller port size applications that the IMACS could not
        economically address. An array of the most popular interfaces and
        features from the IMACS has been bundled into a small 2U chassis
        providing the ultimate in ease of installation.''',
                        style=ParagraphStyle(
                                name="base",
                                fontName="Helvetica",
                                leading=12,
                                leftIndent=0,
                                firstLineIndent=0,
                                spaceBefore = 9.5,
                                fontSize=9.5,
                                )
                        ),
                        imageSide='left',
                        )
                    )
        doc = MyDocTemplate(outputfile('test_platypus_imageandflowables.pdf'),showBoundary=1)
        doc.multiBuild(story)
class TwoFrameDocTemplate(BaseDocTemplate):
    "Define a simple document with two frames per page."

    def __init__(self, filename, **kw):
        margin = 2*cm
        from reportlab.lib import pagesizes
        PAGESIZE = pagesizes.landscape(pagesizes.A4)
        # Two columns, each half the usable width; usable height shortened by 14cm.
        col_width = (PAGESIZE[0] - 2*margin) / 2.
        col_height = (PAGESIZE[1] - 2*margin) - 14*cm
        no_padding = dict(leftPadding=0, topPadding=0, rightPadding=0, bottomPadding=0,
                          showBoundary=True)
        left = Frame(margin, margin + 0.5*cm,
                     col_width - 0.75*cm, col_height - 1*cm, id='F1', **no_padding)
        right = Frame(col_width + 2.7*cm, margin + 0.5*cm,
                      col_width - 0.75*cm, col_height - 1*cm, id='F2', **no_padding)
        BaseDocTemplate.__init__(self, filename, **kw)
        self.addPageTemplates(PageTemplate('template', [left, right]))
class SplitFrameParagraphTest(unittest.TestCase):
    "Test paragraph split over two frames."

    def test(self):
        body = getSampleStyleSheet()['BodyText']
        body.fontName = "Helvetica"
        body.fontSize = 12
        body.leading = 16
        body.alignment = TA_JUSTIFY
        text = "Bedauerlicherweise ist ein Donaudampfschiffkapit\xc3\xa4n auch <font color='red'>nur</font> <font color='green'>ein</font> Dampfschiffkapit\xc3\xa4n."
        tagFormat = '%s'
        # strange behaviour when using next code line
        # (same for '<a href="http://www.reportlab.org">%s</a>'
        tagFormat = '<font color="red">%s</font>'
        #text = " ".join([tagFormat % w for w in text.split()])
        story = [Paragraph((text + " ") * 3, style=body)]
        from reportlab.lib import pagesizes
        page_size = pagesizes.landscape(pagesizes.A4)
        doc = TwoFrameDocTemplate(outputfile('test_paragraphs_splitframe.pdf'), pagesize=page_size)
        doc.build(story)
class FragmentTestCase(unittest.TestCase):
    "Test fragmentation of paragraphs."

    def test0(self):
        "Test empty paragraph."
        body_style = getSampleStyleSheet()['BodyText']
        para = Paragraph('', body_style)
        # An empty paragraph must produce no frags at all.
        assert [f.text for f in para.frags] == []

    def test1(self):
        "Test simple paragraph."
        body_style = getSampleStyleSheet()['BodyText']
        para = Paragraph("X<font name=Courier>Y</font>Z", body_style)
        # The font tag splits the text into three frags.
        assert [f.text for f in para.frags] == ['X', 'Y', 'Z']
class ULTestCase(unittest.TestCase):
    "Test underlining and overstriking of paragraphs."
    def testUl(self):
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        from reportlab.platypus.flowables import AnchorFlowable
        # Local single-frame document template with visible frame boundary.
        class MyDocTemplate(BaseDocTemplate):
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                kw['showBoundary']=1
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                    [
                    PageTemplate('normal',
                            [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                            ),
                    ])
        styleSheet = getSampleStyleSheet()
        # One style per alignment/indent variant exercised below.
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_sp = ParagraphStyle(name='normal_sp',parent=normal,alignment=TA_JUSTIFY,spaceBefore=12)
        normal_just = ParagraphStyle(name='normal_just',parent=normal,alignment=TA_JUSTIFY)
        normal_right = ParagraphStyle(name='normal_right',parent=normal,alignment=TA_RIGHT)
        normal_center = ParagraphStyle(name='normal_center',parent=normal,alignment=TA_CENTER)
        normal_indent = ParagraphStyle(name='normal_indent',firstLineIndent=0.5*inch,parent=normal)
        normal_indent_lv_2 = ParagraphStyle(name='normal_indent_lv_2',firstLineIndent=1.0*inch,parent=normal)
        texts = ['''Furthermore, a subset of <font size="14">English sentences</font> interesting on quite
        independent grounds is not quite equivalent to a stipulation to place
        the constructions into these various categories.''',
        '''We will bring evidence in favor of
        The following thesis: most of the methodological work in modern
        linguistics can be defined in such a way as to impose problems of
        phonemic and morphological analysis.''']
        story =[]
        a = story.append
        # Internal-link jump targets are planted deep inside the loops below.
        a(Paragraph("This should <a href=\"#theEnd\" color=\"blue\"><a href=\"#theEnd\" color=\"blue\">jump</a></a> jump to the end!",style=normal))
        a(XPreformatted("This should <a href=\"#theEnd\" color=\"blue\"><a href=\"#theEnd\" color=\"blue\">jump</a></a> jump to the end!",style=normal))
        a(Paragraph("<a href=\"#theEnd\"><u><font color=\"blue\">ditto</font></u></a>",style=normal))
        a(XPreformatted("<a href=\"#theEnd\"><u><font color=\"blue\">ditto</font></u></a>",style=normal))
        a(Paragraph("This <font color='CMYKColor(0,0.6,0.94,0)'>should</font> <a href=\"#thePenultimate\" color=\"blue\"><a href=\"#thePenultimate\" color=\"blue\">jump</a></a> jump to the penultimate page!",style=normal))
        a(Paragraph("This should <a href=\"#theThird\" color=\"blue\"><a href=\"#theThird\" color=\"blue\">jump</a></a> jump to a justified para!",style=normal))
        a(Paragraph("This should <a href=\"#theFourth\" color=\"blue\"><a href=\"#theFourth\" color=\"blue\">jump</a></a> jump to an indented para!",style=normal))
        # Cross product: plain/multi-frag text x u/strike x repeat count x
        # style x autoLeading setting.
        for mode in (0,1):
            text0 = texts[0]
            text1 = texts[1]
            if mode:
                text0 = text0.replace('English sentences','<b>English sentences</b>').replace('quite equivalent','<i>quite equivalent</i>')
                text1 = text1.replace('the methodological work','<b>the methodological work</b>').replace('to impose problems','<i>to impose problems</i>')
            for t in ('u','strike'):
                for n in range(6):
                    for s in (normal,normal_center,normal_right,normal_just,normal_indent, normal_indent_lv_2):
                        for autoLeading in ('','min','max'):
                            # Drop the anchor targets at fixed spots inside the sweep.
                            if n==4 and s==normal_center and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The second jump at the beginning should come here <a name=\"thePenultimate\"/><a name=\"thePenultimate\"/>!</font>",style=normal))
                            elif n==4 and s==normal_just and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The third jump at the beginning should come just below here to a paragraph with just an a tag in it!</font>",style=normal))
                                a(Paragraph("<a name=\"theThird\"/>",style=normal))
                            elif n==4 and s==normal_indent and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The fourth jump at the beginning should come just below here!</font>",style=normal))
                                a(AnchorFlowable('theFourth'))
                            a(Paragraph('n=%d style=%s(autoLeading=%s) tag=%s'%(n,s.name,autoLeading,t),style=normal_sp))
                            a(Paragraph('<para autoleading="%s">%s<%s>%s</%s>. %s <%s>%s</%s>. %s</para>' % (
                                autoLeading,
                                (s==normal_indent_lv_2 and '<seq id="document" inc="no"/>.<seq id="document_lv_2"/>' or ''),
                                t,' '.join((n+1)*['A']),t,text0,t,' '.join((n+1)*['A']),t,text1),
                                style=s))
        a(Paragraph("The jump at the beginning should come here <a name=\"theEnd\"/><a name=\"theEnd\"/>!",style=normal))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_ul.pdf'))
        doc.build(story)
class AutoLeadingTestCase(unittest.TestCase):
    "Test the autoLeading property of paragraphs."

    def testAutoLeading(self):
        # Builds test_platypus_paragraphs_autoleading.pdf for visual
        # inspection; the test passes if document construction does not raise.
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        from reportlab.platypus.flowables import AnchorFlowable
        class MyDocTemplate(BaseDocTemplate):
            # Local template with a single full-page frame; boundary drawing
            # is forced on so frame edges are visible in the output PDF.
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                kw['showBoundary']=1
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                    [
                    PageTemplate('normal',
                        [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                        ),
                    ])
        from reportlab.lib.testutils import testsFolder
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_sp = ParagraphStyle(name='normal_sp',parent=normal,alignment=TA_JUSTIFY,spaceBefore=12)
        # Sample texts with inline <font>/<img> markup; the embedded images
        # produce lines of varying height, which autoLeading must handle.
        texts = ['''Furthermore, a subset of <font size="14">English sentences</font> interesting on quite
independent grounds is not quite equivalent to a stipulation to place
<font color="blue">the constructions <img src="%(testsFolder)s/../docs/images/testimg.gif"/> into these various categories.</font>'''%dict(testsFolder=testsFolder),
            '''We will bring <font size="18">Ugly Things</font> in favor of
The following thesis: most of the methodological work in Modern
Linguistics can be <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="baseline" /> defined in such <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="10" /> a way as to impose problems of
phonemic and <u>morphological <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="top"/> </u> analysis.'''%dict(testsFolder=testsFolder)]
        story =[]
        a = story.append
        t = 'u'
        n = 1
        # Each style is rendered with all three autoLeading modes.
        for s in (normal,normal_sp):
            for autoLeading in ('','min','max'):
                a(Paragraph('style=%s(autoLeading=%s)'%(s.name,autoLeading),style=normal_sp))
                a(Paragraph('<para autoleading="%s"><%s>%s</%s>. %s <%s>%s</%s>. %s</para>' % (
                    autoLeading,
                    t,' '.join((n+1)*['A']),t,texts[0],t,' '.join((n+1)*['A']),t,texts[1]),
                    style=s))
        # Edge case: image as the very first thing on a line / next to <br/>.
        a(Paragraph('''<img src="%(testsFolder)s/../docs/images/testimg.gif" valign="top"/> image is very first thing in the line.'''%dict(testsFolder=testsFolder), style=normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        a(Paragraph('<img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="0.19in" /> some text <br /> '%dict(testsFolder=testsFolder), normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        a(Paragraph('<img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="0.19in" /> <br /> '%dict(testsFolder=testsFolder), normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        #Volker Haas' valign tests
        fmt = '''<font color="red">%(valign)s</font>: Furthermore, a <u>subset</u> <strike>of</strike> <font size="14">English sentences</font> interesting on quite
independent grounds is not quite equivalent to a stipulation to place <img src="%(testsFolder)s/../docs/images/redsquare.png" width="0.5in" height="0.5in" valign="%(valign)s"/>
the constructions into these <u>various</u> categories. We will bring <font size="18">Ugly Things</font> in favor of
The following thesis: most of the methodological work in Modern
Linguistics can be defined in such a way as to impose problems of
phonemic and <u>morphological</u> <strike>analysis</strike>.'''
        p_style= ParagraphStyle('Normal')
        p_style.autoLeading = 'max'
        # Exercise every supported img valign keyword plus percentage and
        # absolute-unit values, in both Paragraph and XPreformatted.
        for valign in (
                'baseline',
                'sub',
                'super',
                'top',
                'text-top',
                'middle',
                'bottom',
                'text-bottom',
                '0%',
                '2in',
                ):
            a(Paragraph(fmt % dict(valign=valign,testsFolder=testsFolder),p_style))
            a(XPreformatted(fmt % dict(valign=valign,testsFolder=testsFolder),p_style))
        # Percentage-based image width/height (relative to line/font metrics).
        a(Paragraph('<br/><b>Some Paragraph tests of <img width="x%" height="x%"</b>...', normal))
        a(Paragraph('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('<br/><b>Some XPreformatted tests of <img width="x%" height="x%"</b>...', normal))
        a(XPreformatted('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        # Same percentage tests again with CJK word wrapping enabled.
        a(Paragraph('<br/><b>Some CJK Paragraph tests of <img width="x%" height="x%"</b>...', normal))
        normalCJK = ParagraphStyle('normalCJK', parent=normal, wordWrap = 'CJK')
        a(Paragraph('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normalCJK))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_autoleading.pdf'))
        doc.build(story)
class JustifyTestCase(unittest.TestCase):
    "Test justification of paragraphs."

    def testUl(self):
        # Builds test_platypus_paragraphs_just.pdf, rendering the same
        # justified text in normal, mixed bold/italic, all-bold, all-italic
        # and hard-space variants. Passes if building does not raise.
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        class MyDocTemplate(BaseDocTemplate):
            # Single-frame template with visible red frame boundary.
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                    [
                    PageTemplate('normal',
                        [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                        ),
                    ])
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_just = ParagraphStyle(name='normal_just',parent=normal,alignment=TA_JUSTIFY,spaceAfter=12)
        text0 = '''Furthermore, a subset of English sentences interesting on quite
independent grounds is not quite equivalent to a stipulation to place
the constructions into these various categories. We will bring evidence in favor of
The following thesis: most of the methodological work in modern
linguistics can be defined in such a way as to impose problems of
phonemic and morphological analysis.'''
        story =[]
        a = story.append
        for mode in (0,1,2,3,4):
            text = text0
            if mode==1:
                # Mixed inline bold/italic markup.
                text = text.replace('English sentences','<b>English sentences</b>').replace('quite equivalent','<i>quite equivalent</i>')
                text = text.replace('the methodological work','<b>the methodological work</b>').replace('to impose problems','<i>to impose problems</i>')
                a(Paragraph('Justified paragraph in normal/bold/italic font',style=normal))
            elif mode==2:
                text = '<b>%s</b>' % text
                a(Paragraph('Justified paragraph in bold font',style=normal))
            elif mode==3:
                text = '<i>%s</i>' % text
                a(Paragraph('Justified paragraph in italic font',style=normal))
            elif mode==4:
                # NOTE(review): the replacement strings appear to substitute
                # hard (non-breaking) spaces after these words — the two
                # arguments look identical when whitespace is normalized;
                # confirm against version control before editing.
                text = text.replace('English ','English ').replace('quite ','quite ')
                text = text.replace(' methodological',' methodological').replace(' impose',' impose')
                a(Paragraph('Justified paragraph in normal font & some hard spaces',style=normal))
            else:
                a(Paragraph('Justified paragraph in normal font',style=normal))
            a(Paragraph(text,style=normal_just))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_just.pdf'))
        doc.build(story)

    def testAutoPageTemplate(self):
        # Verifies that a PageTemplate's autoNextPageTemplate switches the
        # document to the named follow-up template after a page break.
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        class onPage:
            # Page callback that stamps the active template's label on the page.
            def __init__(self,label):
                self.label = label
            def __call__(self,canv,doc):
                canv.drawString(72,72,'This is pageTemplate(%s)' % (self.label,))
        class MyDocTemplate(BaseDocTemplate):
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                    [
                    PageTemplate('normal',
                        [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                        onPage = onPage('normal'),
                        ),
                    PageTemplate('auto',
                        [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                        onPage = onPage('auto'),
                        autoNextPageTemplate = 'autoFollow',
                        ),
                    PageTemplate('autoFollow',
                        [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                        onPage = onPage('autoFollow'),
                        ),
                    ])
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        story =[]
        a = story.append
        a(Paragraph('should be on page template normal', normal))
        a(NextPageTemplate('auto'))
        a(PageBreak())
        a(Paragraph('should be on page template auto', normal))
        a(PageBreak())
        # DocAssert checks the active template at build time, inside the doc.
        a(DocAssert('doc.pageTemplate.id=="autoFollow"','expected doc.pageTemplate.id=="autoFollow"'))
        a(Paragraph('should be on page template autoFollow 1', normal))
        a(PageBreak())
        a(Paragraph('should be on page template autoFollow 2', normal))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_AutoNextPageTemplate.pdf'))
        doc.build(story)
#noruntests
def makeSuite():
    """Return a unittest suite covering all test cases in this module."""
    # The first five classes are defined earlier in this file.
    return makeSuiteForClasses(ParagraphCorners,SplitFrameParagraphTest,FragmentTestCase, ParagraphSplitTestCase, ULTestCase, JustifyTestCase,
                               AutoLeadingTestCase)
#noruntests
if __name__ == "__main__":
    # Run the full suite, then report where the output PDFs were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
"""
Log-related functions and structures.
"""
from builtins import map
import sys
import logging
from colorama import AnsiToWin32
from functools import partial
from contextlib import contextmanager
from .colorizer import Colorizer
from .mark.objects import Mark
from .stream import stream_has_color_support
class ColorizingFormatter(logging.Formatter, object):
    """
    A formatter that colorize its output.
    """

    @contextmanager
    def _patch_record(self, record, colorizer, message_color_tag):
        """
        Temporarily colorize the attributes of *record*.

        :param record: The `LogRecord` to patch in place.
        :param colorizer: The colorizer to apply, or a falsy value to leave
            the record untouched.
        :param message_color_tag: Optional color tag to wrap the rendered
            message with.

        On exit the record's original ``__dict__`` is restored, so the same
        record can safely be passed to other handlers afterwards.
        """
        # Snapshot the attribute dict so everything can be restored verbatim.
        save_dict = record.__dict__.copy()
        if colorizer:
            if isinstance(record.args, dict):
                # Mapping-style arguments: colorize each value.
                record.args = dict(
                    (
                        k, colorizer.colorize(
                            v, context_color_tag=message_color_tag
                        )
                    ) for k, v in record.args.items()
                )
            else:
                # Positional arguments: colorize each one.
                record.args = tuple(map(
                    partial(
                        colorizer.colorize,
                        context_color_tag=message_color_tag,
                    ),
                    record.args,
                ))
            record.filename = colorizer.colorize(record.filename)
            record.funcName = colorizer.colorize(record.funcName)
            record.levelname = colorizer.colorize(record.levelname)
            record.module = colorizer.colorize(record.module)
            record.name = colorizer.colorize(record.name)
            record.pathname = colorizer.colorize(record.pathname)
            record.processName = colorizer.colorize(record.processName)
            record.threadName = colorizer.colorize(record.threadName)
            if message_color_tag:
                # Pre-render the message once and shadow getMessage with it;
                # restoring __dict__ in the finally clause removes the shadow.
                message = colorizer.colorize(Mark(
                    record.getMessage(),
                    color_tag=message_color_tag,
                ))
                record.getMessage = lambda: message
        try:
            yield
        finally:
            record.__dict__ = save_dict

    def format(self, record):
        """
        Colorize the arguments of a record.
        :record: A `LogRecord` instance.
        :returns: The colorized formatted string.
        .. note:: The `record` object must have a `colorizer` attribute to be
        use for colorizing the formatted string. If no such attribute is
        found, the default non-colorized behaviour is used instead.
        """
        # Both attributes are injected by ColorizingStreamHandler; absence of
        # either degrades gracefully to plain formatting.
        colorizer = getattr(record, 'colorizer', None)
        message_color_tag = getattr(record, 'message_color_tag', None)
        with self._patch_record(record, colorizer, message_color_tag):
            return super(ColorizingFormatter, self).format(record)
class ColorizingStreamHandler(logging.StreamHandler, object):
    """
    A stream handler that colorize its output.
    """
    # Name of the attribute injected on each record so the formatter can
    # find the active colorizer.
    _RECORD_ATTRIBUTE_NAME = 'colorizer'

    # Default record-attribute -> color-tag mapping. Values may be a plain
    # tag string or a callable taking the record and returning a tag.
    default_attributes_map = {
        'name': 'important',
        'levelname': lambda record: str(record.levelname).lower(),
        'message': lambda record: str(record.levelname).lower(),
    }

    def __init__(
        self,
        stream=None,
        colorizer=None,
        highlighter=None,
        attributes_map=None,
    ):
        """
        Initializes a colorizing stream handler.
        :param stream: The stream to use for output.
        :param colorizer: The colorizer to use for colorizing the output. If
        not specified, a :class:`chromalog.colorizer.Colorizer` is
        instantiated.
        :param highlighter: The colorizer to use for highlighting the output
        when color is not supported.
        :param attributes_map: A map of LogRecord attributes/color tags.
        """
        if not stream:
            stream = sys.stderr
        self.has_color_support = stream_has_color_support(stream)
        # May be toggled externally to force-disable color at runtime.
        self.color_disabled = False
        self.attributes_map = attributes_map or self.default_attributes_map
        if self.has_color_support:
            # Wrap so ANSI sequences render correctly on Windows consoles.
            stream = AnsiToWin32(stream).stream
        super(ColorizingStreamHandler, self).__init__(
            stream
        )
        self.colorizer = colorizer or Colorizer()
        self.highlighter = highlighter
        self.setFormatter(ColorizingFormatter())

    @property
    def active_colorizer(self):
        """
        The active colorizer or highlighter depending on whether color is
        supported.
        """
        if (
            self.has_color_support and
            not self.color_disabled and
            self.colorizer
        ):
            return self.colorizer
        # Fallback (may be None if no highlighter was configured).
        return self.highlighter

    @contextmanager
    def __bind_to_record(self, record):
        # Expose the active colorizer to the formatter for the duration of
        # one format() call, then remove it again.
        setattr(record, self._RECORD_ATTRIBUTE_NAME, self.active_colorizer)
        try:
            yield
        finally:
            delattr(record, self._RECORD_ATTRIBUTE_NAME)

    def _color_tag_from_record(self, color_tag, record):
        # Resolve a map entry: call it if callable, otherwise treat it as a
        # format string expanded with the record's attributes.
        if hasattr(color_tag, '__call__'):
            return color_tag(record)
        else:
            return color_tag.format(**record.__dict__)

    def format(self, record):
        """
        Format a `LogRecord` and prints it to the associated stream.
        """
        with self.__bind_to_record(record):
            for attribute, color_tag in self.attributes_map.items():
                if attribute == 'message':
                    # The message is handled specially by the formatter via
                    # message_color_tag rather than being wrapped here.
                    record.message_color_tag = self._color_tag_from_record(
                        color_tag,
                        record,
                    )
                else:
                    # Wrap the attribute value in a Mark carrying its tag.
                    setattr(record, attribute, Mark(
                        getattr(record, attribute),
                        color_tag=self._color_tag_from_record(
                            color_tag,
                            record,
                        ),
                    ))
            return super(ColorizingStreamHandler, self).format(record)
###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.resource import WebSocketResource, HTTPChannelHixie76Aware
class EchoServerProtocol(WebSocketServerProtocol):
    """WebSocket protocol that echoes each received message back verbatim."""

    def onMessage(self, msg, binary):
        # Echo the payload unchanged, preserving the text/binary flag.
        self.sendMessage(msg, binary)
if __name__ == '__main__':
    # Pass 'debug' as the first CLI argument to enable Twisted logging and
    # Autobahn's verbose code paths.
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False

    # TLS context from a local key/certificate pair (paths are relative to
    # the working directory).
    contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
                                                      'keys/server.crt')

    factory = WebSocketServerFactory("wss://localhost:8080",
                                     debug = debug,
                                     debugCodePaths = debug)

    factory.protocol = EchoServerProtocol
    factory.setProtocolOptions(allowHixie76 = True) # needed if Hixie76 is to be supported

    resource = WebSocketResource(factory)

    ## we serve static files under "/" ..
    root = File(".")

    ## and our WebSocket server under "/ws"
    root.putChild("ws", resource)

    ## both under one Twisted Web Site
    site = Site(root)
    site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported

    reactor.listenSSL(8080, site, contextFactory)
    reactor.run()
import sys
import requests
import json
import urllib2
import time
# Price-feed publisher: polls Bitstamp (USD/BTC) and Bter (BTSX/BTC) every
# 30 seconds and publishes derived USD and BTC feeds for each delegate via
# the wallet's JSON-RPC interface. Python 2 script (print statements, urllib2).
if len(sys.argv) < 5:
    print "Usage: feed.py rpcuser rpcpass port \"['delegate','list']\""
    sys.exit(0)
user = sys.argv[1]
password = sys.argv[2]
port = int(sys.argv[3])
delegates = []
if len(sys.argv[4]) < 3:
    # Argument too short to be a real list literal: default to init0..init59.
    for i in range(60):
        delegates.append("init" + str(i))
else:
    # SECURITY NOTE(review): eval of a CLI argument executes arbitrary code;
    # ast.literal_eval would be the safe equivalent here.
    delegates = eval(sys.argv[4])
print delegates
while True:
    # --- fetch USD/BTC from Bitstamp (browser-like headers to avoid 403) ---
    url = 'https://www.bitstamp.net/api/ticker/'
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    req = urllib2.Request(url, headers=hdr)
    bitstamp_data = json.load(urllib2.urlopen(req))
    usd_per_btc = float(bitstamp_data["last"])
    # --- fetch BTSX/BTC from Bter and derive USD/BTSX ---
    url = 'http://data.bter.com/api/1/ticker/btsx_btc'
    req = urllib2.Request(url, headers=hdr)
    btsx_data = json.load(urllib2.urlopen(req))
    btc_per_btsx = float(btsx_data["last"])
    usd_per_btsx = usd_per_btc * btc_per_btsx
    print usd_per_btsx
    # SECURITY NOTE(review): RPC credentials are embedded in the URL and the
    # URL is printed; also the hard-coded 'Basic YTph' header is "a:a".
    url = "http://" + user + ":" + password + "@localhost:" + str(port) + "/rpc"
    print url
    count = 0
    # Seed each delegate account with a small transfer from init0.
    for name in delegates:
        payload = {
            "method": "wallet_transfer",
            "params": [5, "XTS", "init0", "init" + str(count)],
            "jsonrpc": "2.0",
            "id": 0,
        }
        print payload
        headers = {
            'content-type': 'application/json',
            'Authorization': "Basic YTph"
        }
        response = requests.post(url, data=json.dumps(payload), headers=headers)
        print response
        print response.json()
        count += 1
    # Publish the USD and BTC price feeds for every delegate.
    for name in delegates:
        payload = {
            "method": "wallet_publish_price_feed",
            "params": [name, usd_per_btsx, "USD"],
            "jsonrpc": "2.0",
            "id": 0,
        }
        print payload
        headers = {
            'content-type': 'application/json',
            'Authorization': "Basic YTph"
        }
        response = requests.post(url, data=json.dumps(payload), headers=headers)
        print response
        print response.json()
        payload = {
            "method": "wallet_publish_price_feed",
            "params": [name, btc_per_btsx, "BTC"],
            "jsonrpc": "2.0",
            "id": 0,
        }
        print payload
        headers = {
            'content-type': 'application/json',
            'Authorization': "Basic YTph"
        }
        response = requests.post(url, data=json.dumps(payload), headers=headers)
        print response
        print response.json()
    time.sleep(30)
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it
    must only be accessed as a POST request. A GET request simply redirects
    to the page named in the 'next' parameter without changing any state.
    """
    host = request.get_host()
    # Fall back from 'next' to the referer, then to the site root, rejecting
    # any target that is not safe to redirect to.
    redirect_to = request.REQUEST.get('next')
    if not is_safe_url(url=redirect_to, host=host):
        redirect_to = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=redirect_to, host=host):
            redirect_to = '/'
    response = http.HttpResponseRedirect(redirect_to)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # Prefer the session when available; otherwise use a cookie.
            if hasattr(request, 'session'):
                request.session['django_language'] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
def get_formats():
    """
    Return a JavaScript snippet assigning all format strings required for
    the client-side i18n machinery to the ``formats`` object.
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    # get_format() already consults settings and the active format modules
    # itself, so a single pass over the setting names suffices; the previous
    # outer loop over [settings] + get_format_modules(reverse=True) never
    # used its loop variable and only repeated identical work.
    result = dict((attr, get_format(attr)) for attr in FORMAT_SETTINGS)
    src = []
    for k, v in result.items():
        if isinstance(v, (six.string_types, int)):
            # Scalar formats become single-quoted JS string assignments.
            src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_text(v))))
        elif isinstance(v, (tuple, list)):
            # Sequence formats (input format lists) become JS arrays.
            v = [javascript_quote(smart_text(value)) for value in v]
            src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
    return ''.join(src)
# JavaScript source fragment: no-op gettext functions, used when no catalog
# is served (see null_javascript_catalog).
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""

# Opening fragment of the real catalog-backed library; catalog entries are
# appended between LibHead and LibFoot by javascript_catalog().
LibHead = """
/* gettext library */
var catalog = new Array();
"""

# Closing fragment: lookup functions over the populated catalog. Context
# lookups use '\x04' as the gettext context separator.
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\\x04' + msgid);
if (value.indexOf('\\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\\x04' + singular, context + '\\x04' + plural, count);
if (value.indexOf('\\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""

# Fragments wrapping the format definitions emitted by get_formats().
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""

LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
}
"""

# Plural-index function for languages with the simple two-form rule; used
# when the catalog carries no Plural-Forms header.
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""

# Client-side interpolate() supporting both positional (%s) and named
# (%(name)s) placeholders.
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""

# Template for a catalog-specific pluralidx; %s receives the compiled
# plural expression extracted from the Plural-Forms header.
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Serve "identity" versions of the JavaScript i18n functions — i.e.
    versions that pass messages through untranslated.
    """
    # Identity gettext functions, interpolate(), and the format helpers.
    source = NullSource + InterPolate + LibFormatHead + get_formats() + LibFormatFoot
    return http.HttpResponse(source, 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.
    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.
    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    # An explicit ?language= parameter overrides the active language.
    if request.GET:
        if 'language' in request.GET:
            if check_for_language(request.GET['language']):
                activate(request.GET['language'])
    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, six.string_types):
        packages = packages.split('+')
    # Only installed apps (plus the always-allowed 'django.conf') may be
    # queried, so arbitrary module paths cannot be probed via the URL.
    packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
    default_locale = to_locale(settings.LANGUAGE_CODE)
    locale = to_locale(get_language())
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    src = [LibHead]
    plural = None
    # The catalog's metadata entry (key '') carries the Plural-Forms header.
    if '' in t:
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':',1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
        src.append(PluralIdx % plural)
    else:
        src.append(SimplePlural)
    csrc = []
    pdict = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            # Singular entry: plain string assignment.
            csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
        elif isinstance(k, tuple):
            # Plural entry: key is (msgid, plural_index). Track the highest
            # index per msgid so the JS array can be pre-sized.
            if k[0] not in pdict:
                pdict[k[0]] = k[1]
            else:
                pdict[k[0]] = max(k[1], pdict[k[0]])
            csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
        else:
            raise TypeError(k)
    csrc.sort()
    # Emit array initializers before the per-index assignments above.
    for k, v in pdict.items():
        src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
    src.extend(csrc)
    src.append(LibFoot)
    src.append(InterPolate)
    src.append(LibFormatHead)
    src.append(get_formats())
    src.append(LibFormatFoot)
    src = ''.join(src)
    return http.HttpResponse(src, 'text/javascript')
"""Support for MQTT switches."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt, switch
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import (
CONF_DEVICE, CONF_ICON, CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON, CONF_VALUE_TEMPLATE, STATE_ON)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)

# Defaults applied when the YAML/discovery config omits the option.
DEFAULT_NAME = 'MQTT Switch'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False

# Optional payloads that *report* state on the state topic, for devices
# whose reported payloads differ from the command payloads.
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"

# Config schema shared by YAML setup and MQTT discovery validation.
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
    vol.Optional(CONF_ICON): cv.icon,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
    vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
    vol.Optional(CONF_STATE_OFF): cv.string,
    vol.Optional(CONF_STATE_ON): cv.string,
    vol.Optional(CONF_UNIQUE_ID): cv.string,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up MQTT switch through configuration.yaml."""
    # YAML setup has no config entry; delegate to the shared helper.
    await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT switch dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add a MQTT switch."""
        # Pre-initialize so the except block below cannot hit a NameError
        # when pop() itself raises (payload missing the discovery hash).
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, config_entry,
                                      discovery_hash)
        except Exception:
            # Drop the pending discovery entry so the device can be
            # re-discovered after a failed setup, then surface the error.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(switch.DOMAIN, 'mqtt'),
        async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
                              discovery_hash=None):
    """Set up the MQTT switch."""
    # Shared by YAML setup (no config_entry/discovery_hash) and discovery.
    async_add_entities([MqttSwitch(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttSwitch(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
                 MqttEntityDeviceInfo, SwitchDevice, RestoreEntity):
    """Representation of a switch that can be toggled using MQTT."""
    def __init__(self, config, config_entry, discovery_hash):
        """Initialize the MQTT switch."""
        self._state = False        # last known on/off state
        self._sub_state = None     # handle returned by the subscription helper
        self._state_on = None      # payload meaning "on" on the state topic
        self._state_off = None     # payload meaning "off" on the state topic
        self._optimistic = None    # True when state is assumed, not reported
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # Load config
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        # Each mixin is initialized explicitly (no cooperative super()).
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash,
                                     self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the payload and rebuild config and subscriptions.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config
        # state_on/state_off fall back to the command payloads when unset.
        state_on = config.get(CONF_STATE_ON)
        self._state_on = state_on if state_on else config[CONF_PAYLOAD_ON]
        state_off = config.get(CONF_STATE_OFF)
        self._state_off = state_off if state_off else \
            config[CONF_PAYLOAD_OFF]
        self._optimistic = config[CONF_OPTIMISTIC]
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass
        @callback
        def state_message_received(msg):
            """Handle new MQTT state messages."""
            payload = msg.payload
            if template is not None:
                payload = template.async_render_with_possible_json_value(
                    payload)
            # Payloads matching neither state are silently ignored.
            if payload == self._state_on:
                self._state = True
            elif payload == self._state_off:
                self._state = False
            self.async_write_ha_state()
        if self._config.get(CONF_STATE_TOPIC) is None:
            # Force into optimistic mode.
            self._optimistic = True
        else:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass, self._sub_state,
                {CONF_STATE_TOPIC:
                    {'topic': self._config.get(CONF_STATE_TOPIC),
                     'msg_callback': state_message_received,
                     'qos': self._config[CONF_QOS]}})
        if self._optimistic:
            # No authoritative state topic: restore the last known state.
            last_state = await self.async_get_last_state()
            if last_state:
                self._state = last_state.state == STATE_ON
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state)
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
    @property
    def should_poll(self):
        """Return the polling state."""
        return False
    @property
    def name(self):
        """Return the name of the switch."""
        return self._config[CONF_NAME]
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def icon(self):
        """Return the icon."""
        return self._config.get(CONF_ICON)
    async def async_turn_on(self, **kwargs):
        """Turn the device on.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config[CONF_COMMAND_TOPIC],
            self._config[CONF_PAYLOAD_ON],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN])
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = True
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the device off.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config[CONF_COMMAND_TOPIC],
            self._config[CONF_PAYLOAD_OFF],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN])
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = False
            self.async_write_ha_state()
/* Copyright 2022 - 2025 R. Thomas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_ASM_ENGINE_H
#define LIEF_ASM_ENGINE_H
#include "LIEF/visibility.h"
#include "LIEF/iterators.hpp"
#include "LIEF/asm/Instruction.hpp"
#include "LIEF/asm/AssemblerConfig.hpp"
#include <memory>
namespace LIEF {
class Binary;
/// Namespace related to assembly/disassembly support
namespace assembly {
namespace details {
class Engine;
}
/// This class interfaces the assembler/disassembler support
class LIEF_API Engine {
  public:
  /// Disassembly instruction iterator
  using instructions_it = iterator_range<Instruction::Iterator>;
  Engine() = delete;
  /// Take ownership of the pimpl implementation (non-null expected).
  Engine(std::unique_ptr<details::Engine> impl);
  // Non-copyable (owns the underlying LLVM-backed engine), movable.
  Engine(const Engine&) = delete;
  Engine& operator=(const Engine&) = delete;
  Engine(Engine&&) noexcept;
  Engine& operator=(Engine&&) noexcept;
  /// Disassemble the provided buffer with the address specified in the second
  /// parameter.
  instructions_it disassemble(const uint8_t* buffer, size_t size, uint64_t addr);
  /// Disassemble the given vector of bytes with the address specified in the second
  /// parameter.
  instructions_it disassemble(const std::vector<uint8_t>& bytes, uint64_t addr) {
    return disassemble(bytes.data(), bytes.size(), addr);
  }
  /// Assemble the text `Asm` at `address` and return the encoded bytes.
  std::vector<uint8_t> assemble(uint64_t address, const std::string& Asm,
                                AssemblerConfig& config = AssemblerConfig::default_config());
  /// Same as above, patching the result into the given binary.
  std::vector<uint8_t> assemble(uint64_t address, const std::string& Asm,
                                LIEF::Binary& bin, AssemblerConfig& config = AssemblerConfig::default_config());
  /// Assemble a single pre-built llvm::MCInst into `bin` at `address`.
  std::vector<uint8_t> assemble(uint64_t address, const llvm::MCInst& inst,
                                LIEF::Binary& bin);
  /// Assemble a sequence of llvm::MCInst into `bin` at `address`.
  std::vector<uint8_t> assemble(
      uint64_t address, const std::vector<llvm::MCInst>& inst, LIEF::Binary& bin);
  ~Engine();
  /// \private
  LIEF_LOCAL const details::Engine& impl() const {
    assert(impl_ != nullptr);
    return *impl_;
  }
  /// \private
  LIEF_LOCAL details::Engine& impl() {
    assert(impl_ != nullptr);
    return *impl_;
  }
  private:
  std::unique_ptr<details::Engine> impl_;
};
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/asm/Engine.hpp |
import pytest
from mitmproxy.addons import onboarding
from mitmproxy.test import taddons
@pytest.fixture
def client():
    """Yield a WSGI test client bound to the onboarding app."""
    with onboarding.app.test_client() as client:
        yield client
class TestApp:
    """Tests for the onboarding web app served by the Onboarding addon."""
    def addons(self):
        # NOTE(review): not referenced by the tests below — each test builds
        # its own Onboarding instance; confirm whether this hook is needed.
        return [onboarding.Onboarding()]
    @pytest.mark.asyncio
    async def test_basic(self, client):
        # The index page should render once the addon is configured.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob)
            assert client.get("/").status_code == 200
    @pytest.mark.parametrize("ext", ["pem", "p12", "cer"])
    @pytest.mark.asyncio
    async def test_cert(self, client, ext, tdata):
        # Each supported certificate format should be downloadable.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob, confdir=tdata.path("mitmproxy/data/confdir"))
            resp = client.get(f"/cert/{ext}")
            assert resp.status_code == 200
            assert resp.data
    @pytest.mark.parametrize("ext", ["pem", "p12", "cer"])
    @pytest.mark.asyncio
    async def test_head(self, client, ext, tdata):
        # HEAD must advertise Content-Length without sending a body.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob, confdir=tdata.path("mitmproxy/data/confdir"))
            resp = client.head(f"http://{tctx.options.onboarding_host}/cert/{ext}")
            assert resp.status_code == 200
            assert "Content-Length" in resp.headers
            assert not resp.data
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal, TypeAlias
import numpy as np
# The NumPy backend only ever exposes a single (CPU) device.
Device: TypeAlias = Literal["cpu"]
if TYPE_CHECKING:
    # NumPy 1.x on Python 3.10 fails to parse np.dtype[]
    DType: TypeAlias = np.dtype[
        np.bool_
        | np.integer[Any]
        | np.float32
        | np.float64
        | np.complex64
        | np.complex128
    ]
    Array: TypeAlias = np.ndarray[Any, DType]
else:
    # At runtime keep the unparameterized forms for older NumPy versions.
    DType: TypeAlias = np.dtype
    Array: TypeAlias = np.ndarray
__all__ = ["Array", "DType", "Device"]
def __dir__() -> list[str]:
    # Restrict dir(module) to the public aliases declared in __all__.
    return __all__
#!/bin/bash
# Configure FwC (forwards-compatibility) test branches.
# 7.x branches are excluded, and we only run for branches that:
# - have released at least one minor version (not main)
# - have the previous minor unreleased (not the oldest development branch)
FWC_BRANCHES=()
for branch in "${BRANCHES[@]}"; do
  # Keep every branch whose name does not start with "7."
  if [[ ! "$branch" =~ ^7\..* ]]; then
    FWC_BRANCHES+=("$branch")
  fi
done
# Remove first and last element (oldest dev branch and main).
FWC_BRANCHES=("${FWC_BRANCHES[@]:1:${#FWC_BRANCHES[@]}-2}")
# Succeed (exit 0) iff the given branch is one of the configured FwC branches.
shouldRunFwcFor() {
  local candidate=$1
  local known
  for known in "${FWC_BRANCHES[@]}"; do
    [[ "$known" == "$candidate" ]] && return 0
  done
  return 1
}
"""A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords",
"digits", "hexdigits", "octdigits", "printable", "punctuation",
"whitespace", "Formatter", "Template"]
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
# Hex digits accept both letter cases; octal digits are a digits subset.
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
# "Printable" here includes whitespace, matching str.isprintable()'s docs only loosely.
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split *s* into words with str.split, capitalize each word with
    str.capitalize, and join the results.  When *sep* is absent or None,
    runs of whitespace collapse to a single space and leading/trailing
    whitespace is removed; otherwise *sep* is used both to split and to
    rejoin the words.
    """
    joiner = sep or ' '
    capitalized = [word.capitalize() for word in s.split(sep)]
    return joiner.join(capitalized)
####################################################################
# Sentinel distinguishing "no positional mapping passed" from an explicit {}.
_sentinel_dict = {}
class _TemplatePattern:
    # This descriptor is overwritten in ``Template._compile_pattern()``.
    # Reading Template.pattern therefore compiles (and caches) the regular
    # expression lazily, on first access.
    def __get__(self, instance, cls=None):
        if cls is None:
            return self
        return cls._compile_pattern()
_TemplatePattern = _TemplatePattern()  # replace the class with its singleton instance
class Template:
    """A string class for supporting $-substitutions."""
    delimiter = '$'
    # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but
    # without the ASCII flag. We can't add re.ASCII to flags because of
    # backward compatibility. So we use the ?a local flag and [a-z] pattern.
    # See https://bugs.python.org/issue31672
    idpattern = r'(?a:[_a-z][_a-z0-9]*)'
    braceidpattern = None
    flags = None # default: re.IGNORECASE
    pattern = _TemplatePattern # use a descriptor to compile the pattern
    def __init_subclass__(cls):
        # Subclasses compile eagerly so overridden class attrs take effect.
        super().__init_subclass__()
        cls._compile_pattern()
    @classmethod
    def _compile_pattern(cls):
        """Build (or return an explicitly supplied) compiled pattern."""
        import re # deferred import, for performance
        pattern = cls.__dict__.get('pattern', _TemplatePattern)
        if pattern is _TemplatePattern:
            delim = re.escape(cls.delimiter)
            id = cls.idpattern
            bid = cls.braceidpattern or cls.idpattern
            pattern = fr"""
            {delim}(?:
              (?P<escaped>{delim}) | # Escape sequence of two delimiters
              (?P<named>{id}) | # delimiter and a Python identifier
              {{(?P<braced>{bid})}} | # delimiter and a braced identifier
              (?P<invalid>) # Other ill-formed delimiter exprs
            )
            """
        if cls.flags is None:
            cls.flags = re.IGNORECASE
        # Cache the compiled pattern on the class, replacing the descriptor.
        pat = cls.pattern = re.compile(pattern, cls.flags | re.VERBOSE)
        return pat
    def __init__(self, template):
        self.template = template
    # Search for $$, $identifier, ${identifier}, and any bare $'s
    def _invalid(self, mo):
        # Report a 1-based line/column for the offending "$".
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(keepends=True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))
    def substitute(self, mapping=_sentinel_dict, /, **kws):
        """Substitute placeholders; missing keys raise KeyError."""
        if mapping is _sentinel_dict:
            mapping = kws
        elif kws:
            from collections import ChainMap
            mapping = ChainMap(kws, mapping)
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                return str(mapping[named])
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
    def safe_substitute(self, mapping=_sentinel_dict, /, **kws):
        """Like substitute(), but leave unknown/invalid placeholders intact."""
        if mapping is _sentinel_dict:
            mapping = kws
        elif kws:
            from collections import ChainMap
            mapping = ChainMap(kws, mapping)
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                try:
                    return str(mapping[named])
                except KeyError:
                    return mo.group()
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return mo.group()
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
    def is_valid(self):
        """Return False when the template contains ill-formed placeholders."""
        for mo in self.pattern.finditer(self.template):
            if mo.group('invalid') is not None:
                return False
            if (mo.group('named') is None
                and mo.group('braced') is None
                and mo.group('escaped') is None):
                # If all the groups are None, there must be
                # another group we're not expecting
                raise ValueError('Unrecognized named group in pattern',
                                 self.pattern)
        return True
    def get_identifiers(self):
        """Return placeholder names in first-appearance order, deduplicated."""
        ids = []
        for mo in self.pattern.finditer(self.template):
            named = mo.group('named') or mo.group('braced')
            if named is not None and named not in ids:
                # add a named group only the first time it appears
                ids.append(named)
            elif (named is None
                  and mo.group('invalid') is None
                  and mo.group('escaped') is None):
                # If all the groups are None, there must be
                # another group we're not expecting
                raise ValueError('Unrecognized named group in pattern',
                                 self.pattern)
        return ids
########################################################################
# The Formatter class (PEP 3101).
#
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split.
class Formatter:
    """See PEP 3101 for details and purpose of this class."""
    def format(self, format_string, /, *args, **kwargs):
        return self.vformat(format_string, args, kwargs)
    def vformat(self, format_string, args, kwargs):
        # Track which arguments were consumed so subclasses can warn/raise.
        used_args = set()
        result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(used_args, args, kwargs)
        return result
    def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
                 auto_arg_index=0):
        # recursion_depth bounds nested format specs like "{0:{1}}".
        if recursion_depth < 0:
            raise ValueError('Max string recursion exceeded')
        result = []
        for literal_text, field_name, format_spec, conversion in \
                self.parse(format_string):
            # output the literal text
            if literal_text:
                result.append(literal_text)
            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do
                #  the formatting
                # handle arg indexing when empty field first parts are given.
                field_first, _ = _string.formatter_field_name_split(field_name)
                if field_first == '':
                    if auto_arg_index is False:
                        raise ValueError('cannot switch from manual field '
                                         'specification to automatic field '
                                         'numbering')
                    field_name = str(auto_arg_index) + field_name
                    auto_arg_index += 1
                elif isinstance(field_first, int):
                    if auto_arg_index:
                        raise ValueError('cannot switch from automatic field '
                                         'numbering to manual field '
                                         'specification')
                    # disable auto arg incrementing, if it gets
                    # used later on, then an exception will be raised
                    auto_arg_index = False
                # given the field_name, find the object it references
                #  and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)
                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)
                # expand the format spec, if needed
                format_spec, auto_arg_index = self._vformat(
                    format_spec, args, kwargs,
                    used_args, recursion_depth-1,
                    auto_arg_index=auto_arg_index)
                # format the object and append to the result
                result.append(self.format_field(obj, format_spec))
        return ''.join(result), auto_arg_index
    def get_value(self, key, args, kwargs):
        # Integer keys index positional args; everything else is a kwarg name.
        if isinstance(key, int):
            return args[key]
        else:
            return kwargs[key]
    def check_unused_args(self, used_args, args, kwargs):
        # Hook for subclasses; default is to ignore unused arguments.
        pass
    def format_field(self, value, format_spec):
        return format(value, format_spec)
    def convert_field(self, value, conversion):
        # do any conversion on the resulting object
        if conversion is None:
            return value
        elif conversion == 's':
            return str(value)
        elif conversion == 'r':
            return repr(value)
        elif conversion == 'a':
            return ascii(value)
        raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
    def parse(self, format_string):
        """
        Return an iterable that contains tuples of the form
        (literal_text, field_name, format_spec, conversion).

        *field_name* can be None, in which case there's no object
        to format and output; otherwise, it is looked up and
        formatted with *format_spec* and *conversion*.
        """
        return _string.formatter_parser(format_string)
    def get_field(self, field_name, args, kwargs):
        """Find the object referenced by a given field name.

        The field name *field_name* can be for instance "0.name"
        or "lookup[3]". The *args* and *kwargs* arguments are
        passed to get_value().
        """
        first, rest = _string.formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        # loop through the rest of the field_name, doing
        #  getattr or getitem as needed
        for is_attr, i in rest:
            if is_attr:
                obj = getattr(obj, i)
            else:
                obj = obj[i]
        return obj, first
try:
import simplejson as json
except ImportError:
import json
import dateutil.parser
from collections import namedtuple
from .utils import unicode_class, resource_for_link, unresolvable
from .resource import FieldsResource, Link, Resource
"""
contentful.content_type_field_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the Field Coercion classes.
:copyright: (c) 2016 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class BasicField(object):
    """Base class for all field coercions.

    Subclasses override :meth:`coerce`; the default is the identity
    function.  ``items`` carries the sub-schema for array-typed fields.
    """
    def __init__(self, items=None):
        self._items = items

    def coerce(self, value, **kwargs):
        """Return *value* unchanged."""
        return value

    def __repr__(self):
        return "<{0}>".format(type(self).__name__)
class SymbolField(BasicField):
    """Symbol Coercion Class"""
    def coerce(self, value, **kwargs):
        """Coerces value to str"""
        # unicode_class() resolves the text type — presumably unicode on
        # Py2 and str on Py3; see utils (TODO confirm).
        return unicode_class()(value)
class TextField(BasicField):
    """Text Coercion Class"""
    def coerce(self, value, **kwargs):
        """Coerces value to str"""
        # Same coercion as SymbolField: convert via the runtime text type.
        return unicode_class()(value)
class IntegerField(BasicField):
    """Coercion for integer-typed fields."""
    def coerce(self, value, **kwargs):
        """Return *value* converted with the ``int`` constructor."""
        return int(value)
class NumberField(BasicField):
    """Coercion for number-typed (floating point) fields."""
    def coerce(self, value, **kwargs):
        """Return *value* converted with the ``float`` constructor."""
        return float(value)
class DateField(BasicField):
    """Date Coercion Class"""
    def coerce(self, value, **kwargs):
        """Coerces ISO8601 date to :class:`datetime.datetime` object."""
        # dateutil also accepts looser formats; ISO8601 is the expected input.
        return dateutil.parser.parse(value)
class BooleanField(BasicField):
    """Coercion for boolean-typed fields."""
    def coerce(self, value, **kwargs):
        """Return the truthiness of *value* as a ``bool``."""
        return bool(value)
class LocationField(BasicField):
    """Location Coercion Class.

    Coerces ``{'lat': ..., 'lon': ...}`` dicts to a ``Location`` named
    tuple with float coordinates.
    """
    # Built once at class level instead of on every coerce() call (the
    # original rebuilt the namedtuple type per call).  Backward compatible:
    # namedtuples compare by tuple equality, regardless of defining class.
    Location = namedtuple('Location', ['lat', 'lon'])

    def coerce(self, value, **kwargs):
        """Coerces value to Location object"""
        return self.Location(float(value.get('lat')), float(value.get('lon')))
class LinkField(BasicField):
    """
    LinkField
    Nothing should be done here as include resolution is handled within
    entries due to depth handling (explained within Entry).
    Only present as a placeholder for proper resolution within ContentType.
    """
    # Intentionally inherits the identity coerce() from BasicField.
    pass
class ArrayField(BasicField):
    """Array Coercion Class
    Coerces items in collection with it's proper Coercion Class.
    """
    def __init__(self, items=None):
        # ``items`` is the sub-schema for array elements; it must provide a
        # 'type' key (e.g. 'Symbol'), otherwise _get_coercion fails.
        super(ArrayField, self).__init__(items)
        self._coercion = self._get_coercion()
    def coerce(self, value, **kwargs):
        """Coerces array items with proper coercion."""
        result = []
        for v in value:
            result.append(self._coercion.coerce(v, **kwargs))
        return result
    def _get_coercion(self):
        # Resolve "<Type>Field" from this module's globals, e.g. 'Symbol'
        # -> SymbolField.  Unknown types raise KeyError at construction.
        return globals()["{0}Field".format(self._items.get('type'))]()
class ObjectField(BasicField):
    """Coercion for raw JSON object fields."""
    def coerce(self, value, **kwargs):
        """Return a JSON round-trip (deep, JSON-safe copy) of *value*."""
        serialized = json.dumps(value)
        return json.loads(serialized)
class RichTextField(BasicField):
    """
    Coerces Rich Text fields and resolves includes for entries included.
    """
    def _coerce_link(self, value, includes=None, errors=None, resources=None, default_locale='en-US', locale=None):
        # Resolve a node's embedded entry/asset link to a built resource.
        # Returns None for unresolvable links so the caller can drop the node.
        if value['data']['target']['sys']['type'] != 'Link':
            return value['data']['target']
        if unresolvable(value['data']['target'], errors):
            return None
        resource = resource_for_link(
            value['data']['target'],
            includes,
            resources,
            locale=locale if locale else '*'
        )
        if isinstance(resource, FieldsResource): # Resource comes from instance cache
            return resource
        if resource is None: # Resource is valid but not reachable on includes
            return Link(value['data']['target'])
        # Local import avoids a circular dependency with resource_builder.
        from .resource_builder import ResourceBuilder
        return ResourceBuilder(
            default_locale,
            locale and locale == '*',
            resource,
            includes_for_single=includes,
            errors_for_single=errors,
            reuse_entries=bool(resources),
            resources=resources
        ).build()
    def _coerce_block(self, value, includes=None, errors=None, resources=None, default_locale='en-US', locale=None):
        # Recursively walk a rich-text node tree, hydrating link targets in
        # place and pruning nodes whose links could not be resolved.
        if not (isinstance(value, dict) and 'content' in value):
            return value
        invalid_nodes = []
        coerced_nodes = {}
        for index, node in enumerate(value['content']):
            if node.get('data', None) and node['data'].get('target', None):
                # Resource has already been hydrated previously
                # NOTE(review): this `continue` also skips recursing into the
                # node's own 'content' — confirm hydrated nodes never nest.
                if isinstance(node['data']['target'], Resource):
                    continue
                link = self._coerce_link(
                    node,
                    includes=includes,
                    errors=errors,
                    resources=resources,
                    default_locale=default_locale,
                    locale=locale
                )
                if link:
                    node['data']['target'] = link
                else:
                    invalid_nodes.append(index)
            if node.get('content', None):
                coerced_nodes[index] = self._coerce_block(
                    node,
                    includes=includes,
                    errors=errors,
                    resources=resources,
                    default_locale=default_locale,
                    locale=locale
                )
        for node_index, coerced_node in coerced_nodes.items():
            value['content'][node_index] = coerced_node
        # Delete from the end so earlier indices stay valid.
        for node_index in reversed(invalid_nodes):
            del value['content'][node_index]
        return value
    def coerce(self, value, includes=None, errors=None, resources=None, default_locale='en-US', locale=None):
        """Coerces Rich Text properly."""
        if includes is None:
            includes = []
        if errors is None:
            errors = []
        return self._coerce_block(
            value,
            includes=includes,
            errors=errors,
            resources=resources,
            default_locale=default_locale,
            locale=locale
        )
# frozen_string_literal: true
module Arel # :nodoc: all
  module FilterPredications
    # Wrap the receiver (an aggregate expression) in a SQL
    # `FILTER (WHERE expr)` node.
    def filter(expr)
      Nodes::Filter.new(self, expr)
    end
  end
end
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_
#define TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tfrt_stub {
// Inserts send/recv ops to `graph` if nodes are assigned to multiple devices.
// Specifically, nodes on the same device will be wrapped in a function and
// invoked by a PartitionedCall op. All PartitionedCall ops are connected to a
// StatefulPartitionedCall op (which behaves as a 'stateful IdentityN') to
// protect them from being pruned in the subsequent MLIR lowering passes
// (b/232026253).
//
// The following shows a simple example of using this method.
//
// The original graph has four nodes that are placed on different devices.
//
// -----> op1(host) ------
// / \
// input(host) output(host)
// \ /
// -----> op2(device) ------
//
// Calling this method will return the following graph, where `op1` is wrapped
// in the function invoked by `PartitionedCall_1`, and `op2` is wrapped in the
// function invoked by `PartitionedCall_2`. Both of them have a data dependency
// with the `StatefulPartitionedCall` op.
//
// input ---> PartitionedCall_1 ----
// \
// StatefulPartitionedCall ---> output
// /
// PartitionedCall_2 ----
//
// `graph_func_name` names the function wrapping the partitioned graph;
// `inputs`/`outputs`/`control_outputs` identify the graph's boundary nodes,
// and `host_device` hosts the coordinating StatefulPartitionedCall op.
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
    const std::string& graph_func_name, const DeviceSet& device_set,
    const Device* host_device, const std::vector<std::string>& inputs,
    const std::vector<std::string>& outputs,
    const std::vector<std::string>& control_outputs,
    std::unique_ptr<Graph> graph);
} // namespace tfrt_stub
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tfrt/utils/graph_partition.h |
"""
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install Fabric
"""
from fabric.api import local
def lang(mode="extract"):
    """
    REQUIREMENTS:
    - Install before pip with distribute_setup.py (Read the environment setup document)
    - sudo pip install babel
    - sudo pip install jinja2
    HOW TO RUN:
    option 1) fab lang
    option 2) fab lang:compile
    """
    if mode == "compile":
        # Compile all .po catalogs under ./locale into binary .mo files.
        local("pybabel compile -f -d ./locale")
    else:
        # Extract translatable strings into messages.pot, then merge the
        # template into every supported locale's catalog.
        local("pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header")
        local("pybabel update -l cs_CZ -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l de_DE -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l en_US -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l es_ES -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l fr_FR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l id_ID -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l it_IT -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l nl_NL -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l pt_BR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l ru_RU -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l vi_VN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l zh_CN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
def start(mode="normal"):
    """
    Run the App Engine dev server on port 8002.
    HOW TO RUN:
    option 1) fab start
    option 2) fab start:clear   (also wipes the local datastore)
    """
    command = "dev_appserver.py ./ --host 0.0.0.0 --port 8002"
    if mode == "clear":
        command += " --clear_datastore=yes"
    local(command)
def deploy():
    """
    app.yaml never has to be version:default
    """
    # Upload the app with OAuth2 credentials instead of password auth.
    local("appcfg.py --oauth2 update .")
def test(os="mac"):
    """
    REQUIREMENTS:
    - install pip with distribute (http://packages.python.org/distribute/)
    - sudo pip install mock
    - sudo pip install webtest
    - sudo pip install pyquery
    HOW TO RUN:
    option 1) fab test
    option 2) fab test:mac
    option 3) fab test:linux
    """
    # Known App Engine SDK locations per platform.  The docstring advertised
    # ``fab test:linux`` but the dict had no "linux" entry, so that option
    # crashed with a bare KeyError; unknown platforms now get a clear error.
    paths = {
        "mac": "/usr/local/google_appengine",
        # NOTE(review): Linux SDK path assumed identical to the Mac default —
        # TODO confirm for your installation.
        "linux": "/usr/local/google_appengine",
    }
    if os not in paths:
        raise ValueError(
            "Unsupported OS {0!r}; expected one of: {1}".format(
                os, ", ".join(sorted(paths))))
    local("python testrunner.py {0} ./".format(paths[os]))
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.io.TestOption.CLOSE_THROWS;
import static com.google.common.io.TestOption.OPEN_THROWS;
import static com.google.common.io.TestOption.READ_THROWS;
import static com.google.common.io.TestOption.WRITE_THROWS;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertThrows;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import org.jspecify.annotations.NullUnmarked;
/**
 * Tests for the default implementations of {@code ByteSink} methods.
 *
 * @author Colin Decker
 */
@NullUnmarked
public class ByteSinkTest extends IoTestCase {
  // Deterministic 10 KB payload shared by the write tests.
  private final byte[] bytes = newPreFilledByteArray(10000);
  private TestByteSink sink;
  @Override
  protected void setUp() throws Exception {
    // Fresh sink per test so open/close bookkeeping starts clean.
    sink = new TestByteSink();
  }
  public void testOpenBufferedStream() throws IOException {
    OutputStream out = sink.openBufferedStream();
    assertTrue(sink.wasStreamOpened());
    assertFalse(sink.wasStreamClosed());
    out.write(new byte[] {1, 2, 3, 4});
    out.close();
    // Closing the buffered wrapper must close the underlying stream too.
    assertTrue(sink.wasStreamClosed());
    assertArrayEquals(new byte[] {1, 2, 3, 4}, sink.getBytes());
  }
  public void testWrite_bytes() throws IOException {
    assertArrayEquals(new byte[0], sink.getBytes());
    sink.write(bytes);
    assertTrue(sink.wasStreamOpened() && sink.wasStreamClosed());
    assertArrayEquals(bytes, sink.getBytes());
  }
  public void testWriteFrom_inputStream() throws IOException {
    ByteArrayInputStream in = new ByteArrayInputStream(bytes);
    sink.writeFrom(in);
    assertTrue(sink.wasStreamOpened() && sink.wasStreamClosed());
    assertArrayEquals(bytes, sink.getBytes());
  }
  public void testWriteFromStream_doesNotCloseThatStream() throws IOException {
    // The caller owns the source stream; writeFrom must not close it.
    TestInputStream in = new TestInputStream(new ByteArrayInputStream(new byte[10]));
    assertFalse(in.closed());
    sink.writeFrom(in);
    assertFalse(in.closed());
  }
  public void testClosesOnErrors_copyingFromByteSourceThatThrows() {
    for (TestOption option : EnumSet.of(OPEN_THROWS, READ_THROWS, CLOSE_THROWS)) {
      TestByteSource failSource = new TestByteSource(new byte[10], option);
      TestByteSink okSink = new TestByteSink();
      assertThrows(IOException.class, () -> failSource.copyTo(okSink));
      // ensure stream was closed IF it was opened (depends on implementation whether or not it's
      // opened at all if source.newInputStream() throws).
      assertTrue(
          "stream not closed when copying from source with option: " + option,
          !okSink.wasStreamOpened() || okSink.wasStreamClosed());
    }
  }
  public void testClosesOnErrors_whenWriteThrows() {
    TestByteSink failSink = new TestByteSink(WRITE_THROWS);
    assertThrows(IOException.class, () -> new TestByteSource(new byte[10]).copyTo(failSink));
    assertTrue(failSink.wasStreamClosed());
  }
  public void testClosesOnErrors_writingFromInputStreamThatThrows() throws IOException {
    TestByteSink okSink = new TestByteSink();
    TestInputStream in = new TestInputStream(new ByteArrayInputStream(new byte[10]), READ_THROWS);
    assertThrows(IOException.class, () -> okSink.writeFrom(in));
    // Even when the read side fails, the sink's stream must be released.
    assertTrue(okSink.wasStreamClosed());
  }
}
"""Chain pipeline where the outputs of one step feed directly into next."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils.input import get_color_mapping
from pydantic import ConfigDict, model_validator
from typing_extensions import Self
from langchain_classic.chains.base import Chain
class SequentialChain(Chain):
    """Chain where the outputs of one chain feed directly into next.

    Each chain in ``chains`` is invoked in order; its outputs are merged into
    the running mapping of known values, from which the next chain draws its
    inputs. Only the keys listed in ``output_variables`` are returned.
    """

    # Chains to run, in order.
    chains: list[Chain]
    # Keys the caller must supply to the first chain(s).
    input_variables: list[str]
    # Keys returned to the caller (validated/derived by ``validate_chains``).
    output_variables: list[str]
    # If True and ``output_variables`` is not given, return every
    # intermediate output rather than just the last chain's outputs.
    return_all: bool = False

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> list[str]:
        """Return expected input keys to the chain."""
        return self.input_variables

    @property
    def output_keys(self) -> list[str]:
        """Return output key."""
        return self.output_variables

    @model_validator(mode="before")
    @classmethod
    def validate_chains(cls, values: dict) -> Any:
        """Validate that the correct inputs exist for all chains.

        Walks the chains in order, tracking the set of variables known so
        far, and raises ``ValueError`` for missing inputs, memory-key
        collisions, or outputs that would clobber existing variables.
        """
        chains = values["chains"]
        input_variables = values["input_variables"]
        memory_keys = []
        if "memory" in values and values["memory"] is not None:
            # Memory variables are injected automatically, so they must not
            # collide with the explicitly declared input variables.
            memory_keys = values["memory"].memory_variables
            if set(input_variables).intersection(set(memory_keys)):
                overlapping_keys = set(input_variables) & set(memory_keys)
                # Join with ', ' so multiple key names stay readable.
                msg = (
                    f"The input key(s) {', '.join(overlapping_keys)} are found "
                    f"in the Memory keys ({memory_keys}) - please use input and "
                    f"memory keys that don't overlap."
                )
                raise ValueError(msg)
        known_variables = set(input_variables + memory_keys)
        for chain in chains:
            missing_vars = set(chain.input_keys).difference(known_variables)
            if chain.memory:
                # A chain's own memory can satisfy some of its inputs.
                missing_vars = missing_vars.difference(chain.memory.memory_variables)
            if missing_vars:
                msg = (
                    f"Missing required input keys: {missing_vars}, "
                    f"only had {known_variables}"
                )
                raise ValueError(msg)
            overlapping_keys = known_variables.intersection(chain.output_keys)
            if overlapping_keys:
                msg = f"Chain returned keys that already exist: {overlapping_keys}"
                raise ValueError(msg)
            known_variables |= set(chain.output_keys)
        if "output_variables" not in values:
            if values.get("return_all", False):
                # Expose every intermediate output except the original inputs.
                output_keys = known_variables.difference(input_variables)
            else:
                output_keys = chains[-1].output_keys
            values["output_variables"] = output_keys
        else:
            missing_vars = set(values["output_variables"]).difference(known_variables)
            if missing_vars:
                msg = f"Expected output variables that were not found: {missing_vars}."
                raise ValueError(msg)
        return values

    def _call(
        self,
        inputs: dict[str, str],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Run each chain in sequence, feeding accumulated outputs forward."""
        known_values = inputs.copy()
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        for chain in self.chains:
            callbacks = _run_manager.get_child()
            outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
            known_values.update(outputs)
        return {k: known_values[k] for k in self.output_variables}

    async def _acall(
        self,
        inputs: dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        """Async variant of :meth:`_call`."""
        known_values = inputs.copy()
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        for chain in self.chains:
            outputs = await chain.acall(
                known_values,
                return_only_outputs=True,
                callbacks=callbacks,
            )
            known_values.update(outputs)
        return {k: known_values[k] for k in self.output_variables}
class SimpleSequentialChain(Chain):
    """Simple chain where the outputs of one step feed directly into next.

    Every chain must take exactly one input and produce exactly one output;
    the single output string of each step becomes the input of the next.
    """

    # Chains to run, in order.
    chains: list[Chain]
    # If True, strip surrounding whitespace from each intermediate output.
    strip_outputs: bool = False
    input_key: str = "input"
    output_key: str = "output"

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> list[str]:
        """Expect input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Return output key."""
        return [self.output_key]

    @model_validator(mode="after")
    def validate_chains(self) -> Self:
        """Validate that chains are all single input/output."""
        for chain in self.chains:
            if len(chain.input_keys) != 1:
                # Message previously referenced a nonexistent "SimplePipeline".
                msg = (
                    "Chains used in SimpleSequentialChain should all have one input, "
                    f"got {chain} with {len(chain.input_keys)} inputs."
                )
                raise ValueError(msg)
            if len(chain.output_keys) != 1:
                msg = (
                    "Chains used in SimpleSequentialChain should all have one output, "
                    f"got {chain} with {len(chain.output_keys)} outputs."
                )
                raise ValueError(msg)
        return self

    def _call(
        self,
        inputs: dict[str, str],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Thread a single string value through each chain in turn."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        _input = inputs[self.input_key]
        # One color per step so verbose output distinguishes the chains.
        color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
        for i, chain in enumerate(self.chains):
            _input = chain.run(
                _input,
                callbacks=_run_manager.get_child(f"step_{i + 1}"),
            )
            if self.strip_outputs:
                _input = _input.strip()
            _run_manager.on_text(
                _input,
                color=color_mapping[str(i)],
                end="\n",
                verbose=self.verbose,
            )
        return {self.output_key: _input}

    async def _acall(
        self,
        inputs: dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        """Async variant of :meth:`_call`."""
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        _input = inputs[self.input_key]
        color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
        for i, chain in enumerate(self.chains):
            _input = await chain.arun(
                _input,
                callbacks=_run_manager.get_child(f"step_{i + 1}"),
            )
            if self.strip_outputs:
                _input = _input.strip()
            await _run_manager.on_text(
                _input,
                color=color_mapping[str(i)],
                end="\n",
                verbose=self.verbose,
            )
        return {self.output_key: _input}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/cli"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/statefile"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// equateEmpty makes cmp treat nil and empty collections as equal in state diffs.
var equateEmpty = cmpopts.EquateEmpty()
// TestRefresh verifies that a basic refresh reads the resource from the
// provider and writes the updated state back to the -state file.
func TestRefresh(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ReadResourceCalled {
		t.Fatal("ReadResource should have been called")
	}
	// Re-read the state file, which the command should have updated in place.
	f, err := os.Open(statePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	newStateFile, err := statefile.Read(f)
	f.Close()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	actual := strings.TrimSpace(newStateFile.State.String())
	expected := strings.TrimSpace(testRefreshStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}
// TestRefresh_empty verifies that refreshing a configuration with no
// resources succeeds without ever calling the provider's ReadResource.
func TestRefresh_empty(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-empty"), td)
	t.Chdir(td)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if p.ReadResourceCalled {
		t.Fatal("ReadResource should not have been called")
	}
}
// TestRefresh_lockedState verifies that refresh fails with a lock-related
// error when the state file is already locked by another process.
func TestRefresh_lockedState(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	// Hold the lock for the duration of the test so the command cannot
	// acquire it.
	unlock, err := testLockState(t, testDataDir, statePath)
	if err != nil {
		t.Fatal(err)
	}
	defer unlock()
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code == 0 {
		t.Fatal("expected error")
	}
	got := output.Stderr()
	if !strings.Contains(got, "lock") {
		t.Fatal("command output does not look like a lock error:", got)
	}
}
// TestRefresh_cwd verifies refresh works when run directly inside the
// fixture directory (chdir into it rather than copying to a temp dir).
func TestRefresh_cwd(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(testFixturePath("refresh")); err != nil {
		t.Fatalf("err: %s", err)
	}
	// Restore the original working directory for subsequent tests.
	defer os.Chdir(cwd)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ReadResourceCalled {
		t.Fatal("ReadResource should have been called")
	}
	f, err := os.Open(statePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	newStateFile, err := statefile.Read(f)
	f.Close()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	actual := strings.TrimSpace(newStateFile.State.String())
	expected := strings.TrimSpace(testRefreshCwdStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}
// TestRefresh_defaultState verifies that refreshing against the default
// state filename updates it in place and writes a backup containing the
// pre-refresh state.
func TestRefresh_defaultState(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	originalState := testState()
	// Write the state file in a temporary directory with the
	// default filename.
	statePath := testStateFile(t, originalState)
	localState := statemgr.NewFilesystem(statePath)
	if err := localState.RefreshState(); err != nil {
		t.Fatal(err)
	}
	s := localState.State()
	if s == nil {
		t.Fatal("empty test state")
	}
	// Change to that directory
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(filepath.Dir(statePath)); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ReadResourceCalled {
		t.Fatal("ReadResource should have been called")
	}
	// The refreshed state should carry the provider's returned attributes.
	newState := testStateRead(t, statePath)
	actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	expected := &states.ResourceInstanceObjectSrc{
		Status:       states.ObjectReady,
		AttrsJSON:    []byte("{\n            \"ami\": null,\n            \"id\": \"yes\"\n          }"),
		Dependencies: []addrs.ConfigResource{},
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("wrong new object\ngot:  %swant: %s", spew.Sdump(actual), spew.Sdump(expected))
	}
	// The backup file should contain the state as it was before refresh.
	backupState := testStateRead(t, statePath+DefaultBackupExtension)
	actual = backupState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	expected = originalState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("wrong new object\ngot:  %swant: %s", spew.Sdump(actual), spew.Sdump(expected))
	}
}
// TestRefresh_outPath verifies that -state-out writes the refreshed state to
// a separate file, leaves the input state untouched, and creates no backup
// when the output file did not previously exist.
func TestRefresh_outPath(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	// Output path
	outf, err := ioutil.TempFile(td, "tf")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	outPath := outf.Name()
	outf.Close()
	// Remove the file so only its (unique) name is reused below.
	os.Remove(outPath)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
		"-state-out", outPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	// Input state must be unchanged.
	newState := testStateRead(t, statePath)
	if !reflect.DeepEqual(newState, state) {
		t.Fatalf("bad: %#v", newState)
	}
	newState = testStateRead(t, outPath)
	actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	expected := &states.ResourceInstanceObjectSrc{
		Status:       states.ObjectReady,
		AttrsJSON:    []byte("{\n            \"ami\": null,\n            \"id\": \"yes\"\n          }"),
		Dependencies: []addrs.ConfigResource{},
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("wrong new object\ngot:  %swant: %s", spew.Sdump(actual), spew.Sdump(expected))
	}
	if _, err := os.Stat(outPath + DefaultBackupExtension); !os.IsNotExist(err) {
		if err != nil {
			t.Fatalf("failed to test for backup file: %s", err)
		}
		t.Fatalf("backup file exists, but it should not because output file did not initially exist")
	}
}
// TestRefresh_var verifies that a -var flag value reaches the provider's
// configuration during refresh.
func TestRefresh_var(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-var"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshVarFixtureSchema()
	args := []string{
		"-var", "foo=bar",
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ConfigureProviderCalled {
		t.Fatal("configure should be called")
	}
	if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) {
		t.Fatalf("wrong provider configuration\ngot:  %#v\nwant: %#v", got, want)
	}
}
// TestRefresh_varFile verifies that variables supplied via an explicit
// -var-file reach the provider's configuration during refresh.
func TestRefresh_varFile(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-var"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshVarFixtureSchema()
	varFilePath := testTempFile(t)
	if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}
	args := []string{
		"-var-file", varFilePath,
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ConfigureProviderCalled {
		t.Fatal("configure should be called")
	}
	if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) {
		t.Fatalf("wrong provider configuration\ngot:  %#v\nwant: %#v", got, want)
	}
}
// TestRefresh_varFileDefault verifies that a terraform.tfvars file in the
// working directory is picked up automatically (no -var-file flag).
func TestRefresh_varFileDefault(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-var"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshVarFixtureSchema()
	// The default vars filename is loaded implicitly from the cwd.
	varFilePath := filepath.Join(td, "terraform.tfvars")
	if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	if !p.ConfigureProviderCalled {
		t.Fatal("configure should be called")
	}
	if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) {
		t.Fatalf("wrong provider configuration\ngot:  %#v\nwant: %#v", got, want)
	}
}
// TestRefresh_varsUnset verifies that an unset variable is prompted for
// interactively and the refresh still succeeds.
func TestRefresh_varsUnset(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-unset-var"), td)
	t.Chdir(td)
	// Disable test mode so input would be asked
	test = false
	defer func() { test = true }()
	// Pre-seed the interactive prompt's answer.
	defaultInputReader = bytes.NewBufferString("bar\n")
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	ui := new(cli.MockUi)
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			Ui:               ui,
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Body: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id":  {Type: cty.String, Optional: true, Computed: true},
						"ami": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
}
// TestRefresh_backup verifies that an explicit -backup path receives the
// previous contents of the -state-out file before it is overwritten.
func TestRefresh_backup(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	// Output path
	outf, err := os.CreateTemp(td, "tf")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	outPath := outf.Name()
	defer outf.Close()
	// Need to put some state content in the output file so that there's
	// something to back up.
	err = statefile.Write(statefile.New(state, "baz", 0), outf)
	if err != nil {
		t.Fatalf("error writing initial output state file %s", err)
	}
	// Backup path
	backupf, err := os.CreateTemp(td, "tf")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	backupPath := backupf.Name()
	backupf.Close()
	// Keep only the unique name; the command will create the backup file.
	os.Remove(backupPath)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("changed"),
		}),
	}
	args := []string{
		"-state", statePath,
		"-state-out", outPath,
		"-backup", backupPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	// Input state must be unchanged.
	newState := testStateRead(t, statePath)
	if !cmp.Equal(newState, state, cmpopts.EquateEmpty()) {
		t.Fatalf("got:\n%s\nexpected:\n%s\n", newState, state)
	}
	newState = testStateRead(t, outPath)
	actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	expected := &states.ResourceInstanceObjectSrc{
		Status:       states.ObjectReady,
		AttrsJSON:    []byte("{\n            \"ami\": null,\n            \"id\": \"changed\"\n          }"),
		Dependencies: []addrs.ConfigResource{},
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("wrong new object\ngot:  %swant: %s", spew.Sdump(actual), spew.Sdump(expected))
	}
	// The backup must contain the pre-refresh state.
	backupState := testStateRead(t, backupPath)
	actualStr := strings.TrimSpace(backupState.String())
	expectedStr := strings.TrimSpace(state.String())
	if actualStr != expectedStr {
		t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr)
	}
}
// TestRefresh_disableBackup verifies that "-backup -" disables backup file
// creation entirely (and that no file literally named "-" is created).
func TestRefresh_disableBackup(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	// Output path
	outf, err := ioutil.TempFile(td, "tf")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	outPath := outf.Name()
	outf.Close()
	os.Remove(outPath)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	p.ReadResourceFn = nil
	p.ReadResourceResponse = &providers.ReadResourceResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("yes"),
		}),
	}
	args := []string{
		"-state", statePath,
		"-state-out", outPath,
		"-backup", "-",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	newState := testStateRead(t, statePath)
	if !cmp.Equal(state, newState, equateEmpty) {
		spew.Config.DisableMethods = true
		fmt.Println(cmp.Diff(state, newState, equateEmpty))
		t.Fatalf("bad: %s", newState)
	}
	newState = testStateRead(t, outPath)
	actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current
	expected := &states.ResourceInstanceObjectSrc{
		Status:       states.ObjectReady,
		AttrsJSON:    []byte("{\n            \"ami\": null,\n            \"id\": \"yes\"\n          }"),
		Dependencies: []addrs.ConfigResource{},
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("wrong new object\ngot:  %swant: %s", spew.Sdump(actual), spew.Sdump(expected))
	}
	// Ensure there is no backup
	_, err = os.Stat(outPath + DefaultBackupExtension)
	if err == nil || !os.IsNotExist(err) {
		t.Fatalf("backup should not exist")
	}
	_, err = os.Stat("-")
	if err == nil || !os.IsNotExist(err) {
		t.Fatalf("backup should not exist")
	}
}
// TestRefresh_displaysOutputs verifies that root module output values are
// printed after a successful refresh.
func TestRefresh_displaysOutputs(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-output"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Body: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id":  {Type: cty.String, Optional: true, Computed: true},
						"ami": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	args := []string{
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	// Test that outputs were displayed
	outputValue := "foo.example.com"
	actual := output.Stdout()
	if !strings.Contains(actual, outputValue) {
		t.Fatalf("Expected:\n%s\n\nTo include: %q", actual, outputValue)
	}
}
// Config with multiple resources, targeting refresh of a subset
// TestRefresh_targeted verifies that -target limits refresh to the named
// resource and leaves the others untouched.
func TestRefresh_targeted(t *testing.T) {
	td := t.TempDir()
	testCopyDir(t, testFixturePath("refresh-targeted"), td)
	t.Chdir(td)
	state := testState()
	statePath := testStateFile(t, state)
	p := testProvider()
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Body: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id": {Type: cty.String, Computed: true},
					},
				},
			},
		},
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	view, done := testView(t)
	c := &RefreshCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}
	args := []string{
		"-target", "test_instance.foo",
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}
	got := output.Stdout()
	if want := "test_instance.foo: Refreshing"; !strings.Contains(got, want) {
		t.Fatalf("expected output to contain %q, got:\n%s", want, got)
	}
	if doNotWant := "test_instance.bar: Refreshing"; strings.Contains(got, doNotWant) {
		t.Fatalf("expected output not to contain %q, got:\n%s", doNotWant, got)
	}
}
// Diagnostics for invalid -target flags
func TestRefresh_targetFlagsDiags(t *testing.T) {
	// Map of invalid -target value to a fragment of the expected diagnostic.
	testCases := map[string]string{
		"test_instance.": "Dot must be followed by attribute name.",
		"test_instance":  "Resource specification must include a resource type and name.",
	}
	for target, wantDiag := range testCases {
		t.Run(target, func(t *testing.T) {
			td := testTempDir(t)
			defer os.RemoveAll(td)
			t.Chdir(td)
			view, done := testView(t)
			c := &RefreshCommand{
				Meta: Meta{
					View: view,
				},
			}
			args := []string{
				"-target", target,
			}
			code := c.Run(args)
			output := done(t)
			if code != 1 {
				t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
			}
			// The diagnostic should echo the offending target expression.
			got := output.Stderr()
			if !strings.Contains(got, target) {
				t.Fatalf("bad error output, want %q, got:\n%s", target, got)
			}
			if !strings.Contains(got, wantDiag) {
				t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got)
			}
		})
	}
}
// TestRefresh_warnings verifies that provider warnings are shown both in the
// default (full) form and in the -compact-warnings form.
func TestRefresh_warnings(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	t.Chdir(td)
	p := testProvider()
	p.GetProviderSchemaResponse = refreshFixtureSchema()
	// Every plan emits the same two warnings.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
			Diagnostics: tfdiags.Diagnostics{
				tfdiags.SimpleWarning("warning 1"),
				tfdiags.SimpleWarning("warning 2"),
			},
		}
	}
	t.Run("full warnings", func(t *testing.T) {
		view, done := testView(t)
		c := &RefreshCommand{
			Meta: Meta{
				testingOverrides: metaOverridesForProvider(p),
				View:             view,
			},
		}
		code := c.Run([]string{})
		output := done(t)
		if code != 0 {
			t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
		}
		wantWarnings := []string{
			"warning 1",
			"warning 2",
		}
		for _, want := range wantWarnings {
			if !strings.Contains(output.Stdout(), want) {
				t.Errorf("missing warning %s", want)
			}
		}
	})
	t.Run("compact warnings", func(t *testing.T) {
		view, done := testView(t)
		c := &RefreshCommand{
			Meta: Meta{
				testingOverrides: metaOverridesForProvider(p),
				View:             view,
			},
		}
		code := c.Run([]string{"-compact-warnings"})
		output := done(t)
		if code != 0 {
			t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
		}
		// the output should contain 2 warnings and a message about -compact-warnings
		wantWarnings := []string{
			"warning 1",
			"warning 2",
			"To see the full warning notes, run Terraform without -compact-warnings.",
		}
		for _, want := range wantWarnings {
			if !strings.Contains(output.Stdout(), want) {
				t.Errorf("missing warning %s", want)
			}
		}
	})
}
// refreshFixtureSchema returns a schema suitable for processing the
// configuration in testdata/refresh . This schema should be
// assigned to a mock provider named "test".
func refreshFixtureSchema() *providers.GetProviderSchemaResponse {
	return &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Body: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id":  {Type: cty.String, Optional: true, Computed: true},
						"ami": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
}
// refreshVarFixtureSchema returns a schema suitable for processing the
// configuration in testdata/refresh-var . This schema should be
// assigned to a mock provider named "test".
func refreshVarFixtureSchema() *providers.GetProviderSchemaResponse {
	return &providers.GetProviderSchemaResponse{
		// Provider-level config has a single optional "value" attribute,
		// which the var tests assert receives the variable's value.
		Provider: providers.Schema{
			Body: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Body: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id": {Type: cty.String, Optional: true, Computed: true},
					},
				},
			},
		},
	}
}
// refreshVarFile is the content written to ad-hoc var files in the tests.
const refreshVarFile = `
foo = "bar"
`

// testRefreshStr is the expected rendered state after a plain refresh.
const testRefreshStr = `
test_instance.foo:
  ID = yes
  provider = provider["registry.terraform.io/hashicorp/test"]
`

// testRefreshCwdStr is the expected rendered state for the cwd-based test.
const testRefreshCwdStr = `
test_instance.foo:
  ID = yes
  provider = provider["registry.terraform.io/hashicorp/test"]
`
"""Tests for asyncio/threads.py"""
import asyncio
import unittest
from contextvars import ContextVar
from unittest import mock
def tearDownModule():
    # Reset the global event loop policy so later test modules start clean.
    asyncio.events._set_event_loop_policy(None)
class ToThreadTests(unittest.IsolatedAsyncioTestCase):
    """Exercise asyncio.to_thread(): results, exceptions, call counts,
    concurrency, argument forwarding, and context-variable propagation."""

    async def test_to_thread(self):
        # The callable's return value is delivered back to the coroutine.
        self.assertEqual(await asyncio.to_thread(sum, [40, 2]), 42)

    async def test_to_thread_exception(self):
        def boom():
            raise RuntimeError("test")

        # Exceptions raised in the worker thread propagate to the awaiter.
        with self.assertRaisesRegex(RuntimeError, "test"):
            await asyncio.to_thread(boom)

    async def test_to_thread_once(self):
        tracked = mock.Mock()
        await asyncio.to_thread(tracked)
        tracked.assert_called_once()

    async def test_to_thread_concurrent(self):
        calls = []

        def record():
            calls.append(1)

        # Fan out ten concurrent to_thread() calls and await them together.
        await asyncio.gather(*(asyncio.to_thread(record) for _ in range(10)))
        self.assertEqual(sum(calls), 10)

    async def test_to_thread_args_kwargs(self):
        # Unlike run_in_executor(), to_thread() should directly accept kwargs.
        func = mock.Mock()
        await asyncio.to_thread(func, 'test', something=True)
        func.assert_called_once_with('test', something=True)

    async def test_to_thread_contextvars(self):
        test_ctx = ContextVar('test_ctx')

        def read_ctx():
            return test_ctx.get()

        # The worker thread runs inside a copy of the caller's context.
        test_ctx.set('parrot')
        self.assertEqual(await asyncio.to_thread(read_ctx), 'parrot')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
"""Lattice module.
In this module the lattice of the corresponding accelerator is defined.
"""
import math as _math
from pyaccel import lattice as _pyacc_lat, elements as _pyacc_ele, \
accelerator as _pyacc_acc, optics as _pyacc_opt
from . import segmented_models as _segmented_models
energy = 0.150e9 # [eV]
default_optics_mode = 'M1'
class LatticeError(Exception):
    """Exception raised for errors while building the accelerator lattice."""
def create_lattice(optics_mode=default_optics_mode):
    """Build the TB transport-line lattice.

    Args:
        optics_mode (str): optics mode name selecting quadrupole strengths
            and the initial Twiss parameters (default 'M1').

    Returns:
        tuple: (the_line, twiss_at_start) - the pyaccel lattice and the
        Twiss parameters at its entrance.

    Raises:
        LatticeError: if any element of the assembled model has negative
            length.
    """
    strengths, twiss_at_start = get_optics_mode(optics_mode)
    # -- shortcut symbols --
    marker = _pyacc_ele.marker
    drift = _pyacc_ele.drift
    quadrupole = _pyacc_ele.quadrupole
    rbend_sirius = _pyacc_ele.rbend
    sextupole = _pyacc_ele.sextupole
    deg_2_rad = _math.pi / 180.0
    corr_length = 0.082  # corrector magnet length [m]
    # --- drift spaces ---
    lp2 = drift('lp2', 0.0002)
    lp3 = drift('lp3', 0.0003)
    lp4 = drift('lp4', 0.0004)
    lp5 = drift('lp5', 0.0005)
    lp6 = drift('lp6', 0.0006)
    lp7 = drift('lp7', 0.0007)
    l1 = drift('l1', 0.001)
    l2 = drift('l2', 0.002)
    l3 = drift('l3', 0.003)
    l4 = drift('l4', 0.004)
    l5 = drift('l5', 0.005)
    l6 = drift('l6', 0.006)
    l7 = drift('l7', 0.007)
    l8 = drift('l8', 0.008)
    l9 = drift('l9', 0.009)
    l10 = drift('l10', 0.010)
    l30 = drift('l30', 0.030)
    l40 = drift('l40', 0.040)
    l60 = drift('l60', 0.060)
    l70 = drift('l70', 0.070)
    l80 = drift('l80', 0.080)
    l90 = drift('l90', 0.090)
    l100 = drift('l100', 0.100)
    l200 = drift('l200', 0.200)
    # --- markers ---
    inicio = marker('start')
    fim = marker('end')
    # --- slits ---
    slith = marker('SlitH')
    slitv = marker('SlitV')
    # --- beam screens ---
    scrn = marker('Scrn')
    # --- beam current monitors ---
    ict = marker('ICT')
    fct = marker('FCT')
    # --- beam position monitors ---
    bpm = marker('BPM')
    # --- correctors ---
    # Combined horizontal/vertical corrector, modeled as a zero-strength
    # sextupole element.
    chv = sextupole('CHV', corr_length, 0.0)
    # cv = sextupole('CV', corr_length, 0.0)
    # --- quadrupoles ---
    qf2L = quadrupole('QF2L', 0.112, strengths['qf2l']) # LINAC TRIPLET
    qd2L = quadrupole('QD2L', 0.162, strengths['qd2l']) # LINAC TRIPLET
    qf3L = quadrupole('QF3L', 0.112, strengths['qf3l']) # LINAC QUADRUPOLE
    # -- spec --
    ang = 15.0 # injection mode
    dip_nam = 'Spect'
    dip_len = 0.45003
    dip_ang = -ang * deg_2_rad
    dip_K = 0.0
    dip_S = 0.00
    spech = rbend_sirius(dip_nam, dip_len/2, dip_ang/2,
                         0, 0,
                         0, 0, 0, [0, 0, 0], [0, dip_K, dip_S])
    spec = [spech, spech]
    qd1 = quadrupole('QD1', 0.100, strengths['qd1'])
    qf1 = quadrupole('QF1', 0.100, strengths['qf1'])
    qd2a = quadrupole('QD2A', 0.100, strengths['qd2a'])
    qf2a = quadrupole('QF2A', 0.100, strengths['qf2a'])
    qf2b = quadrupole('QF2B', 0.100, strengths['qf2b'])
    qd2b = quadrupole('QD2B', 0.100, strengths['qd2b'])
    qf3 = quadrupole('QF3', 0.100, strengths['qf3'])
    qd3 = quadrupole('QD3', 0.100, strengths['qd3'])
    qf4 = quadrupole('QF4', 0.100, strengths['qf4'])
    qd4 = quadrupole('QD4', 0.100, strengths['qd4'])
    # --- bending magnets ---
    bp = _segmented_models.dipole(sign=+1)
    bn = _segmented_models.dipole(sign=-1)
    # -- bo injection septum --
    dip_nam = 'InjSept'
    dip_len = 0.50
    dip_ang = 21.75 * deg_2_rad
    dip_K = 0.0
    dip_S = 0.00
    septine = rbend_sirius(dip_nam, dip_len/2, dip_ang/2,
                           1*dip_ang/2, 0*dip_ang,
                           0, 0, 0, [0, 0, 0], [0, dip_K, dip_S])
    septins = rbend_sirius(dip_nam, dip_len/2, dip_ang/2,
                           0*dip_ang, 1*dip_ang/2,
                           0, 0, 0, [0, 0, 0], [0, dip_K, dip_S])
    bseptin = marker('bInjS')
    eseptin = marker('eInjS')
    # Excluded ch to make it consistent with other codes.
    # The corrector can be implemented in the polynomB:
    septin = [bseptin, septine, septins, eseptin]
    # --- lines ---
    s00_1 = [l80, l4, qf2L, l30, l8, qd2L, l30, l8, qf2L, l30, l8, qf3L]
    s00_2 = [l80, l7, bpm, l200, l40, l6, ict, l200, l100, l90, l5]
    s01_1 = [
        l200, l200, l200, l80, l4, lp2, scrn, l100, l40, lp2, bpm,
        l100, l2, lp4]
    s01_2 = [l80, l8, lp4, chv, l200, l90, l1, lp2]
    s01_3 = [
        l200, l200, l200, l200, l200, l40, l4, slith, l100, l80, scrn,
        l100, l40, bpm, l100, l90, l9, chv, l100, l90, l3, lp3, slitv,
        l200, l10, lp4]
    s02_1 = [l100, l90, l4, lp4, ict, l200, l200, l200, l10, l6]
    s02_2 = [l200, l70]
    s02_3 = [
        l200, scrn, l100, l40, bpm, l60, l9, chv] + [l200]*26 + \
        [l100, l70, l3]
    s02_4 = [l200, l70]
    s02_5 = [
        l200, scrn, l100, l40, bpm, l60, l8, lp5, chv, l200, l100,
        l10, l9, lp7]
    s03_1 = [l200] * 10 + [l100, l90, l9, lp6]
    s03_2 = [l200, l6]
    s03_3 = [l100, bpm, l100, l40, l4, scrn, l200, l10, lp4]
    s04_1 = [
        l200, l70, l2, lp4, chv, l200, l200, l100, l80, lp5, fct,
        l100, l40, ict, l200, l100, l5, lp7, bpm, l100, l10, l5, lp6]
    s04_2 = [l200, l10, l6]
    s04_3 = [l100, l70, scrn, l60, l1, lp2, chv, l80, l6, lp6]
    sector00 = [s00_1, s00_2, spec]
    sector01 = [s01_1, qd1, s01_2, qf1, s01_3, bn]
    sector02 = [s02_1, qd2a, s02_2, qf2a, s02_3, qf2b, s02_4, qd2b, s02_5, bp]
    sector03 = [s03_1, qf3, s03_2, qd3, s03_3, bp]
    sector04 = [s04_1, qf4, s04_2, qd4, s04_3, septin]
    # TB beamline
    ltlb = [inicio, sector00, sector01, sector02, sector03, sector04, fim]
    elist = ltlb
    the_line = _pyacc_lat.build(elist)
    # --- shifts model to marker 'start' ---
    idx = _pyacc_lat.find_indices(the_line, 'fam_name', 'start')
    the_line = _pyacc_lat.shift(the_line, idx[0])
    # Sanity check: a negative drift means the hard-coded lengths above are
    # inconsistent.
    lengths = _pyacc_lat.get_attribute(the_line, 'length')
    for length in lengths:
        if length < 0:
            raise LatticeError('Model with negative drift!')
    # sets number of integration steps
    set_num_integ_steps(the_line)
    # -- define vacuum chamber for all elements
    the_line = set_vacuum_chamber(the_line)
    return the_line, twiss_at_start
def get_optics_mode(optics_mode):
    """Return magnet strengths and initial Twiss of a given optics mode.

    Args:
        optics_mode (str): 'M1' or 'M2'.

    Returns:
        tuple: (strengths, twiss_at_start) where strengths maps lowercase
        magnet names (as looked up by create_lattice) to integrated
        strengths.

    Raises:
        _pyacc_acc.AcceleratorException: for an unknown optics mode.
    """
    # -- selection of optics mode --
    if optics_mode == 'M1':
        # Initial Conditions from Linac measured parameters on 16/07/2019
        # Linac second quadrupole triplet set to same values used during
        # measurements (Sem tripleto)
        twiss_at_start = _pyacc_opt.Twiss.make_new(
            beta=[2.71462, 4.69925], alpha=[-2.34174, 1.04009],
            etax=[0.0, 0.0])
        strengths = {
            'qf2l': 12.37,
            'qd2l': -14.85,
            'qf3l': 5.713160289024,
            'qd1': -8.821809143987,
            'qf1': 13.335946597802,
            'qd2a': -11.859318300947,
            'qf2a': 14.532892396682,
            'qf2b': 8.647545577362,
            'qd2b': -8.836916532517,
            'qf3': 10.020651462368,
            'qd3': -4.974049498621,
            'qf4': 11.168208453391,
            'qd4': -6.191738912262,
        }
    elif optics_mode == 'M2':
        # Initial Conditions from Linac measured parameters on 16/07/2019
        # Linac second quadrupole triplet is used to match the LBT optics
        # (Sem tripleto)
        twiss_at_start = _pyacc_opt.Twiss.make_new(
            beta=[2.71462, 4.69925], alpha=[-2.34174, 1.04009],
            etax=[0.0, 0.0])
        # BUGFIX: keys must be lowercase ('qf2l', not 'qf2L') -- the original
        # capital-L spelling made create_lattice() raise KeyError for M2.
        strengths = {
            'qf2l': 11.78860,
            'qd2l': -14.298290,
            'qf3l': 4.801910,
            'qd1': -8.822256368219,
            'qf1': 13.336060990905,
            'qd2a': -9.382785447106,
            'qf2a': 12.670391768958,
            'qf2b': 7.994238513566,
            'qd2b': -7.118805773505,
            'qf3': 10.328752039153,
            'qd3': -5.519539215470,
            'qf4': 11.635406805193,
            'qd4': -6.936225524796,
        }
    else:
        # BUGFIX: the original built the exception but never raised it, which
        # ended in an UnboundLocalError on the return statement below.
        raise _pyacc_acc.AcceleratorException(
            'Invalid TB optics mode: ' + optics_mode)
    return strengths, twiss_at_start
def set_num_integ_steps(the_line):
    """Set number of integration steps in each lattice element.

    Dipoles get at least 10 slices (one per 3.5 cm of length); quadrupoles
    and sextupoles get 10; pure drifts get 1; correctors get 5.
    """
    dl = 0.035  # maximum dipole slice length [m]
    for elem in the_line:
        if elem.angle:
            elem.nr_steps = max(10, int(_math.ceil(elem.length / dl)))
        elif elem.polynom_b[1] or elem.polynom_b[2]:
            # Quadrupole (polynom_b[1]) or sextupole (polynom_b[2]) component.
            elem.nr_steps = 10
        else:
            elem.nr_steps = 1
    # The line uses combined H/V correctors with a single 'CHV' family name;
    # the original code searched 'CHV' twice and iterated every corrector
    # index twice.
    for idx in _pyacc_lat.find_indices(the_line, 'fam_name', 'CHV'):
        the_line[idx].nr_steps = 5
def set_vacuum_chamber(the_line):
    """Set vacuum chamber for all elements."""
    def _set_aperture(indices, half_h, half_v):
        # Assign symmetric physical aperture half-widths [m] to elements.
        for i in indices:
            the_line[i].hmin = -half_h
            the_line[i].hmax = +half_h
            the_line[i].vmin = -half_v
            the_line[i].vmax = +half_v
    # -- default physical apertures --
    _set_aperture(range(len(the_line)), 0.018, 0.018)
    # -- bo injection septum --
    beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjS')[0]
    end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjS')[0]
    _set_aperture(range(beg, end + 1), 0.0075, 0.0080)
    # -- dipoles --
    _set_aperture(
        _pyacc_lat.find_indices(the_line, 'fam_name', 'B'), 0.0117, 0.0117)
    return the_line
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
    '''
    Simple encoder class to deal with JSON encoding of internal
    types like HostVars
    '''
    def default(self, o):
        # Materialize HostVars as a plain dict so the stock encoder can
        # serialize it; everything else falls through to the base class,
        # which raises TypeError for unsupported types.
        if isinstance(o, HostVars):
            return dict(o)
        else:
            return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
    '''Make verbose, human readable yaml'''
    # allow_unicode keeps non-ASCII text readable instead of escaped.
    return to_text(yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw))
def to_nice_yaml(a, indent=4, *args, **kw):
    '''Make verbose, human readable yaml'''
    # Block style (default_flow_style=False) with configurable indentation.
    dumped = yaml.dump(a, Dumper=AnsibleDumper, indent=indent,
                       allow_unicode=True, default_flow_style=False, **kw)
    return to_text(dumped)
def to_json(a, *args, **kw):
    ''' Convert the value to JSON '''
    # AnsibleJSONEncoder knows how to serialize internal types like HostVars.
    return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
    '''Make verbose, human readable JSON'''
    # python-2.6's json encoder is buggy (can't encode hostvars)
    if sys.version_info < (2, 7):
        try:
            import simplejson
        except ImportError:
            pass
        else:
            try:
                major = int(simplejson.__version__.split('.')[0])
            except:
                pass
            else:
                # simplejson >= 2 supports the keyword arguments we need.
                if major >= 2:
                    return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
    try:
        return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
    except:
        # Fallback to the to_json filter
        return to_json(a, *args, **kw)
def to_bool(a):
    ''' return a bool for the arg '''
    # None and real booleans pass through unchanged.
    if a is None or type(a) == bool:
        return a
    if isinstance(a, string_types):
        a = a.lower()
    # Anything not in the accepted truthy set is False (including 'no', 2, ...).
    return a in ('yes', 'on', '1', 'true', 1)
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
    '''Parse *string* into a datetime; note the default is year-DAY-month.'''
    parsed = datetime.strptime(string, format)
    return parsed
def quote(a):
    ''' return its argument quoted for shell usage '''
    # shlex_quote handles embedded spaces and shell metacharacters safely.
    return shlex_quote(a)
def fileglob(pathname):
    ''' return list of matched regular files for glob '''
    # Directories and other non-file matches are filtered out.
    matches = glob.glob(pathname)
    return [match for match in matches if os.path.isfile(match)]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
    ''' Perform a `re.sub` returning a string '''
    value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
    flags = re.I if ignorecase else 0
    return re.compile(pattern, flags=flags).sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
    ''' Perform re.findall and return the list of matches '''
    # Build the flag mask from the boolean options.
    flags = (re.I if ignorecase else 0) | (re.M if multiline else 0)
    return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
    ''' Perform re.search and return the list of matches or a backref '''
    groups = list()
    for arg in args:
        # '\g<name>' selects a named capture group; '\N' a positional one.
        if arg.startswith('\\g'):
            match = re.match(r'\\g<(\S+)>', arg).group(1)
            groups.append(match)
        elif arg.startswith('\\'):
            match = int(re.match(r'\\(\d+)', arg).group(1))
            groups.append(match)
        else:
            raise errors.AnsibleFilterError('Unknown argument')
    flags = 0
    if kwargs.get('ignorecase'):
        flags |= re.I
    if kwargs.get('multiline'):
        flags |= re.M
    match = re.search(regex, value, flags)
    # Implicitly returns None when there is no match.
    if match:
        if not groups:
            return match.group()
        else:
            items = list()
            for item in groups:
                items.append(match.group(item))
            return items
def ternary(value, true_val, false_val):
    ''' value ? true_val : false_val '''
    return true_val if value else false_val
def regex_escape(string):
    '''Escape all regular expressions special characters from STRING.'''
    # Thin wrapper so templates can use `| regex_escape`.
    return re.escape(string)
def from_yaml(data):
    # Only parse strings; values that are already deserialized pass through
    # unchanged.
    if isinstance(data, string_types):
        return yaml.safe_load(data)
    return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
    # With no seed use SystemRandom (non-reproducible); a seed switches to
    # a deterministic PRNG so templates can get repeatable values.
    if seed is None:
        r = SystemRandom()
    else:
        r = Random(seed)
    if isinstance(end, integer_types):
        # Integer form: behave like randrange(start, end, step).
        if not start:
            start = 0
        if not step:
            step = 1
        return r.randrange(start, end, step)
    elif hasattr(end, '__iter__'):
        # Sequence form: pick one element; start/step make no sense here.
        if start or step:
            raise errors.AnsibleFilterError('start and step can only be used with integer values')
        return r.choice(end)
    else:
        raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
    '''Return a shuffled copy of *mylist*.

    A *seed* makes the shuffle deterministic.  Values that cannot be turned
    into a list (e.g. non-iterables) are returned unchanged, preserving the
    filter's historical best-effort behavior.
    '''
    try:
        mylist = list(mylist)
        # `is not None` so that 0 (a perfectly valid seed) still seeds the
        # RNG; the original truthiness test silently ignored it.
        if seed is not None:
            r = Random(seed)
            r.shuffle(mylist)
        else:
            shuffle(mylist)
    except Exception:
        # Best effort: leave the value untouched rather than failing the
        # template.  A bare `except:` would also trap SystemExit and
        # KeyboardInterrupt, which must propagate.
        pass
    return mylist
def get_hash(data, hashtype='sha1'):
    '''Return the hex digest of *data* using *hashtype*.

    Returns None when the hash type is not supported by hashlib.
    '''
    try:  # see if hash is supported
        h = hashlib.new(hashtype)
    except ValueError:
        # hashlib.new raises ValueError for unknown algorithms; the original
        # bare `except:` also hid unrelated errors.
        return None
    h.update(to_bytes(data, errors='surrogate_then_strict'))
    return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
    '''Return a crypt(3)-style password hash, or None for unknown hashtypes.'''
    # TODO: find a way to construct dynamically from system
    # Maps hash names to crypt(3) method identifiers ($1$, $2a$, ...).
    cryptmethod= {
        'md5': '1',
        'blowfish': '2a',
        'sha256': '5',
        'sha512': '6',
    }
    if hashtype in cryptmethod:
        if salt is None:
            # Generate a random salt of the size the scheme expects.
            r = SystemRandom()
            if hashtype in ['md5']:
                saltsize = 8
            else:
                saltsize = 16
            salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)])
        if not HAS_PASSLIB:
            # crypt() on Darwin does not support these schemes, hence passlib
            # is mandatory there.
            if sys.platform.startswith('darwin'):
                raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
            saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
            encrypted = crypt.crypt(password, saltstring)
        else:
            if hashtype == 'blowfish':
                cls = passlib.hash.bcrypt
            else:
                cls = getattr(passlib.hash, '%s_crypt' % hashtype)
            encrypted = cls.encrypt(password, salt=salt)
        return encrypted
    return None
def to_uuid(string):
    # Deterministic UUIDv5 in Ansible's namespace: equal inputs always map
    # to equal UUIDs across runs and hosts.
    return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
    '''Make a variable mandatory: fail the template if it is undefined.'''
    # The original placed this docstring after the import, where it was a
    # no-op string expression instead of the function's __doc__.
    from jinja2.runtime import Undefined
    if isinstance(a, Undefined):
        raise errors.AnsibleFilterError('Mandatory variable not defined.')
    return a
def combine(*terms, **kwargs):
    '''Merge the given dictionaries; `recursive=True` deep-merges them.'''
    recursive = kwargs.get('recursive', False)
    # Reject any keyword other than `recursive` (py2-compatible kw handling).
    if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
        raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
    for t in terms:
        if not isinstance(t, dict):
            raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
    if recursive:
        return reduce(merge_hash, terms)
    else:
        # Shallow merge: later dictionaries win on key conflicts.
        return dict(itertools.chain(*map(iteritems, terms)))
def comment(text, style='plain', **kw):
    '''Decorate *text* as a comment block in the given *style*.

    Known styles: plain, erlang, c, cblock, xml.  Keyword arguments override
    the per-style defaults (newline, beginning, prefix, prefix_count,
    decoration, postfix, postfix_count, end).
    '''
    # Predefined comment types
    comment_styles = {
        'plain': {
            'decoration': '# '
        },
        'erlang': {
            'decoration': '% '
        },
        'c': {
            'decoration': '// '
        },
        'cblock': {
            'beginning': '/*',
            'decoration': ' * ',
            'end': ' */'
        },
        'xml': {
            'beginning': '<!--',
            'decoration': ' - ',
            'end': '-->'
        }
    }
    # Pointer to the right comment type
    style_params = comment_styles[style]
    if 'decoration' in kw:
        prepostfix = kw['decoration']
    else:
        prepostfix = style_params['decoration']
    # Default params
    p = {
        'newline': '\n',
        'beginning': '',
        'prefix': (prepostfix).rstrip(),
        'prefix_count': 1,
        'decoration': '',
        'postfix': (prepostfix).rstrip(),
        'postfix_count': 1,
        'end': ''
    }
    # Update default params
    p.update(style_params)
    p.update(kw)
    # Compose substrings for the final string
    str_beginning = ''
    if p['beginning']:
        str_beginning = "%s%s" % (p['beginning'], p['newline'])
    str_prefix = ''
    if p['prefix']:
        # A prefix equal to the newline means "blank line(s)", no decorator.
        if p['prefix'] != p['newline']:
            str_prefix = str(
                "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
        else:
            str_prefix = str(
                "%s" % (p['newline'])) * int(p['prefix_count'])
    str_text = ("%s%s" % (
        p['decoration'],
        # Prepend each line of the text with the decorator
        text.replace(
            p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
            # Remove trailing spaces when only decorator is on the line
            "%s%s" % (p['decoration'], p['newline']),
            "%s%s" % (p['decoration'].rstrip(), p['newline']))
    str_postfix = p['newline'].join(
        [''] + [p['postfix'] for x in range(p['postfix_count'])])
    str_end = ''
    if p['end']:
        str_end = "%s%s" % (p['newline'], p['end'])
    # Return the final string
    return "%s%s%s%s%s" % (
        str_beginning,
        str_prefix,
        str_text,
        str_postfix,
        str_end)
def extract(item, container, morekeys=None):
    '''Return container[item]; with *morekeys*, descend further into it.'''
    from jinja2.runtime import Undefined
    value = container[item]
    if value is not Undefined and morekeys is not None:
        if not isinstance(morekeys, list):
            morekeys = [morekeys]
        try:
            # Walk the chain of keys, e.g. extract('a', c, ['b', 'c']) ->
            # c['a']['b']['c'].
            value = reduce(lambda d, k: d[k], morekeys, value)
        except KeyError:
            value = Undefined()
    return value
def failed(*a, **kw):
    ''' Test if task result yields failed '''
    item = a[0]
    if type(item) != dict:
        raise errors.AnsibleFilterError("|failed expects a dictionary")
    # A nonzero return code or an explicit `failed` flag marks failure.
    return item.get('rc', 0) != 0 or bool(item.get('failed', False))
def success(*a, **kw):
    ''' Test if task result yields success '''
    # Success is defined as the logical negation of failure.
    return not failed(*a, **kw)
def changed(*a, **kw):
    ''' Test if task result yields changed '''
    item = a[0]
    if type(item) != dict:
        raise errors.AnsibleFilterError("|changed expects a dictionary")
    if not 'changed' in item:
        changed = False
        if ('results' in item # some modules return a 'results' key
                and type(item['results']) == list
                and type(item['results'][0]) == dict):
            # Loop results: changed if any per-item result changed.
            for result in item['results']:
                changed = changed or result.get('changed', False)
    else:
        changed = item.get('changed', False)
    return changed
def skipped(*a, **kw):
    ''' Test if task result yields skipped '''
    result = a[0]
    if type(result) != dict:
        raise errors.AnsibleFilterError("|skipped expects a dictionary")
    return result.get('skipped', False)
@environmentfilter
def do_groupby(environment, value, attribute):
    """Overridden groupby filter for jinja2, to address an issue with
    jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
    has repr that prevents ansible.template.safe_eval.safe_eval from being
    able to parse and eval the data.
    jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
    >=2.9.5 uses a standard tuple repr on the namedtuple.
    The adaptation here, is to run the jinja2 `do_groupby` function, and
    cast all of the namedtuples to a regular tuple.
    See https://github.com/ansible/ansible/issues/20098
    We may be able to remove this in the future.
    """
    # tuple() flattens the (grouper, list) namedtuple into a plain tuple.
    return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string):
    # Encode text to bytes first; b64encode returns bytes, convert back to text.
    return to_text(base64.b64encode(to_bytes(string, errors='surrogate_then_strict')))
def b64decode(string):
    # Symmetric to b64encode: bytes in, decoded text out.
    return to_text(base64.b64decode(to_bytes(string, errors='surrogate_then_strict')))
class FilterModule(object):
    ''' Ansible core jinja2 filters '''
    def filters(self):
        '''Return the mapping of filter names to the callables above.'''
        return {
            # jinja2 overrides
            'groupby': do_groupby,
            # base 64
            'b64decode': b64decode,
            'b64encode': b64encode,
            # uuid
            'to_uuid': to_uuid,
            # json
            'to_json': to_json,
            'to_nice_json': to_nice_json,
            'from_json': json.loads,
            # yaml
            'to_yaml': to_yaml,
            'to_nice_yaml': to_nice_yaml,
            'from_yaml': from_yaml,
            #date
            'to_datetime': to_datetime,
            # path
            'basename': partial(unicode_wrap, os.path.basename),
            'dirname': partial(unicode_wrap, os.path.dirname),
            'expanduser': partial(unicode_wrap, os.path.expanduser),
            'realpath': partial(unicode_wrap, os.path.realpath),
            'relpath': partial(unicode_wrap, os.path.relpath),
            'splitext': partial(unicode_wrap, os.path.splitext),
            'win_basename': partial(unicode_wrap, ntpath.basename),
            'win_dirname': partial(unicode_wrap, ntpath.dirname),
            'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
            # value as boolean
            'bool': to_bool,
            # quote string for shell usage
            'quote': quote,
            # hash filters
            # md5 hex digest of string
            'md5': md5s,
            # sha1 hex digeset of string
            'sha1': checksum_s,
            # checksum of string as used by ansible for checksuming files
            'checksum': checksum_s,
            # generic hashing
            'password_hash': get_encrypted_password,
            'hash': get_hash,
            # file glob
            'fileglob': fileglob,
            # regex
            'regex_replace': regex_replace,
            'regex_escape': regex_escape,
            'regex_search': regex_search,
            'regex_findall': regex_findall,
            # ? : ;
            'ternary': ternary,
            # list
            # random stuff
            'random': rand,
            'shuffle': randomize_list,
            # undefined
            'mandatory': mandatory,
            # merge dicts
            'combine': combine,
            # comment-style decoration
            'comment': comment,
            # array and dict lookups
            'extract': extract,
            # failure testing
            'failed' : failed,
            'failure' : failed,
            'success' : success,
            'succeeded' : success,
            # changed testing
            'changed' : changed,
            'change' : changed,
            # skip testing
            'skipped' : skipped,
            'skip' : skipped,
            # debug
            'type_debug': lambda o: o.__class__.__name__,
        }
// Builds an <a> element pointing at the generated doc page for Ref.
function genLink(Ref) {
  // we treat the file paths different depending on if we're
  // serving via a http server or viewing from a local
  var Path = window.location.protocol.startsWith("file")
      ? `${window.location.protocol}//${RootPath}/${Ref.Path}`
      : `${window.location.protocol}//${window.location.host}/${
          Base}/${Ref.Path}`;
  if (Ref.RefType === "namespace") {
    Path = `${Path}/index.html`
  } else if (Ref.Path === "") {
    Path = `${Path}${Ref.Name}.html`;
  } else {
    Path = `${Path}/${Ref.Name}.html`;
  }
  // BUGFIX: ANode was assigned without a declaration, leaking an implicit
  // global (and throwing in strict mode).
  var ANode = document.createElement("a");
  ANode.setAttribute("href", Path);
  var TextNode = document.createTextNode(Ref.Name);
  ANode.appendChild(TextNode);
  return ANode;
}
// Recursively renders an index node and its children as nested lists.
function genHTMLOfIndex(Index, CurrentDirectory, IsOutermostList) {
  // Out will store the HTML elements that Index requires to be generated
  var Out = [];
  if (Index.Name) {
    var SpanNode = document.createElement("span");
    SpanNode.appendChild(genLink(Index, CurrentDirectory));
    Out.push(SpanNode);
  }
  if (Index.Children.length == 0)
    return Out;
  // Only the outermost list should use ol, the others should use ul
  var ListNodeName = IsOutermostList ? "ol" : "ul";
  var ListNode = document.createElement(ListNodeName);
  // BUGFIX: Child, ChildNodes and Node were assigned without declarations,
  // leaking implicit globals (and throwing in strict mode).
  for (const Child of Index.Children) {
    var LiNode = document.createElement("li");
    var ChildNodes = genHTMLOfIndex(Child, CurrentDirectory, false);
    for (const Node of ChildNodes)
      LiNode.appendChild(Node);
    ListNode.appendChild(LiNode);
  }
  Out.push(ListNode);
  return Out;
}
// Renders the whole navigation index into the left sidebar element.
function createIndex(Index) {
  // Get the DOM element where the index will be created
  var IndexDiv = document.getElementById("sidebar-left");
  // Get the relative path of this file.
  // BUGFIX: CurrentDirectory and Node were assigned without declarations,
  // leaking implicit globals (and throwing in strict mode).
  var CurrentDirectory = IndexDiv.getAttribute("path");
  var IndexNodes = genHTMLOfIndex(Index, CurrentDirectory, true);
  for (const Node of IndexNodes)
    IndexDiv.appendChild(Node);
}
// Runs after the DOM has loaded.
document.addEventListener("DOMContentLoaded", function() {
  // LoadIndex is an asynchronous function generated by clang-doc.  Loading
  // asynchronously keeps page load responsive, since the index object is
  // often huge and can contain thousands of lines.
  LoadIndex().then((Index) => { createIndex(Index); });
});
import os
import glob
import logging
import pprint
from virtualenvapi.manage import VirtualEnvironment
import virtualenvapi.exceptions as ve
from pyp2rpm.exceptions import VirtualenvFailException
from pyp2rpm.settings import DEFAULT_PYTHON_VERSION, MODULE_SUFFIXES
logger = logging.getLogger(__name__)
def site_packages_filter(site_packages_list):
    '''Return site-packages entries without installation artifacts.

    Filters out wheel ``*.dist-info`` directories and ``*.pth`` files
    (the original docstring mentioned only dist-info), which are metadata
    rather than importable packages/modules.
    '''
    return {entry for entry in site_packages_list
            if not entry.endswith(('dist-info', '.pth'))}
def scripts_filter(scripts):
    '''
    Removes .pyc files and __pycache__ from scripts
    '''
    return [name for name in scripts
            if name.split('.')[-1] != 'pyc' and name != '__pycache__']
class DirsContent(object):
    '''
    Object to store and compare directory content before and
    after installation of a package.
    '''
    def __init__(self, bindir=None, lib_sitepackages=None):
        # Sets of entry names, or None until fill() has run.
        self.bindir = bindir
        self.lib_sitepackages = lib_sitepackages

    def fill(self, path):
        '''
        Scans content of the virtualenv rooted at *path*.
        '''
        self.bindir = set(os.listdir(path + 'bin/'))
        # BUGFIX: the original pattern 'python?.?' only matched single-digit
        # minor versions and broke for python3.10+; 'python*' matches both.
        self.lib_sitepackages = set(os.listdir(glob.glob(
            path + 'lib/python*/site-packages/')[0]))

    def __sub__(self, other):
        '''
        Returns a DirsContent with the entries present in self but not other.

        Raises:
            ValueError: if either snapshot has not been filled yet.
        '''
        if any([self.bindir is None, self.lib_sitepackages is None,
                other.bindir is None, other.lib_sitepackages is None]):
            raise ValueError("Some of the attributes are uninitialized")
        result = DirsContent(
            self.bindir - other.bindir,
            self.lib_sitepackages - other.lib_sitepackages)
        return result
class VirtualEnv(object):
    '''Throw-away virtualenv used to discover a package's installed files.

    The package is installed without dependencies; the difference in the
    bin/ and site-packages directories before/after installation yields its
    packages, modules and scripts.
    '''
    def __init__(self, name, temp_dir, name_convertor, base_python_version):
        self.name = name
        self.temp_dir = temp_dir
        self.name_convertor = name_convertor
        if not base_python_version:
            base_python_version = DEFAULT_PYTHON_VERSION
        python_version = 'python' + base_python_version
        self.env = VirtualEnvironment(temp_dir + '/venv',
                                      python=python_version)
        try:
            self.env.open_or_create()
        except (ve.VirtualenvCreationException,
                ve.VirtualenvReadonlyException):
            raise VirtualenvFailException('Failed to create virtualenv')
        # Snapshot the pristine env now; the post-install snapshot is taken
        # in install_package_to_venv().
        self.dirs_before_install = DirsContent()
        self.dirs_after_install = DirsContent()
        self.dirs_before_install.fill(temp_dir + '/venv/')
        self.data = {}
    def install_package_to_venv(self):
        '''
        Installs package given as first argument to virtualenv without
        dependencies
        '''
        try:
            self.env.install(self.name, force=True, options=["--no-deps"])
        except (ve.PackageInstallationException,
                ve.VirtualenvReadonlyException):
            raise VirtualenvFailException(
                'Failed to install package to virtualenv')
        self.dirs_after_install.fill(self.temp_dir + '/venv/')
    def get_dirs_differance(self):
        '''
        Makes final versions of site_packages and scripts using DirsContent
        sub method and filters
        '''
        try:
            diff = self.dirs_after_install - self.dirs_before_install
        except ValueError:
            raise VirtualenvFailException(
                "Some of the DirsContent attributes is uninicialized")
        # A .pth file implies the package manipulates sys.path at startup.
        self.data['has_pth'] = \
            any([x for x in diff.lib_sitepackages if x.endswith('.pth')])
        site_packages = site_packages_filter(diff.lib_sitepackages)
        # Entries without a module suffix are packages; the rest are modules.
        self.data['packages'] = sorted(
            [p for p in site_packages if not p.endswith(MODULE_SUFFIXES)])
        self.data['py_modules'] = sorted(set(
            [os.path.splitext(m)[0] for m in site_packages - set(
                self.data['packages'])]))
        self.data['scripts'] = scripts_filter(sorted(diff.bindir))
        logger.debug('Data from files differance in virtualenv:')
        logger.debug(pprint.pformat(self.data))
    @property
    def get_venv_data(self):
        # Property with side effects: installs the package, then diffs.
        self.install_package_to_venv()
        self.get_dirs_differance()
        return self.data
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "PreferIsaOrDynCastInConditionalsCheck.h"
#include "clang/AST/ASTContext.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/FormatVariadic.h"
using namespace clang::ast_matchers;
namespace clang::tidy::llvm_check {
namespace {
// Matches expressions whose location comes from a macro expansion; such
// expressions are excluded from the check's matchers below.
AST_MATCHER(Expr, isMacroID) { return Node.getExprLoc().isMacroID(); }
} // namespace
void PreferIsaOrDynCastInConditionalsCheck::registerMatchers(
    MatchFinder *Finder) {
  // Matches a non-member call whose templated callee has one of the given
  // names, skipping macro expansions; binds the callee as "callee".
  auto AnyCalleeName = [](ArrayRef<StringRef> CalleeName) {
    return allOf(unless(isMacroID()), unless(cxxMemberCallExpr()),
                 callee(expr(ignoringImpCasts(
                     declRefExpr(to(namedDecl(hasAnyName(CalleeName))),
                                 hasAnyTemplateArgumentLoc(anything()))
                         .bind("callee")))));
  };
  // Condition of the form `if (cast<T>(X))` / `if (dyn_cast<T>(X))`.
  auto CondExpr = hasCondition(implicitCastExpr(
      has(callExpr(AnyCalleeName({"cast", "dyn_cast"})).bind("cond"))));
  // Also covers `if (auto *P = cast<T>(X))` condition variables.
  auto CondExprOrCondVar =
      anyOf(hasConditionVariableStatement(containsDeclaration(
                0, varDecl(hasInitializer(callExpr(AnyCalleeName({"cast"}))))
                       .bind("var"))),
            CondExpr);
  // RHS candidates of `X && isa<T>(X)`-style expressions.
  auto CallWithBindedArg =
      callExpr(
          AnyCalleeName(
              {"isa", "cast", "cast_or_null", "dyn_cast", "dyn_cast_or_null"}),
          hasArgument(0, mapAnyOf(declRefExpr, cxxMemberCallExpr).bind("arg")))
          .bind("rhs");
  Finder->addMatcher(ifStmt(CondExprOrCondVar), this);
  Finder->addMatcher(forStmt(CondExprOrCondVar), this);
  Finder->addMatcher(whileStmt(CondExprOrCondVar), this);
  Finder->addMatcher(doStmt(CondExpr), this);
  // Casting.h itself legitimately writes these patterns, so exclude it.
  Finder->addMatcher(binaryOperator(hasRHS(ignoringImpCasts(CallWithBindedArg)),
                                    hasLHS(implicitCastExpr().bind("lhs")),
                                    hasOperatorName("&&"),
                                    unless(isExpansionInFileMatching(
                                        "llvm/include/llvm/Support/Casting.h")))
                         .bind("and"),
                     this);
}
void PreferIsaOrDynCastInConditionalsCheck::check(
    const MatchFinder::MatchResult &Result) {
  const auto *Callee = Result.Nodes.getNodeAs<DeclRefExpr>("callee");
  assert(Callee && "Callee should be binded if anything is matched");
  // The first and last letter of the identifier
  // llvm::cast<T>(x)
  //       ^  ^
  //  StartLoc EndLoc
  const SourceLocation StartLoc = Callee->getLocation();
  const SourceLocation EndLoc = Callee->getNameInfo().getEndLoc();
  if (Result.Nodes.getNodeAs<VarDecl>("var")) {
    // `if (auto *P = cast<T>(X))` -> suggest dyn_cast, which can fail.
    diag(StartLoc,
         "cast<> in conditional will assert rather than return a null pointer")
        << FixItHint::CreateReplacement(SourceRange(StartLoc, EndLoc),
                                        "dyn_cast");
  } else if (Result.Nodes.getNodeAs<CallExpr>("cond")) {
    // A bare cast/dyn_cast used directly as a condition -> suggest isa.
    StringRef Message =
        "cast<> in conditional will assert rather than return a null pointer";
    if (Callee->getDecl()->getName() == "dyn_cast")
      Message = "return value from dyn_cast<> not used";
    diag(StartLoc, Message)
        << FixItHint::CreateReplacement(SourceRange(StartLoc, EndLoc), "isa");
  } else if (Result.Nodes.getNodeAs<BinaryOperator>("and")) {
    // `X && isa<T>(X)` -> suggest isa_and_nonnull<T>(X).
    const auto *LHS = Result.Nodes.getNodeAs<ImplicitCastExpr>("lhs");
    const auto *RHS = Result.Nodes.getNodeAs<CallExpr>("rhs");
    const auto *Arg = Result.Nodes.getNodeAs<Expr>("arg");
    assert(LHS && "LHS is null");
    assert(RHS && "RHS is null");
    assert(Arg && "Arg is null");
    auto GetText = [&](SourceRange R) {
      return Lexer::getSourceText(CharSourceRange::getTokenRange(R),
                                  *Result.SourceManager, getLangOpts());
    };
    const StringRef LHSString = GetText(LHS->getSourceRange());
    const StringRef ArgString = GetText(Arg->getSourceRange());
    // Only rewrite when the null-tested operand and the isa<> argument are
    // textually identical.
    if (ArgString != LHSString)
      return;
    // It is not clear which is preferred between `isa_and_nonnull` and
    // `isa_and_present`. See
    // https://discourse.llvm.org/t/psa-swapping-out-or-null-with-if-present/65018
    const std::string Replacement = llvm::formatv(
        "{}isa_and_nonnull{}",
        GetText(Callee->getQualifierLoc().getSourceRange()),
        GetText(SourceRange(Callee->getLAngleLoc(), RHS->getEndLoc())));
    diag(LHS->getBeginLoc(),
         "isa_and_nonnull<> is preferred over an explicit test for null "
         "followed by calling isa<>")
        << FixItHint::CreateReplacement(
               SourceRange(LHS->getBeginLoc(), RHS->getEndLoc()), Replacement);
  } else {
    llvm_unreachable(
        R"(One of "var", "cond" and "and" should be binded if anything is matched)");
  }
}
} // namespace clang::tidy::llvm_check | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/llvm/PreferIsaOrDynCastInConditionalsCheck.cpp |
"""Common credentials classes and constructors."""
from __future__ import print_function
import datetime
import json
import os
import urllib
import urllib2
import httplib2
import oauth2client
import oauth2client.client
import oauth2client.gce
import oauth2client.locked_file
import oauth2client.multistore_file
import oauth2client.service_account
import oauth2client.tools # for flag declarations
from six.moves import http_client
import logging
from googlecloudapis.apitools.base.py import exceptions
from googlecloudapis.apitools.base.py import util
try:
from google.apputils import flags # pylint: disable=g-import-not-at-top
FLAGS = flags.FLAGS
except ImportError:
FLAGS = None
# Names exported as the public API of this module.
__all__ = [
    'CredentialsFromFile',
    'GaeAssertionCredentials',
    'GceAssertionCredentials',
    'GetCredentials',
    'GetUserinfo',
    'ServiceAccountCredentials',
    'ServiceAccountCredentialsFromFile',
]
# TODO(craigcitro): Expose the extra args here somewhere higher up,
# possibly as flags in the generated CLI.
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
                   credentials_filename=None,
                   service_account_name=None, service_account_keyfile=None,
                   service_account_json_keyfile=None,
                   api_key=None, client=None):
  """Attempt to get credentials, using an oauth dance as the last resort.

  Credential sources are tried in order: an explicit service-account JSON
  keyfile, a service-account name/keyfile pair, App Engine assertion
  credentials, GCE metadata-server credentials, and finally a cached-token
  file (which triggers an interactive OAuth flow when absent or invalid).

  Args:
    package_name: (string) client package name, used for a default
        user agent.
    scopes: iterable of requested OAuth scopes.
    client_id: OAuth client id for the three-legged flow.
    client_secret: OAuth client secret for the three-legged flow.
    user_agent: user agent string, or None to generate a default.
    credentials_filename: optional path of the cached-token file
        (defaults to ~/.apitools.token).
    service_account_name: optional service account name; must be paired
        with service_account_keyfile.
    service_account_keyfile: optional private key file; must be paired
        with service_account_name.
    service_account_json_keyfile: optional JSON service account key path;
        takes precedence over the name/keyfile pair.
    api_key: unused here; accepted for interface compatibility.
    client: unused here; accepted for interface compatibility.

  Returns:
    An oauth2client credentials object.

  Raises:
    exceptions.CredentialsError: if the service account args are
        inconsistent or no credential source succeeds.
  """
  scopes = util.NormalizeScopes(scopes)
  if ((service_account_name and not service_account_keyfile) or
      (service_account_keyfile and not service_account_name)):
    raise exceptions.CredentialsError(
        'Service account name or keyfile provided without the other')
  # TODO(craigcitro): Error checking.
  client_info = {
      'client_id': client_id,
      'client_secret': client_secret,
      # `scopes` is already normalized above; the extra call is harmless.
      'scope': ' '.join(sorted(util.NormalizeScopes(scopes))),
      'user_agent': user_agent or '%s-generated/0.1' % package_name,
  }
  service_account_kwargs = {
      'user_agent': client_info['user_agent'],
  }
  if service_account_json_keyfile:
    with open(service_account_json_keyfile) as keyfile:
      service_account_info = json.load(keyfile)
    if service_account_info.get('type') != oauth2client.client.SERVICE_ACCOUNT:
      raise exceptions.CredentialsError(
          'Invalid service account credentials: %s' % (
              service_account_json_keyfile,))
    # Private oauth2client constructor; there is no public equivalent in
    # this oauth2client version.
    credentials = oauth2client.service_account._ServiceAccountCredentials(  # pylint: disable=protected-access
        service_account_id=service_account_info['client_id'],
        service_account_email=service_account_info['client_email'],
        private_key_id=service_account_info['private_key_id'],
        private_key_pkcs8_text=service_account_info['private_key'],
        scopes=scopes,
        **service_account_kwargs)
    return credentials
  if service_account_name:
    credentials = ServiceAccountCredentialsFromFile(
        service_account_name, service_account_keyfile, scopes,
        service_account_kwargs=service_account_kwargs)
    if credentials is not None:
      return credentials
  # Each .Get() returns None (rather than raising) when that environment
  # is unavailable, so the fallbacks chain cleanly.
  credentials = GaeAssertionCredentials.Get(scopes)
  if credentials is not None:
    return credentials
  credentials = GceAssertionCredentials.Get(scopes)
  if credentials is not None:
    return credentials
  credentials_filename = credentials_filename or os.path.expanduser(
      '~/.apitools.token')
  credentials = CredentialsFromFile(credentials_filename, client_info)
  if credentials is not None:
    return credentials
  raise exceptions.CredentialsError('Could not create valid credentials')
def ServiceAccountCredentialsFromFile(
    service_account_name, private_key_filename, scopes,
    service_account_kwargs=None):
  """Create service account credentials from a private key file.

  Args:
    service_account_name: name (email) of the service account.
    private_key_filename: path of the file holding the private key.
    scopes: scopes for the requested credentials.
    service_account_kwargs: optional dict of extra credential kwargs.

  Returns:
    Signed-JWT assertion credentials for the service account.
  """
  with open(private_key_filename) as key_file:
    private_key = key_file.read()
  return ServiceAccountCredentials(
      service_account_name, private_key, scopes,
      service_account_kwargs=service_account_kwargs)
def ServiceAccountCredentials(service_account_name, private_key, scopes,
                              service_account_kwargs=None):
  """Build SignedJwtAssertionCredentials for the given service account."""
  extra_kwargs = service_account_kwargs or {}
  normalized_scopes = util.NormalizeScopes(scopes)
  return oauth2client.client.SignedJwtAssertionCredentials(
      service_account_name, private_key, normalized_scopes, **extra_kwargs)
def _EnsureFileExists(filename):
  """Touches a file; returns False on error, True on success.

  The file is created with restrictive permissions (umask 0o177, i.e.
  owner read/write only) since it will hold credential data.
  """
  if not os.path.exists(filename):
    old_umask = os.umask(0o177)
    try:
      open(filename, 'a+b').close()
    except (IOError, OSError):
      # BUG FIX: on Python 2, open() failures raise IOError, which is
      # *not* a subclass of OSError, so the previous `except OSError`
      # let the error escape instead of returning False. (On Python 3
      # IOError is an alias of OSError, so this is a no-op there.)
      return False
    finally:
      os.umask(old_umask)
  return True
def _OpenNoProxy(request):
  """Wrapper around urllib2.open that ignores proxies."""
  # An empty ProxyHandler disables all proxy detection for this opener.
  no_proxy_handler = urllib2.ProxyHandler({})
  opener = urllib2.build_opener(no_proxy_handler)
  return opener.open(request)
# TODO(craigcitro): We override to add some utility code, and to
# update the old refresh implementation. Push this code into
# oauth2client.
class GceAssertionCredentials(oauth2client.gce.AppAssertionCredentials):
  """Assertion credentials for GCE instances.

  The scopes available to the instance are detected via the metadata
  server and may be cached on disk via the 'cache_filename' keyword.
  """

  def __init__(self, scopes=None, service_account_name='default', **kwds):
    """Initializes the credentials instance.

    Args:
      scopes: The scopes to get. If None, whatever scopes that are available
          to the instance are used.
      service_account_name: The service account to retrieve the scopes from.
      **kwds: Additional keyword args. 'cache_filename', if present, names
          a file used to cache the detected scopes; all kwargs are also
          forwarded to the superclass.

    Raises:
      exceptions.ResourceUnavailableError: not running on GCE, or the
          service account does not exist.
      exceptions.CredentialsError: a requested scope is unavailable.
    """
    # If there is a connectivity issue with the metadata server,
    # detection calls may fail even if we've already successfully identified
    # these scopes in the same execution. However, the available scopes don't
    # change once an instance is created, so there is no reason to perform
    # more than one query.
    #
    # TODO(craigcitro): Move this into oauth2client.
    self.__service_account_name = service_account_name
    cache_filename = None
    cached_scopes = None
    if 'cache_filename' in kwds:
      cache_filename = kwds['cache_filename']
      cached_scopes = self._CheckCacheFileForMatch(cache_filename, scopes)
    # Query the metadata server only on a cache miss, then refresh the
    # cache with whatever was detected.
    scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)
    if cache_filename and not cached_scopes:
      self._WriteCacheFile(cache_filename, scopes)
    super(GceAssertionCredentials, self).__init__(scopes, **kwds)

  @classmethod
  def Get(cls, *args, **kwds):
    """Returns an instance, or None when creation fails with a known error."""
    try:
      return cls(*args, **kwds)
    except exceptions.Error:
      return None

  def _CheckCacheFileForMatch(self, cache_filename, scopes):
    """Checks the cache file to see if it matches the given credentials.

    Args:
      cache_filename: Cache filename to check.
      scopes: Scopes for the desired credentials.

    Returns:
      List of scopes (if cache matches) or None.
    """
    creds = {  # Credentials metadata dict.
        'scopes': sorted(list(scopes)) if scopes else None,
        'svc_acct_name': self.__service_account_name}
    if _EnsureFileExists(cache_filename):
      locked_file = oauth2client.locked_file.LockedFile(
          cache_filename, 'r+b', 'rb')
      try:
        locked_file.open_and_lock()
        cached_creds_str = locked_file.file_handle().read()
        if cached_creds_str:
          # Cached credentials metadata dict.
          cached_creds = json.loads(cached_creds_str)
          if (creds['svc_acct_name'] == cached_creds['svc_acct_name'] and
              (creds['scopes'] is None or
               creds['scopes'] == cached_creds['scopes'])):
            # On a hit with scopes=None we adopt the cached scope list;
            # otherwise the caller's scopes are returned unchanged.
            scopes = cached_creds['scopes']
      finally:
        locked_file.unlock_and_close()
    return scopes

  def _WriteCacheFile(self, cache_filename, scopes):
    """Writes the credential metadata to the cache file.

    This does not save the credentials themselves (CredentialStore class
    optionally handles that after this class is initialized).

    Args:
      cache_filename: Cache filename to check.
      scopes: Scopes for the desired credentials.
    """
    if _EnsureFileExists(cache_filename):
      locked_file = oauth2client.locked_file.LockedFile(
          cache_filename, 'r+b', 'rb')
      try:
        locked_file.open_and_lock()
        if locked_file.is_locked():
          creds = {  # Credentials metadata dict.
              'scopes': sorted(list(scopes)),
              'svc_acct_name': self.__service_account_name}
          # Note: json.dumps's `encoding` kwarg is Python 2 only.
          locked_file.file_handle().write(json.dumps(creds, encoding='ascii'))
        # If it's not locked, the locking process will write the same
        # data to the file, so just continue.
      finally:
        locked_file.unlock_and_close()

  def _ScopesFromMetadataServer(self, scopes):
    """Detects instance scopes, validating any explicitly requested ones.

    Args:
      scopes: requested scopes, or None for everything available.

    Returns:
      The scopes usable by this instance.

    Raises:
      exceptions.ResourceUnavailableError: not on GCE, or the service
          account is missing.
      exceptions.CredentialsError: a requested scope is unavailable.
    """
    if not util.DetectGce():
      raise exceptions.ResourceUnavailableError(
          'GCE credentials requested outside a GCE instance')
    if not self.GetServiceAccount(self.__service_account_name):
      raise exceptions.ResourceUnavailableError(
          'GCE credentials requested but service account %s does not exist.' %
          self.__service_account_name)
    if scopes:
      scope_ls = util.NormalizeScopes(scopes)
      instance_scopes = self.GetInstanceScopes()
      # BUG FIX: this previously used `scope_ls > instance_scopes`, a
      # *proper superset* test, which silently accepted partially
      # overlapping scope sets. Any requested scope missing from the
      # instance must be an error.
      if scope_ls - instance_scopes:
        raise exceptions.CredentialsError(
            'Instance did not have access to scopes %s' % (
                sorted(list(scope_ls - instance_scopes)),))
    else:
      scopes = self.GetInstanceScopes()
    return scopes

  def GetServiceAccount(self, account):
    """Returns True if `account` is listed by the metadata server."""
    account_uri = (
        'http://metadata.google.internal/computeMetadata/'
        'v1/instance/service-accounts')
    additional_headers = {'X-Google-Metadata-Request': 'True'}
    request = urllib2.Request(account_uri, headers=additional_headers)
    try:
      response = _OpenNoProxy(request)
    except urllib2.URLError as e:
      raise exceptions.CommunicationError(
          'Could not reach metadata service: %s' % e.reason)
    # Each response line is an account name, possibly '/'-terminated.
    response_lines = [line.rstrip('/\n\r') for line in response.readlines()]
    return account in response_lines

  def GetInstanceScopes(self):
    """Fetches the scope set granted to this instance's service account."""
    # Extra header requirement can be found here:
    # https://developers.google.com/compute/docs/metadata
    scopes_uri = (
        'http://metadata.google.internal/computeMetadata/v1/instance/'
        'service-accounts/%s/scopes') % self.__service_account_name
    additional_headers = {'X-Google-Metadata-Request': 'True'}
    request = urllib2.Request(scopes_uri, headers=additional_headers)
    try:
      response = _OpenNoProxy(request)
    except urllib2.URLError as e:
      raise exceptions.CommunicationError(
          'Could not reach metadata service: %s' % e.reason)
    return util.NormalizeScopes(scope.strip() for scope in response.readlines())

  def _refresh(self, do_request):  # pylint: disable=g-bad-name
    """Refresh self.access_token.

    This function replaces AppAssertionCredentials._refresh, which does not use
    the credential store and is therefore poorly suited for multi-threaded
    scenarios.

    Args:
      do_request: A function matching httplib2.Http.request's signature.
    """
    # pylint: disable=protected-access
    oauth2client.client.OAuth2Credentials._refresh(self, do_request)
    # pylint: enable=protected-access

  def _do_refresh_request(self, unused_http_request):
    """Refresh self.access_token by querying the metadata server.

    If self.store is initialized, store acquired credentials there.

    Raises:
      exceptions.CommunicationError: metadata server unreachable.
      exceptions.CredentialsError: malformed token response.
    """
    token_uri = (
        'http://metadata.google.internal/computeMetadata/v1/instance/'
        'service-accounts/%s/token') % self.__service_account_name
    extra_headers = {'X-Google-Metadata-Request': 'True'}
    request = urllib2.Request(token_uri, headers=extra_headers)
    try:
      content = _OpenNoProxy(request).read()
    except urllib2.URLError as e:
      # Mark (and persist) the credentials as invalid before raising.
      self.invalid = True
      if self.store:
        self.store.locked_put(self)
      raise exceptions.CommunicationError(
          'Could not reach metadata service: %s' % e.reason)
    try:
      credential_info = json.loads(content)
    except ValueError:
      raise exceptions.CredentialsError(
          'Invalid credentials response: uri %s' % token_uri)
    self.access_token = credential_info['access_token']
    if 'expires_in' in credential_info:
      self.token_expiry = (
          datetime.timedelta(seconds=int(credential_info['expires_in'])) +
          datetime.datetime.utcnow())
    else:
      self.token_expiry = None
    self.invalid = False
    if self.store:
      self.store.locked_put(self)

  @classmethod
  def from_json(cls, json_data):
    """Rebuilds credentials from their serialized JSON form.

    NOTE(review): only the singular 'scope' key is consulted here —
    presumably the serialized form stores a single scope string; confirm
    against the corresponding to_json output.
    """
    data = json.loads(json_data)
    credentials = GceAssertionCredentials(scopes=[data['scope']])
    if 'access_token' in data:
      credentials.access_token = data['access_token']
    if 'token_expiry' in data:
      credentials.token_expiry = datetime.datetime.strptime(
          data['token_expiry'], oauth2client.client.EXPIRY_FORMAT)
    if 'invalid' in data:
      credentials.invalid = data['invalid']
    return credentials
# TODO(craigcitro): Currently, we can't even *load*
# `oauth2client.appengine` without being on appengine, because of how
# it handles imports. Fix that by splitting that module into
# GAE-specific and GAE-independent bits, and guarding imports.
class GaeAssertionCredentials(oauth2client.client.AssertionCredentials):
  """Assertion credentials for Google App Engine apps."""

  def __init__(self, scopes, **kwds):
    """Initializes GAE assertion credentials for the given scopes.

    Args:
      scopes: iterable of requested OAuth scopes.
      **kwds: forwarded to AssertionCredentials.

    Raises:
      exceptions.ResourceUnavailableError: when not on App Engine.
    """
    if not util.DetectGae():
      # BUG FIX: this message previously said "GCE ... GCE instance",
      # copy-pasted from the GCE credentials class; this class guards
      # App Engine.
      raise exceptions.ResourceUnavailableError(
          'GAE credentials requested outside a GAE instance')
    self._scopes = list(util.NormalizeScopes(scopes))
    super(GaeAssertionCredentials, self).__init__(None, **kwds)

  @classmethod
  def Get(cls, *args, **kwds):
    """Returns an instance, or None when creation fails with a known error."""
    try:
      return cls(*args, **kwds)
    except exceptions.Error:
      return None

  @classmethod
  def from_json(cls, json_data):
    """Rebuilds credentials from their serialized JSON form."""
    data = json.loads(json_data)
    return GaeAssertionCredentials(data['_scopes'])

  def _refresh(self, _):
    """Refresh self.access_token.

    Args:
      _: (ignored) A function matching httplib2.Http.request's signature.
    """
    # Imported lazily: this module only exists on App Engine.
    from google.appengine.api import app_identity  # pylint: disable=g-import-not-at-top
    try:
      token, _ = app_identity.get_access_token(self._scopes)
    except app_identity.Error as e:
      raise exceptions.CredentialsError(str(e))
    self.access_token = token
# TODO(craigcitro): Switch this from taking a path to taking a stream.
def CredentialsFromFile(path, client_info):
  """Read credentials from a file.

  Loads cached OAuth credentials from the multistore file at `path`,
  falling back to an interactive OAuth flow (retried until it succeeds
  or the user aborts with ^C) when no valid entry exists.

  Args:
    path: path of the multistore credentials file.
    client_info: dict with 'client_id', 'client_secret', 'scope' and
        'user_agent' keys, as built by GetCredentials.

  Returns:
    An oauth2client credentials object backed by the file store.

  Raises:
    exceptions.CredentialsError: on a communication error during the
        OAuth flow.
  """
  credential_store = oauth2client.multistore_file.get_credential_storage(
      path,
      client_info['client_id'],
      client_info['user_agent'],
      client_info['scope'])
  if hasattr(FLAGS, 'auth_local_webserver'):
    # Force the console-based flow rather than a local webserver.
    FLAGS.auth_local_webserver = False
  credentials = credential_store.get()
  if credentials is None or credentials.invalid:
    print('Generating new OAuth credentials ...')
    while True:
      # If authorization fails, we want to retry, rather than let this
      # cascade up and get caught elsewhere. If users want out of the
      # retry loop, they can ^C.
      try:
        flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
        # We delay this import because it's rarely needed and takes a long time.
        from oauth2client import tools  # pylint:disable=g-import-not-at-top
        credentials = tools.run(flow, credential_store)
        break
      except (oauth2client.client.FlowExchangeError, SystemExit) as e:
        # Here SystemExit is "no credential at all", and the
        # FlowExchangeError is "invalid" -- usually because you reused
        # a token.
        print('Invalid authorization: %s' % (e,))
      except httplib2.HttpLib2Error as e:
        print('Communication error: %s' % (e,))
        raise exceptions.CredentialsError(
            'Communication error creating credentials: %s' % e)
  return credentials
# TODO(craigcitro): Push this into oauth2client.
def GetUserinfo(credentials, http=None):  # pylint: disable=invalid-name
  """Get the userinfo associated with the given credentials.

  This is dependent on the token having either the userinfo.email or
  userinfo.profile scope for the given token.

  Args:
    credentials: (oauth2client.client.Credentials) incoming credentials
    http: (httplib2.Http, optional) http instance to use

  Returns:
    The email address for this token, or None if the required scopes
    aren't available.
  """
  http = http or httplib2.Http()
  encoded_args = urllib.urlencode({'access_token': credentials.access_token})
  url = 'https://www.googleapis.com/oauth2/v2/tokeninfo?%s' % encoded_args
  # We ignore communication woes here (i.e. SSL errors, socket
  # timeout), as handling these should be done in a common location.
  response, content = http.request(url)
  if response.status == http_client.BAD_REQUEST:
    # A 400 may just mean the token has expired: refresh once and retry.
    credentials.refresh(http)
    response, content = http.request(url)
  return json.loads(content or '{}')  # Save ourselves from an empty reply.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot # pylint: disable=g-import-not-at-top
except ImportError:
# Fall back on pydot if necessary.
# Silence a `print` statement that occurs in case of import error,
# by temporarily replacing sys.stdout.
_stdout = sys.stdout
sys.stdout = sys.stderr
try:
import pydot # pylint: disable=g-import-not-at-top
except ImportError:
pydot = None
finally:
# Restore sys.stdout.
sys.stdout = _stdout
def _check_pydot():
  """Verify that pydot and graphviz are installed and functional.

  Raises:
    ImportError: if rendering a trivial graph fails.
  """
  try:
    # Rendering an empty graph exercises both the pydot bindings and the
    # underlying graphviz executables.
    pydot.Dot.create(pydot.Dot())
  except Exception:
    # pydot raises a plain Exception on failure, so nothing narrower can
    # be caught here.
    message = ('Failed to import pydot. You must install pydot'
               ' and graphviz for `pydotprint` to work.')
    raise ImportError(message)
def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB'):
  """Convert a Keras model to dot format.

  Arguments:
      model: A Keras model instance.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.

  Returns:
      A `pydot.Dot` instance representing the Keras model.

  Raises:
      ImportError: if pydot/graphviz are not available (via _check_pydot).
  """
  from tensorflow.contrib.keras.python.keras.layers.wrappers import Wrapper  # pylint: disable=g-import-not-at-top
  from tensorflow.contrib.keras.python.keras.models import Sequential  # pylint: disable=g-import-not-at-top
  _check_pydot()
  dot = pydot.Dot()
  dot.set('rankdir', rankdir)
  dot.set('concentrate', True)
  dot.set_node_defaults(shape='record')
  if isinstance(model, Sequential):
    if not model.built:
      model.build()
    # Plot the functional model underlying the Sequential container.
    model = model.model
  layers = model.layers
  # Create graph nodes.
  for layer in layers:
    # Node ids are the layers' Python object ids, which are unique for
    # the lifetime of the model.
    layer_id = str(id(layer))
    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__
    if isinstance(layer, Wrapper):
      layer_name = '{}({})'.format(layer_name, layer.layer.name)
      child_class_name = layer.layer.__class__.__name__
      class_name = '{}({})'.format(class_name, child_class_name)
    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name
    # Rebuild the label as a table including input/output shapes.
    if show_shapes:
      try:
        outputlabels = str(layer.output_shape)
      except AttributeError:
        # Layers used at multiple nodes have no single output_shape.
        outputlabels = 'multiple'
      if hasattr(layer, 'input_shape'):
        inputlabels = str(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join([str(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = 'multiple'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
                                                     outputlabels)
    node = pydot.Node(layer_id, label=label)
    dot.add_node(node)
  # Connect nodes with edges.
  for layer in layers:
    layer_id = str(id(layer))
    for i, node in enumerate(layer.inbound_nodes):
      node_key = layer.name + '_ib-' + str(i)
      # Only draw edges for nodes that belong to this model's own graph;
      # shared layers may carry nodes from other models.
      if node_key in model.container_nodes:
        for inbound_layer in node.inbound_layers:
          inbound_layer_id = str(id(inbound_layer))
          layer_id = str(id(layer))
          dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
  return dot
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB'):
  """Converts a Keras model to dot format and save to a file.

  Arguments:
      model: A Keras model instance
      to_file: File name of the plot image.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.
  """
  dot = model_to_dot(model, show_shapes, show_layer_names, rankdir)
  extension = os.path.splitext(to_file)[1]
  # Default to PNG when the filename has no extension; otherwise strip
  # the leading dot for pydot's `format` argument.
  extension = extension[1:] if extension else 'png'
  dot.write(to_file, format=extension)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tsacct.c - System accounting over taskstats interface
*
* Copyright (C) Jay Lan, <jlan@sgi.com>
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/cputime.h>
#include <linux/tsacct_kern.h>
#include <linux/acct.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
/*
 * fill in basic accounting fields
 *
 * Populates identity, timing, flag, and fault statistics for @tsk into
 * @stats, translating ids into @user_ns / @pid_ns as seen by the reader.
 */
void bacct_add_tsk(struct user_namespace *user_ns,
		   struct pid_namespace *pid_ns,
		   struct taskstats *stats, struct task_struct *tsk)
{
	const struct cred *tcred;
	u64 utime, stime, utimescaled, stimescaled;
	u64 now_ns, delta;
	time64_t btime;
	/* The fixed-size comm buffer in taskstats must fit a task comm. */
	BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
	/* calculate task elapsed time in nsec */
	now_ns = ktime_get_ns();
	/* store whole group time first */
	delta = now_ns - tsk->group_leader->start_time;
	/* Convert to micro seconds */
	do_div(delta, NSEC_PER_USEC);
	stats->ac_tgetime = delta;
	/* Elapsed time of this task alone, also in usec. */
	delta = now_ns - tsk->start_time;
	do_div(delta, NSEC_PER_USEC);
	stats->ac_etime = delta;
	/* Convert to seconds for btime (note y2106 limit) */
	btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC);
	/* ac_btime is 32-bit and clamps; ac_btime64 keeps the full value. */
	stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
	stats->ac_btime64 = btime;
	if (tsk->flags & PF_EXITING)
		stats->ac_exitcode = tsk->exit_code;
	if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))
		stats->ac_flag |= AFORK;
	if (tsk->flags & PF_SUPERPRIV)
		stats->ac_flag |= ASU;
	if (tsk->flags & PF_DUMPCORE)
		stats->ac_flag |= ACORE;
	if (tsk->flags & PF_SIGNALED)
		stats->ac_flag |= AXSIG;
	stats->ac_nice = task_nice(tsk);
	stats->ac_sched = tsk->policy;
	/* Translate pid/tgid/ppid into the reader's pid namespace. */
	stats->ac_pid = task_pid_nr_ns(tsk, pid_ns);
	stats->ac_tgid = task_tgid_nr_ns(tsk, pid_ns);
	stats->ac_ppid = task_ppid_nr_ns(tsk, pid_ns);
	/* Credentials are RCU-protected; map ids into the reader's userns. */
	rcu_read_lock();
	tcred = __task_cred(tsk);
	stats->ac_uid = from_kuid_munged(user_ns, tcred->uid);
	stats->ac_gid = from_kgid_munged(user_ns, tcred->gid);
	rcu_read_unlock();
	/* CPU times are kept in nsec internally; report usec. */
	task_cputime(tsk, &utime, &stime);
	stats->ac_utime = div_u64(utime, NSEC_PER_USEC);
	stats->ac_stime = div_u64(stime, NSEC_PER_USEC);
	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	stats->ac_utimescaled = div_u64(utimescaled, NSEC_PER_USEC);
	stats->ac_stimescaled = div_u64(stimescaled, NSEC_PER_USEC);
	stats->ac_minflt = tsk->min_flt;
	stats->ac_majflt = tsk->maj_flt;
	strscpy_pad(stats->ac_comm, tsk->comm);
}
#ifdef CONFIG_TASK_XACCT
#define KB 1024
#define MB (1024*KB)
#define KB_MASK (~(KB-1))
/*
 * fill in extended accounting fields
 *
 * Reports memory integrals, high-water marks, and I/O counters for @p.
 */
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
	struct mm_struct *mm;
	/* convert pages-nsec/1024 to Mbyte-usec, see __acct_update_integrals */
	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE;
	do_div(stats->coremem, 1000 * KB);
	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE;
	do_div(stats->virtmem, 1000 * KB);
	/* get_task_mm() can return NULL (e.g. kernel threads). */
	mm = get_task_mm(p);
	if (mm) {
		/* adjust to KB unit */
		stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;
		stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
		mmput(mm);
	}
	/*
	 * NOTE(review): KB_MASK rounds these counters down to a multiple of
	 * 1024 — including the syscall *counts*, not just byte counts.
	 * Presumably intentional; confirm against taskstats consumers
	 * before changing.
	 */
	stats->read_char = p->ioac.rchar & KB_MASK;
	stats->write_char = p->ioac.wchar & KB_MASK;
	stats->read_syscalls = p->ioac.syscr & KB_MASK;
	stats->write_syscalls = p->ioac.syscw & KB_MASK;
#ifdef CONFIG_TASK_IO_ACCOUNTING
	stats->read_bytes = p->ioac.read_bytes & KB_MASK;
	stats->write_bytes = p->ioac.write_bytes & KB_MASK;
	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
#else
	stats->read_bytes = 0;
	stats->write_bytes = 0;
	stats->cancelled_write_bytes = 0;
#endif
}
#undef KB
#undef MB
/*
 * __acct_update_integrals - accumulate rss/vm time integrals for @tsk,
 * given the task's current user and system cputime in nanoseconds.
 */
static void __acct_update_integrals(struct task_struct *tsk,
				    u64 utime, u64 stime)
{
	u64 time, delta;
	/* Nothing to account for tasks without an mm (kernel threads). */
	if (unlikely(!tsk->mm || (tsk->flags & PF_KTHREAD)))
		return;
	time = stime + utime;
	delta = time - tsk->acct_timexpd;
	/* Rate-limit: only accumulate once per tick's worth of cputime. */
	if (delta < TICK_NSEC)
		return;
	tsk->acct_timexpd = time;
	/*
	 * Divide by 1024 to avoid overflow, and to avoid division.
	 * The final unit reported to userspace is Mbyte-usecs,
	 * the rest of the math is done in xacct_add_tsk.
	 */
	tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;
	tsk->acct_vm_mem1 += delta * READ_ONCE(tsk->mm->total_vm) >> 10;
}
/**
 * acct_update_integrals - update mm integral fields in task_struct
 * @tsk: task_struct for accounting
 *
 * Samples the task's cputime and folds it into the memory integrals.
 */
void acct_update_integrals(struct task_struct *tsk)
{
	u64 utime, stime;
	unsigned long flags;
	/* Disable local interrupts so sample + accumulate aren't interleaved. */
	local_irq_save(flags);
	task_cputime(tsk, &utime, &stime);
	__acct_update_integrals(tsk, utime, stime);
	local_irq_restore(flags);
}
/**
 * acct_account_cputime - update mm integral after cputime update
 * @tsk: task_struct for accounting
 *
 * Unlike acct_update_integrals(), this reads tsk->utime/stime directly
 * rather than sampling via task_cputime().
 */
void acct_account_cputime(struct task_struct *tsk)
{
	__acct_update_integrals(tsk, tsk->utime, tsk->stime);
}
/**
 * acct_clear_integrals - clear the mm integral fields in task_struct
 * @tsk: task_struct whose accounting fields are cleared
 *
 * Resets the rate-limit timestamp and both accumulated integrals.
 */
void acct_clear_integrals(struct task_struct *tsk)
{
	tsk->acct_timexpd = 0;
	tsk->acct_rss_mem1 = 0;
	tsk->acct_vm_mem1 = 0;
}
#endif | c | github | https://github.com/torvalds/linux | kernel/tsacct.c |
//===--- SourceLoc.h - ReST source location and source manager classes ----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_MARKUP_SOURCELOC_H
#define SWIFT_MARKUP_SOURCELOC_H
#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>
namespace swift {
namespace markup {
/// A location in ReST source text, represented as an opaque byte offset
/// assigned by a SourceManager. A Value of 0 denotes an invalid location.
class SourceLoc {
  friend class SourceManagerBase;
  template<typename ExternalSourceLocTy>
  friend class SourceManager;
  // The opaque offset; only SourceManager(Base) assigns meaningful values.
  unsigned Value;
  static const unsigned InvalidValue = 0;
public:
  SourceLoc() : Value(InvalidValue) {}
  SourceLoc(const SourceLoc &) = default;
  /// True when this location was assigned by a SourceManager.
  bool isValid() const { return !isInvalid(); }
  bool isInvalid() const { return Value == InvalidValue; }
  bool operator==(SourceLoc RHS) const { return Value == RHS.Value; }
  bool operator!=(SourceLoc RHS) const { return !(*this == RHS); }
  /// Return a source location advanced a specified number of bytes.
  SourceLoc getAdvancedLoc(int ByteOffset) const {
    assert(isValid() && "Can't advance an invalid location");
    SourceLoc Result = *this;
    Result.Value += ByteOffset;
    return Result;
  }
};
/// A half-open byte range [Start; End) of ReST source. Start and End are
/// either both valid or both invalid (enforced by the two-arg constructor).
class SourceRange {
public:
  /// The source range is a half-open byte range [Start; End).
  SourceLoc Start, End;
  SourceRange() {}
  /// An empty range located at \p Loc.
  SourceRange(SourceLoc Loc) : Start(Loc), End(Loc) { }
  SourceRange(SourceLoc Start, SourceLoc End) : Start(Start), End(End) {
    assert(Start.isValid() == End.isValid() &&
           "Start and end should either both be valid or both be invalid!");
  }
  bool isValid() const { return Start.isValid(); }
  bool isInvalid() const { return Start.isInvalid(); }
};
/// Non-template state shared by all SourceManager instantiations: the
/// registered ranges and the ordering/containment predicates.
class SourceManagerBase {
protected:
  // Next location to hand out; initialized to 1 because 0 is "invalid".
  SourceLoc NextUnassignedLoc;
  /// All source pieces, in order of increasing source location.
  std::vector<SourceRange> RegisteredRanges;
public:
  SourceManagerBase() : NextUnassignedLoc() {
    NextUnassignedLoc.Value = 1;
  }
  SourceManagerBase(const SourceManagerBase &) = delete;
  void operator=(const SourceManagerBase &) = delete;
  /// Returns true if \p LHS precedes \p RHS in the (single) buffer.
  bool isBeforeInBuffer(SourceLoc LHS, SourceLoc RHS) const {
    // When we support multiple buffers, assert that locations come from the
    // same buffer.
    return LHS.Value < RHS.Value;
  }
  /// Returns true if range \c R contains the location \c Loc.
  bool containsLoc(SourceRange R, SourceLoc Loc) const {
    return Loc == R.Start ||
        (isBeforeInBuffer(R.Start, Loc) && isBeforeInBuffer(Loc, R.End));
  }
};
/// Maps internal ReST source locations back to caller-provided external
/// locations, one external location per registered line.
template <typename ExternalSourceLocTy>
class SourceManager : public SourceManagerBase {
  // Parallel to RegisteredRanges: the external location of each line.
  std::vector<ExternalSourceLocTy> ExternalLocs;
public:
  SourceManager() = default;
  SourceManager(const SourceManager &) = delete;
  void operator=(const SourceManager &) = delete;
  /// Assigns internal locations to \p Line; returns an invalid range for
  /// lines the manager refuses to track (see the definition).
  SourceRange registerLine(StringRef Line, ExternalSourceLocTy ExternalLoc);
  /// Returns the external source range and a byte offset inside it.
  std::pair<ExternalSourceLocTy, unsigned>
  toExternalSourceLoc(SourceLoc Loc) const;
};
template <typename ExternalSourceLocTy>
SourceRange SourceManager<ExternalSourceLocTy>::registerLine(
    StringRef Line, ExternalSourceLocTy ExternalLoc) {
  // Refuse overly long lines; callers receive an invalid (default) range.
  if (Line.size() > 4095)
    return SourceRange();
  SourceLoc Start = NextUnassignedLoc;
  SourceLoc End = Start.getAdvancedLoc(Line.size());
  RegisteredRanges.push_back(SourceRange(Start, End));
  ExternalLocs.push_back(ExternalLoc);
  // Leave a 2-byte gap between consecutive lines — presumably room for a
  // line terminator; confirm before relying on adjacency arithmetic.
  NextUnassignedLoc = End.getAdvancedLoc(2);
#ifndef NDEBUG
  // To make debugging easier, make each line start at offset that is equal to
  // 1 mod 1000.
  NextUnassignedLoc.Value = ((NextUnassignedLoc.Value + 999) / 1000) * 1000 + 1;
#endif
  return SourceRange(Start, End);
}
template <typename ExternalSourceLocTy>
std::pair<ExternalSourceLocTy, unsigned>
SourceManager<ExternalSourceLocTy>::toExternalSourceLoc(SourceLoc Loc) const {
  // Binary search for the first registered range whose Start is not
  // before Loc.
  // NOTE(review): for a Loc strictly *inside* a line (Start < Loc < End)
  // this lower_bound lands on the *next* range and the containsLoc
  // assertion below would fire — verify whether callers only ever pass
  // range-start locations.
  auto I = std::lower_bound(RegisteredRanges.begin(), RegisteredRanges.end(),
                            Loc, [this](const SourceRange &LHS, SourceLoc Loc) {
    return this->isBeforeInBuffer(LHS.Start, Loc);
  });
  assert(I != RegisteredRanges.end() && "unknown source location");
  const auto &InternalRange = *I;
  assert(containsLoc(InternalRange, Loc) && "unknown source location");
  // ExternalLocs is index-parallel to RegisteredRanges.
  const auto &ExternalLoc = ExternalLocs[I - RegisteredRanges.begin()];
  return { ExternalLoc, Loc.Value - InternalRange.Start.Value };
}
} // namespace markup
} // namespace swift
#endif // SWIFT_MARKUP_SOURCELOC_H | c | github | https://github.com/apple/swift | include/swift/Markup/SourceLoc.h |
from abc import ABCMeta, abstractmethod
class Preprocess:
    """Composable dataset preprocessing step.

    Subclasses implement ``_apply_preprocessing`` (and, when reversible,
    ``_reverse_preprocessing``). Steps chain through ``parent_preprocess``:
    applying runs the parent chain first and this step last; reversing
    undoes this step first, then the parent chain.
    """

    # Python 2 style metaclass declaration; inert under Python 3 — TODO
    # confirm the target interpreter version before modernizing.
    __metaclass__ = ABCMeta

    def __init__(self, parent_preprocess=None):
        # Optional upstream step executed before this one on apply.
        self._parent_preprocess = parent_preprocess

    def __call__(self, dset):
        """Alias for :meth:`apply_preprocess`."""
        return self.apply_preprocess(dset)

    def apply_preprocess(self, dset):
        """Run the parent chain (if any), then this step's transform."""
        # Explicit None check instead of try/except AttributeError, so a
        # genuine AttributeError inside the parent is no longer swallowed.
        if self._parent_preprocess is not None:
            dset = self._parent_preprocess.apply_preprocess(dset)
        return self._apply_preprocessing(dset)

    def reverse_preprocessing(self, dset):
        """Undo this step, then the parent chain (reverse of apply order).

        Raises:
            NotImplementedError: if this step cannot be reversed.
        """
        try:
            dset = self._reverse_preprocessing(dset)
        except NotImplementedError:
            raise NotImplementedError(type(self).__name__ + " preprocessing can't be reversed.")
        # BUG FIX: this previously called the nonexistent method
        # ``reverse_preprocess`` on the parent and swallowed the resulting
        # AttributeError, so the parent chain was never actually reversed.
        if self._parent_preprocess is not None:
            dset = self._parent_preprocess.reverse_preprocessing(dset)
        return dset

    @staticmethod
    def _dataset_copy(dataset, inputs=None, targets=None):
        """Clone *dataset*, optionally overriding its inputs/targets."""
        inp = inputs if inputs is not None else dataset.inputs
        tar = targets if targets is not None else dataset.targets
        return dataset.create_linked_dataset(inp, tar, dataset.name, dataset.keep_on_cpu)

    @abstractmethod
    def _reverse_preprocessing(self, dset):
        raise NotImplementedError("Subclass of 'Preprocess' must implement '_reverse_preprocess'.")

    @abstractmethod
    def _apply_preprocessing(self, dset):
        raise NotImplementedError("Subclass of 'Preprocess' must implement '_apply_preprocess'.")
from statistics import mode, StatisticsError
# Map card-rank characters ('2'..'9', T, J, Q, K, A) to ascending integer
# values 0..12 used throughout hand scoring (ace is always high here).
VALUE = {k: v for k, v in zip("23456789TJQKA", range(13))}
def poker(hands):
    """Return the winning hand(s): all hands tied for the best score."""
    scored = [(hand_value(hand), hand) for hand in hands]
    best = max(score for score, _ in scored)
    return [hand for score, hand in scored if score == best]
def hand_value(hand):
    """Score a five-card poker hand as one comparable integer.

    The hand category occupies the most significant digit (base 13**5);
    the poker-sorted card values break ties within a category.
    """
    values = [VALUE[card[0]] for card in hand]
    suits = [card[1] for card in hand]

    def category():
        # Ordered from strongest to weakest category.
        if is_flush(suits) and is_straight(values):
            return 8
        if is_fourkind(values):
            return 7
        if is_fullhouse(values):
            return 6
        if is_flush(suits):
            return 5
        if is_straight(values):
            return 4
        if is_threekind(values):
            return 3
        if is_twopair(values):
            return 2
        if is_onepair(values):
            return 1
        return 0

    tiebreak = sum(13**i * v for i, v in enumerate(poker_sort(values)))
    return 13**5 * category() + tiebreak
def is_flush(suits):
    """Return True when every card in the hand shares a single suit."""
    distinct_suits = frozenset(suits)
    return len(distinct_suits) == 1
def is_straight(values):
    """Return True if the five values are distinct and span exactly 4.

    NOTE(review): an ace-low straight (A-2-3-4-5) maps to {12, 0, 1, 2, 3}
    and is NOT recognized here — confirm whether the wheel should count
    before using this for real poker scoring.
    """
    # `is 5` relied on CPython's small-int cache for integer equality;
    # use `==`, which is correct on every implementation.
    return len(set(values)) == 5 and (max(values) - min(values) == 4)
def is_fourkind(values):
    """Return True if four of the five cards share one value."""
    try:
        # `==` replaces the previous identity checks (`is`), which only
        # worked because CPython caches small ints.
        return len(set(values)) == 2 and values.count(mode(values)) == 4
    except StatisticsError:
        # Defensive: on Python < 3.8 mode() raises for multimodal data;
        # such a hand cannot be four of a kind.
        return False
def is_fullhouse(values):
    """Return True for three of one value plus a pair of another."""
    try:
        # `==` replaces the previous identity checks (`is`), which only
        # worked because CPython caches small ints.
        return len(set(values)) == 2 and values.count(mode(values)) == 3
    except StatisticsError:
        # Defensive: a 2-distinct-value hand is always unimodal, so
        # mode() should never raise here.
        return False
def is_threekind(values):
    """Return True for exactly three cards of one value (not a full house)."""
    try:
        # `==` replaces the previous identity checks (`is`), which only
        # worked because CPython caches small ints.
        return len(set(values)) == 3 and values.count(mode(values)) == 3
    except StatisticsError:
        # Python < 3.8: multimodal 3-distinct-value hands (two pairs)
        # raise here and are correctly rejected.
        return False
def is_twopair(values):
    """Return True for exactly two distinct pairs plus a kicker."""
    # Guard clause replaces the old `if ... is 3` / trailing-else shape
    # and fixes the `is` integer comparison.
    if len(set(values)) != 3:
        return False
    try:
        return values.count(mode(values)) == 2
    except StatisticsError:
        # Python < 3.8: multimodal means two equally common values; with
        # three distinct values that is exactly two pairs.
        return True
def is_onepair(values):
    """True for a 2+1+1+1 pattern (exactly one pair).

    Fix: integer comparisons now use ``==`` instead of ``is``.
    """
    try:
        return len(set(values)) == 4 and values.count(mode(values)) == 2
    except StatisticsError:
        return False
def poker_sort(values):
    """Sort ascending by (multiplicity, rank) so kickers come first.

    Equivalent to the base-13 key 13*count(v)+v, since ranks are < 13.
    """
    return sorted(values, key=lambda v: (values.count(v), v))
/*-------------------------------------------------------------------------
*
* attmap.c
* Attribute mapping support.
*
* This file provides utility routines to build and manage attribute
* mappings by comparing input and output TupleDescs. Such mappings
* are typically used by DDL operating on inheritance and partition trees
* to do a conversion between rowtypes logically equivalent but with
* columns in a different order, taking into account dropped columns.
* They are also used by the tuple conversion routines in tupconvert.c.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/common/attmap.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/attmap.h"
#include "utils/builtins.h"
static bool check_attrmap_match(TupleDesc indesc,
TupleDesc outdesc,
AttrMap *attrMap);
/*
 * make_attrmap
 *
 * Allocate a zero-initialized attribute map of length 'maplen' in the
 * current memory context.  All entries start out as 0 ("not mapped").
 */
AttrMap *
make_attrmap(int maplen)
{
	AttrMap    *map = palloc0_object(AttrMap);

	map->maplen = maplen;
	map->attnums = palloc0_array(AttrNumber, maplen);

	return map;
}
/*
 * free_attrmap
 *
 * Release an attribute map previously allocated with make_attrmap(),
 * including its attribute-number array.
 */
void
free_attrmap(AttrMap *map)
{
	pfree(map->attnums);
	pfree(map);
}
/*
 * build_attrmap_by_position
 *
 * Return a palloc'd bare attribute map for tuple conversion, matching input
 * and output columns by position. Dropped columns are ignored in both input
 * and output, marked as 0. This is normally a subroutine for
 * convert_tuples_by_position in tupconvert.c, but it can be used standalone.
 *
 * 'msg' is the primary error message used if the rowtypes turn out to be
 * incompatible; errdetail fills in the specifics.
 *
 * Note: the errdetail messages speak of indesc as the "returned" rowtype,
 * outdesc as the "expected" rowtype. This is okay for current uses but
 * might need generalization in future.
 */
AttrMap *
build_attrmap_by_position(TupleDesc indesc,
						  TupleDesc outdesc,
						  const char *msg)
{
	AttrMap    *attrMap;
	int			nincols;		/* count of non-dropped input columns */
	int			noutcols;		/* count of non-dropped output columns */
	int			n;
	int			i;
	int			j;
	bool		same;
	/*
	 * The length is computed as the number of attributes of the expected
	 * rowtype as it includes dropped attributes in its count.
	 */
	n = outdesc->natts;
	attrMap = make_attrmap(n);
	j = 0;						/* j is next physical input attribute */
	nincols = noutcols = 0;		/* these count non-dropped attributes */
	same = true;
	for (i = 0; i < n; i++)
	{
		Form_pg_attribute outatt = TupleDescAttr(outdesc, i);
		if (outatt->attisdropped)
			continue;			/* attrMap->attnums[i] is already 0 */
		noutcols++;
		/*
		 * Scan forward to the next live (non-dropped) input column; that is
		 * the positional partner for output column i.
		 */
		for (; j < indesc->natts; j++)
		{
			Form_pg_attribute inatt = TupleDescAttr(indesc, j);
			if (inatt->attisdropped)
				continue;
			nincols++;
			/* Found matching column, now check type */
			if (outatt->atttypid != inatt->atttypid ||
				(outatt->atttypmod != inatt->atttypmod && outatt->atttypmod >= 0))
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg_internal("%s", _(msg)),
						 errdetail("Returned type %s does not match expected type %s in column \"%s\" (position %d).",
								   format_type_with_typemod(inatt->atttypid,
															inatt->atttypmod),
								   format_type_with_typemod(outatt->atttypid,
															outatt->atttypmod),
								   NameStr(outatt->attname),
								   noutcols)));
			/* Map entries are 1-based attribute numbers */
			attrMap->attnums[i] = (AttrNumber) (j + 1);
			j++;
			break;
		}
		/* Ran out of input columns before matching this output column */
		if (attrMap->attnums[i] == 0)
			same = false;		/* we'll complain below */
	}
	/* Check for unused input columns */
	for (; j < indesc->natts; j++)
	{
		if (TupleDescCompactAttr(indesc, j)->attisdropped)
			continue;
		nincols++;
		same = false;			/* we'll complain below */
	}
	/* Report column count mismatch using the non-dropped-column counts */
	if (!same)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg_internal("%s", _(msg)),
				 errdetail("Number of returned columns (%d) does not match "
						   "expected column count (%d).",
						   nincols, noutcols)));
	/* Check if the map has a one-to-one match */
	if (check_attrmap_match(indesc, outdesc, attrMap))
	{
		/* Runtime conversion is not needed */
		free_attrmap(attrMap);
		return NULL;
	}
	return attrMap;
}
/*
 * build_attrmap_by_name
 *
 * Return a palloc'd bare attribute map for tuple conversion, matching input
 * and output columns by name. (Dropped columns are ignored in both input and
 * output.) This is normally a subroutine for convert_tuples_by_name in
 * tupconvert.c, but can be used standalone.
 *
 * If 'missing_ok' is true, a column from 'outdesc' not being present in
 * 'indesc' is not flagged as an error; AttrMap.attnums[] entry for such an
 * outdesc column will be 0 in that case.
 */
AttrMap *
build_attrmap_by_name(TupleDesc indesc,
					  TupleDesc outdesc,
					  bool missing_ok)
{
	AttrMap    *attrMap;
	int			outnatts;
	int			innatts;
	int			i;
	int			nextindesc = -1;	/* rotating start point for inner search */
	outnatts = outdesc->natts;
	innatts = indesc->natts;
	attrMap = make_attrmap(outnatts);
	for (i = 0; i < outnatts; i++)
	{
		Form_pg_attribute outatt = TupleDescAttr(outdesc, i);
		char	   *attname;
		Oid			atttypid;
		int32		atttypmod;
		int			j;
		if (outatt->attisdropped)
			continue;			/* attrMap->attnums[i] is already 0 */
		attname = NameStr(outatt->attname);
		atttypid = outatt->atttypid;
		atttypmod = outatt->atttypmod;
		/*
		 * Now search for an attribute with the same name in the indesc. It
		 * seems likely that a partitioned table will have the attributes in
		 * the same order as the partition, so the search below is optimized
		 * for that case. It is possible that columns are dropped in one of
		 * the relations, but not the other, so we use the 'nextindesc'
		 * counter to track the starting point of the search. If the inner
		 * loop encounters dropped columns then it will have to skip over
		 * them, but it should leave 'nextindesc' at the correct position for
		 * the next outer loop.
		 */
		for (j = 0; j < innatts; j++)
		{
			Form_pg_attribute inatt;
			/* Advance circularly, wrapping back to the first attribute */
			nextindesc++;
			if (nextindesc >= innatts)
				nextindesc = 0;
			inatt = TupleDescAttr(indesc, nextindesc);
			if (inatt->attisdropped)
				continue;
			if (strcmp(attname, NameStr(inatt->attname)) == 0)
			{
				/* Found it, check type */
				if (atttypid != inatt->atttypid || atttypmod != inatt->atttypmod)
					ereport(ERROR,
							(errcode(ERRCODE_DATATYPE_MISMATCH),
							 errmsg("could not convert row type"),
							 errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.",
									   attname,
									   format_type_be(outdesc->tdtypeid),
									   format_type_be(indesc->tdtypeid))));
				attrMap->attnums[i] = inatt->attnum;
				break;
			}
		}
		/* Column not found in indesc: error out unless caller allows it */
		if (attrMap->attnums[i] == 0 && !missing_ok)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("could not convert row type"),
					 errdetail("Attribute \"%s\" of type %s does not exist in type %s.",
							   attname,
							   format_type_be(outdesc->tdtypeid),
							   format_type_be(indesc->tdtypeid))));
	}
	return attrMap;
}
/*
 * build_attrmap_by_name_if_req
 *
 * Convenience wrapper around build_attrmap_by_name() that returns NULL
 * instead of an identity map when no runtime conversion is actually
 * required.  Used by convert_tuples_by_name() in tupconvert.c and other
 * callers, but usable standalone.
 */
AttrMap *
build_attrmap_by_name_if_req(TupleDesc indesc,
							 TupleDesc outdesc,
							 bool missing_ok)
{
	AttrMap    *map = build_attrmap_by_name(indesc, outdesc, missing_ok);

	/* A one-to-one match means callers can skip tuple conversion */
	if (check_attrmap_match(indesc, outdesc, map))
	{
		free_attrmap(map);
		return NULL;
	}

	return map;
}
/*
 * check_attrmap_match
 *
 * Check to see if the map is a one-to-one match, in which case we need
 * not to do a tuple conversion, and the attribute map is not necessary.
 */
static bool
check_attrmap_match(TupleDesc indesc,
					TupleDesc outdesc,
					AttrMap *attrMap)
{
	int			i;
	/* no match if attribute numbers are not the same */
	if (indesc->natts != outdesc->natts)
		return false;
	for (i = 0; i < attrMap->maplen; i++)
	{
		CompactAttribute *inatt = TupleDescCompactAttr(indesc, i);
		CompactAttribute *outatt;
		/*
		 * If the input column has a missing attribute, we need a conversion.
		 */
		if (inatt->atthasmissing)
			return false;
		/* Identity mapping at this position: nothing more to check */
		if (attrMap->attnums[i] == (i + 1))
			continue;
		outatt = TupleDescCompactAttr(outdesc, i);
		/*
		 * If it's a dropped column and the corresponding input column is also
		 * dropped, we don't need a conversion. However, attlen and
		 * attalignby must agree.
		 */
		if (attrMap->attnums[i] == 0 &&
			inatt->attisdropped &&
			inatt->attlen == outatt->attlen &&
			inatt->attalignby == outatt->attalignby)
			continue;
		/* Any other non-identity entry forces a conversion */
		return false;
	}
	return true;
}
#!/usr/bin/env python
"""
Reads from and write to the config file, 'config.json'.
"""
import argparse
from cjh.cli import Cli
from cjh.config import Config
__author__ = 'Chris Horn <hammerhorn@gmail.com>'
def _parse_args():
    """Define and parse this script's command-line options."""
    parser = argparse.ArgumentParser()
    for flag, helptext in (('-s', 'set shell'),
                           ('-e', 'set editor'),
                           ('-t', 'set terminal'),
                           ('-l', 'set language')):
        parser.add_argument(flag, type=str, help=helptext)
    parser.add_argument('-v', action='count')
    return parser.parse_args()
if __name__ == '__main__':
    ARGS = _parse_args()
else:
    ARGS = None

CONFIG = Config()
FILENAME = 'cjh/config.json'

# Read each setting once with dict.get(), which returns None for missing
# keys — replaces the four repeated "if key in list(d.keys())" blocks.
shell = CONFIG.config_dict.get('shell')
editor = CONFIG.config_dict.get('editor')
terminal = CONFIG.config_dict.get('terminal')
language = CONFIG.config_dict.get('language')
def main():
    """
    Writes requested modifications to the 'config.json' file, and sends
    some kind of feedback to stdout.

    Fix: ``-v`` uses ``action='count'``, so ``ARGS.v`` is ``None`` when
    the flag is absent; comparing ``None >= 1`` raises TypeError on
    Python 3.  Normalize to 0 before the comparisons.
    """
    if ARGS.s is not None:
        CONFIG.write_to_config_file(shell=ARGS.s)
    if ARGS.e is not None:
        CONFIG.write_to_config_file(editor=ARGS.e)
    if ARGS.t is not None:
        CONFIG.write_to_config_file(terminal=ARGS.t)
    if ARGS.l is not None:
        CONFIG.write_to_config_file(language=ARGS.l)

    verbosity = ARGS.v or 0              # None -> 0
    no_setters = not (ARGS.e or ARGS.s or ARGS.t or ARGS.l)

    string = ''
    if (verbosity >= 1 and ARGS.s is not None) or no_setters:
        string += "\n shell: '{}'".format(shell)
    if (verbosity >= 1 and ARGS.e is not None) or no_setters:
        string += "\n editor: '{}'".format(editor)
    if (verbosity >= 1 and ARGS.t is not None) or no_setters:
        string += "\nterminal: '{}'".format(terminal)
    if (verbosity >= 1 and ARGS.l is not None) or no_setters:
        string += "\nlanguage: '{}'".format(language)
    if len(string) > 0:
        print(string + '\n')  # pylint: disable=C0325

    if verbosity >= 2 or (no_setters and verbosity >= 1):
        Cli.view_source(FILENAME)
    if not no_setters:
        Cli.report_filesave(FILENAME)
# Entry point: ARGS was parsed at import time in the __main__ branch above.
if __name__ == '__main__':
    main()
#!/usr/bin/python
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_spaces(word):
    """Escape spaces in *word* for Ninja path/identifier contexts.

    The two replace() calls interact: an existing '$ ' escape first has its
    '$' doubled, then the trailing space is escaped again, so every literal
    '$' gains a '$' and every space ends up preceded by exactly one '$'.
    """
    return word.replace('$ ','$$ ').replace(' ','$ ')
class Writer(object):
    """Writes a syntactically valid .ninja file to *output*.

    Lines longer than *width* characters are wrapped using Ninja's '$'
    line-continuation syntax.

    Fix: ``build()`` used the Python-2-only ``dict.iteritems()``; it now
    uses ``items()``, which works on both Python 2 and 3.
    """

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line (visual separator)."""
        self.output.write('\n')

    def comment(self, text):
        """Emit *text* as word-wrapped '#'-prefixed comment lines."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit a 'key = value' binding; a value of None emits nothing."""
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, restat=False, rspfile=None, rspfile_content=None):
        """Emit a 'rule' block with the given optional rule variables."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a 'build' statement; returns the outputs as a list.

        Implicit dependencies follow a '|' separator, order-only
        dependencies follow '||', matching Ninja syntax.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_spaces, outputs))
        all_inputs = list(map(escape_spaces, all_inputs))

        if implicit:
            implicit = map(escape_spaces, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_spaces, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s %s' % (' '.join(out_outputs),
                                        rule,
                                        ' '.join(all_inputs)))
        if variables:
            # .items() works on both Python 2 and 3 (.iteritems() was 2-only).
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)
        self.output.write(leading_space + text + '\n')

    def _as_list(self, value):
        """Normalize None/scalar/list to a list."""
        if value is None:
            return []
        if isinstance(value, list):
            return value
        return [value]
def escape(string):
    """Escape *string* so Ninja will not interpret it any further."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's only metacharacter: double every occurrence.
    return '$$'.join(string.split('$'))
# mode: run
# ticket: 5
class CyTest(object):
    """
    >>> cy = CyTest()
    >>> '_CyTest__private' in dir(cy)
    True
    >>> cy._CyTest__private()
    8
    >>> '__private' in dir(cy)
    False
    >>> '_CyTest__x' in dir(cy)
    True
    >>> '__x' in dir(cy)
    False
    """
    # Double-underscore class attributes are name-mangled at compile time:
    # __x becomes _CyTest__x, __private becomes _CyTest__private.
    __x = 1
    def __private(self): return 8
    def get(self):
        """
        >>> CyTest().get()
        (1, 1, 8)
        """
        # Inside the class body, the mangled and unmangled spellings
        # resolve to the same attribute.
        return self._CyTest__x, self.__x, self.__private()
    def get_inner(self):
        """
        >>> CyTest().get_inner()
        (1, 1, 8)
        """
        # Mangling also applies to functions nested inside methods.
        def get(o):
            return o._CyTest__x, o.__x, o.__private()
        return get(self)
class CyTestSub(CyTest):
    """
    >>> cy = CyTestSub()
    >>> '_CyTestSub__private' in dir(cy)
    True
    >>> cy._CyTestSub__private()
    9
    >>> '_CyTest__private' in dir(cy)
    True
    >>> cy._CyTest__private()
    8
    >>> '__private' in dir(cy)
    False
    >>> '_CyTestSub__x' in dir(cy)
    False
    >>> '_CyTestSub__y' in dir(cy)
    True
    >>> '_CyTest__x' in dir(cy)
    True
    >>> '__x' in dir(cy)
    False
    """
    # Mangling uses the class in whose body the name appears: __y here
    # becomes _CyTestSub__y, while the inherited __x stays _CyTest__x.
    # The two __private methods therefore coexist without overriding.
    __y = 2
    def __private(self): return 9
    def get(self):
        """
        >>> CyTestSub().get()
        (1, 2, 2, 9)
        """
        # __private in this body resolves to _CyTestSub__private (9).
        return self._CyTest__x, self._CyTestSub__y, self.__y, self.__private()
    def get_inner(self):
        """
        >>> CyTestSub().get_inner()
        (1, 2, 2, 9)
        """
        def get(o):
            return o._CyTest__x, o._CyTestSub__y, o.__y, o.__private()
        return get(self)
class _UnderscoreTest(object):
    """
    >>> ut = _UnderscoreTest()
    >>> '__x' in dir(ut)
    False
    >>> '_UnderscoreTest__x' in dir(ut)
    True
    >>> ut._UnderscoreTest__x
    1
    >>> ut.get()
    1
    """
    # A leading underscore on the class name does not change mangling:
    # __x still becomes _UnderscoreTest__x.
    __x = 1
    def get(self):
        return self.__x
#
# Copyright 2015-2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Base class for PyBOMBS commands """
import argparse
from pybombs import pb_logging
from pybombs.config_manager import config_manager
from pybombs.pb_exception import PBException
class CommandBase(object):
    """
    Base class for all PyBOMBS commands classes.
    All PyBOMBS command classes must derive from this.
    """
    cmds = {}  # Add a key for every command; the value is the help string.
    hidden = False  # Set to True if you don't want the command to show up in the help

    def __init__(self,
                 cmd, args,
                 load_recipes=False,
                 require_prefix=True,
                 ):
        """
        :param cmd: name of the command being run; must be a key of ``cmds``
        :param args: parsed argparse namespace
        :param load_recipes: also attach the global recipe manager
        :param require_prefix: raise when no prefix is configured
        """
        self.cmd = cmd
        self.args = args
        self.log = pb_logging.logger.getChild(cmd)
        self.log.debug("Initializing command class for command {}".format(cmd))
        self.cfg = config_manager
        if cmd not in self.cmds:
            raise PBException("{} is not a valid name for this command.".format(cmd))
        if load_recipes:
            from pybombs import recipe_manager
            self.recipe_manager = recipe_manager.recipe_manager
        self.prefix = None
        if self.cfg.get_active_prefix().prefix_dir is not None:
            self.prefix = self.cfg.get_active_prefix()
        elif require_prefix:
            self.log.error("No prefix specified. Aborting.")
            raise PBException("No prefix specified.")
        if self.prefix is not None:
            self.inventory = self.prefix.inventory

    @staticmethod
    def setup_subparser(parser, cmd=None):
        """
        Set up a subparser for a specific command
        """
        pass

    def run(self):
        """ Override this. """
        # Fix: the original format string used '{1}' with a single argument,
        # which raised IndexError instead of this PBException.
        raise PBException("run() method not implemented for command {}!".format(self.cmd))
class SubCommandBase(CommandBase):
    """
    Supplies methods if the command only consists of other subcommands
    """
    # Every entry: {'cmd': COMMAND, 'help': HELP_STR, 'subparser': SUBPARSER_CALLBACK, 'run': RUN_CALLBACK}
    subcommands = []

    def __init__(self,
                 cmd, args,
                 load_recipes=False,
                 require_prefix=True,
                 ):
        CommandBase.__init__(self, cmd, args, load_recipes, require_prefix)

    @staticmethod
    def setup_subcommandparser(parser, help_title, subcommands):
        """
        Set up a subparser for a specific subcommand.

        ``subcommands`` maps a command name to a dict with 'help' and
        'subparser' keys; 'subparser' may be None, a callback taking the
        new subparser, or a tuple/list (argument-list form, not implemented).
        """
        subparsers = parser.add_subparsers(
            help=help_title,
            dest='sub_command',
        )
        # .items() works on both Python 2 and 3 (.iteritems() was 2-only).
        for cmd, cmd_info in subcommands.items():
            subparser = subparsers.add_parser(cmd, help=cmd_info['help'])
            if cmd_info['subparser'] is None:
                continue
            if isinstance(cmd_info['subparser'], (tuple, list)):
                # Argument-list form is not implemented yet.
                pass
            else:
                cmd_info['subparser'](subparser)
        return parser

    def run(self):
        """ Go, go, go! """
        try:
            return self.subcommands[self.args.sub_command]['run'](self)()
        except KeyError:
            self.log.error("Illegal recipes command: {}".format(self.args.sub_command))
            return -1
##############################################################################
# Argument Parser
##############################################################################
def init_arg_parser(show_help_for=None, hide_hidden=True):
    """
    Create the base argument parser with one subparser per known command.

    :param show_help_for: if set to a command name, print that command's
        help and exit immediately.
    :param hide_hidden: suppress hidden commands and route parse errors
        through a dummy handler so the caller can retry with them shown.
    """
    def dummy_error(msg):
        raise PBException('parse error')
    cmd_list = get_cmd_list(hide_hidden=hide_hidden)
    # Set up global options:
    parser = argparse.ArgumentParser(
        description='PyBOMBS: A meta-package manager integrated with CGRAN.',
        epilog='Run `pybombs <command> --help to learn about command-specific options.',
    )
    config_manager.setup_parser(parser)
    subparsers = parser.add_subparsers(
        title='PyBOMBS subcommands',
        help="Description:",
        dest='command',
        metavar='<command>',
    )
    if hide_hidden:
        parser.error = dummy_error
    # Set up options for each command:
    for cmd in cmd_list:
        # .items() works on both Python 2 and 3 (.iteritems() was 2-only).
        for cmd_name, cmd_help in cmd.cmds.items():
            subparser = subparsers.add_parser(cmd_name, help=cmd_help, add_help=True)
            cmd.setup_subparser(subparser, cmd_name)
            if cmd_name == show_help_for:
                subparser.print_help()
                exit(0)
    return parser
##############################################################################
# Dispatcher functions
##############################################################################
def get_cmd_list(hide_hidden=False):
    """
    Return every command class exposed by pybombs.commands (i.e. every
    CommandBase subclass with a non-empty ``cmds`` dict), optionally
    filtering out hidden commands.
    """
    from pybombs import commands
    result = []
    for obj in commands.__dict__.values():
        try:
            is_command = issubclass(obj, CommandBase) and len(obj.cmds)
            if is_command and not (hide_hidden and obj.hidden):
                result.append(obj)
        except (TypeError, AttributeError):
            # Non-class attributes (or classes without cmds) are skipped.
            pass
    return result
def get_cmd_dict(cmd_list):
    """
    Build a {command name: command class} dict from a list of command
    classes.

    Fix: iterate ``cmds`` directly instead of using the Python-2-only
    ``iterkeys()``.
    """
    cmd_dict = {}
    for cmd in cmd_list:
        for cmd_name in cmd.cmds:
            cmd_dict[cmd_name] = cmd
    return cmd_dict
def dispatch():
    """
    Parse the command line and run the matching command class.
    """
    try:
        args = init_arg_parser().parse_args()
    except PBException:
        # Parsing with hidden commands suppressed failed; retry with
        # everything visible so the user gets a real error message.
        args = init_arg_parser(hide_hidden=False).parse_args()
    cmd_class = get_cmd_dict(get_cmd_list())[args.command]
    return cmd_class(cmd=args.command, args=args).run()
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common import timeutils
# Configuration options for nova-cells, registered under the [cells] group.
cell_manager_opts = [
    cfg.StrOpt('driver',
               default='nova.cells.rpc_driver.CellsRPCDriver',
               help='Cells communication driver to use'),
    cfg.IntOpt("instance_updated_at_threshold",
               default=3600,
               help="Number of seconds after an instance was updated "
                    "or deleted to continue to update cells"),
    cfg.IntOpt("instance_update_num_instances",
               default=1,
               help="Number of instances to update per periodic task run")
]
CONF = cfg.CONF
# The cell 'name' option lives in nova.cells.opts; import it so that
# CONF.cells.name resolves in this module.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_manager_opts, group='cells')
LOG = logging.getLogger(__name__)
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the nova.cells.messaging module.
The MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
target = oslo_messaging.Target(version='1.26')
    def __init__(self, *args, **kwargs):
        """Set up cell state, the message runner, and the comms driver.

        ``cell_state_manager`` may be passed via kwargs as a test hook;
        it defaults to cells_state.CellStateManager.
        """
        LOG.warn(_('The cells feature of Nova is considered experimental '
                   'by the OpenStack project because it receives much '
                   'less testing than the rest of Nova. This may change '
                   'in the future, but current deployers should be aware '
                   'that the use of it in production right now may be '
                   'risky.'))
        # Mostly for tests.
        cell_state_manager = kwargs.pop('cell_state_manager', None)
        super(CellsManager, self).__init__(service_name='cells',
                                           *args, **kwargs)
        if cell_state_manager is None:
            cell_state_manager = cells_state.CellStateManager
        self.state_manager = cell_state_manager()
        # MessageRunner routes cell messages; the configured driver
        # (CONF.cells.driver) moves them between cells.
        self.msg_runner = messaging.MessageRunner(self.state_manager)
        cells_driver_cls = importutils.import_class(
            CONF.cells.driver)
        self.driver = cells_driver_cls()
        # Iterator over instance UUIDs still pending sync to parent cells;
        # refilled lazily by _heal_instances().
        self.instances_to_heal = iter([])
    def post_start_hook(self):
        """Have the driver start its servers for inter-cell communication.
        Also ask our child cells for their capacities and capabilities so
        we get them more quickly than just waiting for the next periodic
        update.  Receiving the updates from the children will cause us to
        update our parents.  If we don't have any children, just update
        our parents immediately.
        """
        # FIXME(comstud): There's currently no hooks when services are
        # stopping, so we have no way to stop servers cleanly.
        self.driver.start_servers(self.msg_runner)
        ctxt = context.get_admin_context()
        if self.state_manager.get_child_cells():
            self.msg_runner.ask_children_for_capabilities(ctxt)
            self.msg_runner.ask_children_for_capacities(ctxt)
        else:
            # Leaf cell: nothing to wait for, push our info upward now.
            self._update_our_parents(ctxt)
    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.
        """
        # Both calls broadcast via the message runner; no return values.
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)
@periodic_task.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
If CONF.cells.instance_update_at_threshold is set, only attempt
to sync instances that have been updated recently. The CONF
setting defines the maximum number of seconds old the updated_at
can be. Ie, a threshold of 3600 means to only update instances
that have modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
    def schedule_run_instance(self, ctxt, host_sched_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s)
        and forward the request accordingly.
        """
        # Target is ourselves first.
        # Our own cell's scheduler decides whether to build locally or
        # forward the request further down the tree.
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.schedule_run_instance(ctxt, our_cell,
                                              host_sched_kwargs)
    def build_instances(self, ctxt, build_inst_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s) and
        forward the request accordingly.
        """
        # Target is ourselves first.
        # Mirrors schedule_run_instance() but for the newer build API.
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
    def get_cell_info_for_neighbors(self, _ctxt):
        """Return cell information for our neighbor cells."""
        # Pure delegation; the context is unused (hence the underscore).
        return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        # Fire-and-forget broadcast toward parent cells.
        self.msg_runner.instance_update_at_top(ctxt, instance)
    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        # Fire-and-forget broadcast toward parent cells.
        self.msg_runner.instance_destroy_at_top(ctxt, instance)
    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted.  So, we'll broadcast this everywhere.
        """
        # delete_type distinguishes 'delete' from 'soft_delete'.
        self.msg_runner.instance_delete_everywhere(ctxt, instance,
                                                   delete_type)
    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        # Fire-and-forget broadcast toward parent cells.
        self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        # Fire-and-forget broadcast toward parent cells.
        self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        # All filtering arguments may be None; delegation only.
        self.msg_runner.sync_instances(ctxt, project_id, updated_since,
                                       deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
    def get_host_uptime(self, ctxt, host_name):
        """Return host uptime for a compute host in a certain cell

        :param host_name: fully qualified hostname. It should be in format of
         parent!child@host_id
        """
        # Split the cell path off the host name, then route the request to
        # that cell and wait for the (possibly raising) response.
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.get_host_uptime(ctxt, cell_name,
                                                   host_name)
        return response.value_or_raise()
def service_update(self, ctxt, host_name, binary, params_to_update):
    """Used to enable/disable a service. For compute services, setting to
    disabled stops new builds arriving on that host.

    :param host_name: the name of the host machine that the service is
                      running
    :param binary: The name of the executable that the service runs as
    :param params_to_update: eg. {'disabled': True}
    :returns: the service reference
    """
    cell_name, host_name = cells_utils.split_cell_and_item(host_name)
    response = self.msg_runner.service_update(
        ctxt, cell_name, host_name, binary, params_to_update)
    service = response.value_or_raise()
    # Tag the updated service with its originating cell.
    cells_utils.add_cell_to_service(service, response.cell_name)
    return service

def service_delete(self, ctxt, cell_service_id):
    """Deletes the specified service.

    :param cell_service_id: composite identifier carrying both the cell
        path and the service id (split by cells_utils)
    """
    cell_name, service_id = cells_utils.split_cell_and_item(
        cell_service_id)
    self.msg_runner.service_delete(ctxt, cell_name, service_id)
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
    """Proxy an RPC message as-is to a manager.

    The target cell and host are encoded in ``topic`` after the compute
    topic prefix.

    :param call: whether the proxied message expects a response
    :param timeout: how long to wait for that response
    """
    compute_topic = CONF.compute_topic
    # Skip the prefix plus one separator character, leaving 'cell!host'.
    cell_and_host = topic[len(compute_topic) + 1:]
    cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
    response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
            host_name, topic, rpc_message, call, timeout)
    return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
                     period_ending, host=None, state=None):
    """Get task logs from the DB from all cells or a particular
    cell.

    If 'host' is not None, host will be of the format 'cell!name@host',
    with '@host' being optional. The query will be directed to the
    appropriate cell and return all task logs, or task logs matching
    the host if specified.

    'state' also may be None. If it's not, filter by the state as well.
    """
    if host is None:
        cell_name = None
    else:
        cell_name, host = cells_utils.split_cell_and_item(host)
        # If no cell name was given, assume that the host name is the
        # cell_name and that the target is all hosts
        if cell_name is None:
            cell_name, host = host, cell_name
    responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
            task_name, period_beginning, period_ending,
            host=host, state=state)
    # 1 response per cell. Each response is a list of task log
    # entries.
    ret_task_logs = []
    for response in responses:
        task_logs = response.value_or_raise()
        for task_log in task_logs:
            # Tag each entry with its originating cell before returning.
            cells_utils.add_cell_to_task_log(task_log,
                                             response.cell_name)
            ret_task_logs.append(task_log)
    return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
    """Get a compute node by ID in a specific cell.

    :param compute_id: composite identifier carrying both the cell path
        and the node id (split by cells_utils)
    """
    cell_name, compute_id = cells_utils.split_cell_and_item(
        compute_id)
    response = self.msg_runner.compute_node_get(ctxt, cell_name,
                                                compute_id)
    node = response.value_or_raise()
    # Tag the node with the cell it was fetched from.
    cells_utils.add_cell_to_compute_node(node, cell_name)
    return node

def compute_node_get_all(self, ctxt, hypervisor_match=None):
    """Return list of compute nodes in all cells.

    :param hypervisor_match: optional hypervisor filter forwarded to
        each cell
    """
    responses = self.msg_runner.compute_node_get_all(ctxt,
            hypervisor_match=hypervisor_match)
    # 1 response per cell. Each response is a list of compute_node
    # entries.
    ret_nodes = []
    for response in responses:
        nodes = response.value_or_raise()
        for node in nodes:
            # Tag each node with its originating cell.
            cells_utils.add_cell_to_compute_node(node,
                                                 response.cell_name)
            ret_nodes.append(node)
    return ret_nodes
def compute_node_stats(self, ctxt):
    """Return compute node stats totals summed across all cells.

    :param ctxt: request context
    :returns: dict mapping each stat key to the sum of that key's
        values over every cell's response
    """
    responses = self.msg_runner.compute_node_stats(ctxt)
    totals = {}
    # One response per cell; each carries a dict of stat totals.
    for response in responses:
        data = response.value_or_raise()
        # Use .items() (not the Python-2-only .iteritems()) so this
        # works on both Python 2 and Python 3.
        for key, val in data.items():
            totals[key] = totals.get(key, 0) + val
    return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
    """Return the actions recorded for an instance in a given cell."""
    response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
    return response.value_or_raise()

def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                             request_id):
    """Return the instance action matching ``request_id`` in a cell."""
    response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                        instance_uuid,
                                                        request_id)
    return response.value_or_raise()

def action_events_get(self, ctxt, cell_name, action_id):
    """Return the events recorded for an instance action in a cell."""
    response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                 action_id)
    return response.value_or_raise()

def consoleauth_delete_tokens(self, ctxt, instance_uuid):
    """Delete consoleauth tokens for an instance in API cells."""
    self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
                          console_type):
    """Validate console port with child cell compute node.

    :raises: exception.InstanceUnknownCell if the instance record has
        no cell_name, since the request cannot be routed
    """
    instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
    if not instance['cell_name']:
        raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
    response = self.msg_runner.validate_console_port(ctxt,
            instance['cell_name'], instance_uuid, console_port,
            console_type)
    return response.value_or_raise()

def get_capacities(self, ctxt, cell_name):
    """Return capacity information from the state manager for a cell."""
    return self.state_manager.get_capacities(cell_name)
# BDM = block device mapping.
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
    """BDM was created/updated in this cell. Tell the API cells."""
    self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)

def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
                       volume_id=None):
    """BDM was destroyed for instance in this cell. Tell the API cells."""
    self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
                                       device_name=device_name,
                                       volume_id=volume_id)
def get_migrations(self, ctxt, filters):
    """Fetch migrations applying the filters.

    When the filters name a specific cell, the request is routed to
    that cell's full path; otherwise every cell is queried.
    """
    target_cell = None
    if "cell_name" in filters:
        # Build the full path: our cell name + separator + child cell.
        _path_cell_sep = cells_utils.PATH_CELL_SEP
        target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
                                  filters['cell_name'])
    migrations = []
    for response in self.msg_runner.get_migrations(ctxt, target_cell,
                                                   False, filters):
        migrations.extend(response.value_or_raise())
    return migrations
def instance_update_from_api(self, ctxt, instance, expected_vm_state,
                             expected_task_state, admin_state_reset):
    """Update an instance in its cell.

    The expected vm/task states and the admin reset flag are forwarded
    verbatim to the target cell.
    """
    self.msg_runner.instance_update_from_api(ctxt, instance,
                                             expected_vm_state,
                                             expected_task_state,
                                             admin_state_reset)

def start_instance(self, ctxt, instance):
    """Start an instance in its cell."""
    self.msg_runner.start_instance(ctxt, instance)

def stop_instance(self, ctxt, instance, do_cast=True):
    """Stop an instance in its cell.

    When ``do_cast`` is False the cell's response is waited on and its
    value (or error) returned; otherwise nothing is returned.
    """
    response = self.msg_runner.stop_instance(ctxt, instance,
                                             do_cast=do_cast)
    if not do_cast:
        return response.value_or_raise()

# Cell record CRUD: straight passthroughs to the state manager.
def cell_create(self, ctxt, values):
    return self.state_manager.cell_create(ctxt, values)

def cell_update(self, ctxt, cell_name, values):
    return self.state_manager.cell_update(ctxt, cell_name, values)

def cell_delete(self, ctxt, cell_name):
    return self.state_manager.cell_delete(ctxt, cell_name)

def cell_get(self, ctxt, cell_name):
    return self.state_manager.cell_get(ctxt, cell_name)
# The methods below are thin passthroughs: each forwards a single
# instance operation to the instance's cell via the message runner.

def reboot_instance(self, ctxt, instance, reboot_type):
    """Reboot an instance in its cell."""
    self.msg_runner.reboot_instance(ctxt, instance, reboot_type)

def pause_instance(self, ctxt, instance):
    """Pause an instance in its cell."""
    self.msg_runner.pause_instance(ctxt, instance)

def unpause_instance(self, ctxt, instance):
    """Unpause an instance in its cell."""
    self.msg_runner.unpause_instance(ctxt, instance)

def suspend_instance(self, ctxt, instance):
    """Suspend an instance in its cell."""
    self.msg_runner.suspend_instance(ctxt, instance)

def resume_instance(self, ctxt, instance):
    """Resume an instance in its cell."""
    self.msg_runner.resume_instance(ctxt, instance)

def terminate_instance(self, ctxt, instance):
    """Delete an instance in its cell."""
    self.msg_runner.terminate_instance(ctxt, instance)

def soft_delete_instance(self, ctxt, instance):
    """Soft-delete an instance in its cell."""
    self.msg_runner.soft_delete_instance(ctxt, instance)

def resize_instance(self, ctxt, instance, flavor,
                    extra_instance_updates):
    """Resize an instance in its cell."""
    self.msg_runner.resize_instance(ctxt, instance,
                                    flavor, extra_instance_updates)

def live_migrate_instance(self, ctxt, instance, block_migration,
                          disk_over_commit, host_name):
    """Live migrate an instance in its cell."""
    self.msg_runner.live_migrate_instance(ctxt, instance,
                                          block_migration,
                                          disk_over_commit,
                                          host_name)

def revert_resize(self, ctxt, instance):
    """Revert a resize for an instance in its cell."""
    self.msg_runner.revert_resize(ctxt, instance)

def confirm_resize(self, ctxt, instance):
    """Confirm a resize for an instance in its cell."""
    self.msg_runner.confirm_resize(ctxt, instance)

def reset_network(self, ctxt, instance):
    """Reset networking for an instance in its cell."""
    self.msg_runner.reset_network(ctxt, instance)

def inject_network_info(self, ctxt, instance):
    """Inject networking for an instance in its cell."""
    self.msg_runner.inject_network_info(ctxt, instance)

def snapshot_instance(self, ctxt, instance, image_id):
    """Snapshot an instance in its cell."""
    self.msg_runner.snapshot_instance(ctxt, instance, image_id)

def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
    """Backup an instance in its cell."""
    self.msg_runner.backup_instance(ctxt, instance, image_id,
                                    backup_type, rotation)
def rebuild_instance(self, ctxt, instance, image_href, admin_password,
                     files_to_inject, preserve_ephemeral, kwargs):
    """Rebuild an instance in its cell, forwarding all arguments."""
    self.msg_runner.rebuild_instance(
        ctxt, instance, image_href, admin_password,
        files_to_inject, preserve_ephemeral, kwargs)
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import api_sample_base
class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV21):
    """The os-agents API was removed: every verb must answer 410 (Gone)."""

    def test_agent_list(self):
        """Listing agent builds returns 410."""
        self.api.api_get('/os-agents', check_response_status=[410])

    def test_agent_create(self):
        """Creating a new agent build returns 410."""
        self.api.api_post('/os-agents', {}, check_response_status=[410])

    def test_agent_update(self):
        """Updating an existing agent build returns 410."""
        self.api.api_put('/os-agents/1', {}, check_response_status=[410])

    def test_agent_delete(self):
        """Deleting an existing agent build returns 410."""
        self.api.api_delete('/os-agents/1', check_response_status=[410])
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.webrtc
import WebRTC.*
import io.ktor.client.webrtc.media.*
import kotlinx.cinterop.ExperimentalForeignApi
import kotlinx.cinterop.ObjCSignatureOverride
import kotlinx.coroutines.suspendCancellableCoroutine
import platform.darwin.NSObject
import kotlin.coroutines.CoroutineContext
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
/**
* iOS-specific implementation of a WebRTC peer connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.IosWebRtcConnection)
*
* @param coroutineContext coroutine context used to deliver connection callbacks.
* @param config configuration describing ICE servers, media constraints, and other connection options.
* @param createConnection factory function that creates a native peer connection instance using a provided delegate.
*/
@OptIn(ExperimentalForeignApi::class)
public class IosWebRtcConnection(
    coroutineContext: CoroutineContext,
    config: WebRtcConnectionConfig,
    createConnection: (RTCPeerConnectionDelegateProtocol) -> RTCPeerConnection?
) : WebRtcPeerConnection(coroutineContext, config) {

    // Native RTCPeerConnection backing this wrapper.
    internal val peerConnection: RTCPeerConnection

    init {
        peerConnection = createConnection(createDelegate()) ?: error("Failed to create peer connection.")
    }

    override suspend fun getStatistics(): List<WebRtc.Stats> = suspendCancellableCoroutine { cont ->
        peerConnection.statisticsWithCompletionHandler { stats ->
            // A nil report is surfaced as an empty list rather than an error.
            cont.resume(stats?.toKtor() ?: emptyList())
        }
    }

    // Bridges native delegate callbacks into the connection's event emitters.
    private fun createDelegate() = object : RTCPeerConnectionDelegateProtocol, NSObject() {
        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didChangeConnectionState: RTCPeerConnectionState
        ) = runInConnectionScope {
            events.emitConnectionStateChange(didChangeConnectionState.toKtor())
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didChangeIceConnectionState: RTCIceConnectionState
        ) = runInConnectionScope {
            events.emitIceConnectionStateChange(didChangeIceConnectionState.toKtor())
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didChangeIceGatheringState: RTCIceGatheringState
        ) = runInConnectionScope {
            events.emitIceGatheringStateChange(didChangeIceGatheringState.toKtor())
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didGenerateIceCandidate: RTCIceCandidate
        ) = runInConnectionScope {
            events.emitIceCandidate(didGenerateIceCandidate.toKtor())
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didChangeSignalingState: RTCSignalingState
        ) = runInConnectionScope {
            events.emitSignalingStateChange(didChangeSignalingState.toKtor())
        }

        override fun peerConnectionShouldNegotiate(peerConnection: RTCPeerConnection) = runInConnectionScope {
            events.emitNegotiationNeeded()
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didOpenDataChannel: RTCDataChannel
        ) = runInConnectionScope {
            // Remotely-opened channels get default receive options.
            val channel = IosWebRtcDataChannel(
                nativeChannel = didOpenDataChannel,
                coroutineScope = coroutineScope,
                receiveOptions = DataChannelReceiveOptions()
            )
            channel.setupEvents(events)
            events.emitDataChannelEvent(event = DataChannelEvent.Open(channel))
        }

        override fun peerConnection(peerConnection: RTCPeerConnection, didRemoveIceCandidates: List<*>) {}

        // Stream-level callbacks are intentionally no-ops; tracks are
        // surfaced per-receiver in the callbacks below instead.
        @ObjCSignatureOverride
        override fun peerConnection(peerConnection: RTCPeerConnection, didAddStream: RTCMediaStream) {
        }

        @ObjCSignatureOverride
        override fun peerConnection(peerConnection: RTCPeerConnection, didRemoveStream: RTCMediaStream) {
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didAddReceiver: RTCRtpReceiver,
            streams: List<*>
        ) = runInConnectionScope {
            val nativeTrack = didAddReceiver.track ?: return@runInConnectionScope
            events.emitAddTrack(track = IosMediaTrack.from(nativeTrack))
        }

        override fun peerConnection(
            peerConnection: RTCPeerConnection,
            didRemoveReceiver: RTCRtpReceiver
        ) = runInConnectionScope {
            val nativeTrack = didRemoveReceiver.track ?: return@runInConnectionScope
            events.emitRemoveTrack(track = IosMediaTrack.from(nativeTrack))
        }
    }

    override val localDescription: WebRtc.SessionDescription?
        get() = peerConnection.localDescription?.toKtor()

    override val remoteDescription: WebRtc.SessionDescription?
        get() = peerConnection.remoteDescription?.toKtor()

    private fun hasAudio() = peerConnection.senders.any { (it as RTCRtpSender).track?.kind == "audio" }

    private fun hasVideo() = peerConnection.senders.any { (it as RTCRtpSender).track?.kind == "video" }

    // Offer/answer constraints: only ask to receive the media kinds we
    // are currently sending ourselves.
    private fun sdpConstraints(): RTCMediaConstraints {
        return RTCMediaConstraints(
            mandatoryConstraints = mutableMapOf<Any?, String>().apply {
                if (hasAudio()) set("OfferToReceiveAudio", "true")
                if (hasVideo()) set("OfferToReceiveVideo", "true")
            },
            optionalConstraints = null
        )
    }

    override suspend fun createOffer(): WebRtc.SessionDescription {
        val offer = suspendCancellableCoroutine { cont ->
            peerConnection.offerForConstraints(
                constraints = sdpConstraints(),
                completionHandler = cont.toSdpCreateHandler()
            )
        }
        return offer.toKtor()
    }

    override suspend fun createAnswer(): WebRtc.SessionDescription {
        val answer = suspendCancellableCoroutine { cont ->
            peerConnection.answerForConstraints(
                constraints = sdpConstraints(),
                completionHandler = cont.toSdpCreateHandler()
            )
        }
        return answer.toKtor()
    }

    override suspend fun createDataChannel(
        label: String,
        options: WebRtcDataChannelOptions.() -> Unit
    ): WebRtcDataChannel {
        // NOTE(review): this `options` val deliberately shadows the lambda
        // parameter of the same name after applying it.
        val options = WebRtcDataChannelOptions().apply(options)
        val configuration = RTCDataChannelConfiguration().apply {
            if (options.id != null) {
                channelId = options.id!!
            }
            if (options.maxRetransmits != null) {
                maxRetransmits = options.maxRetransmits!!
            }
            if (options.maxPacketLifeTime != null) {
                maxRetransmitTimeMs = options.maxPacketLifeTime!!.inWholeMilliseconds
            }
            isOrdered = options.ordered
            protocol = options.protocol
            isNegotiated = options.negotiated
        }
        val nativeChannel = requireNotNull(peerConnection.dataChannelForLabel(label, configuration)) {
            "Failed to create data channel with label: $label"
        }
        val receiveOptions = DataChannelReceiveOptions().apply(options.receiveOptions)
        return IosWebRtcDataChannel(nativeChannel, coroutineScope, receiveOptions).apply {
            setupEvents(events)
        }
    }

    override suspend fun setLocalDescription(description: WebRtc.SessionDescription): Unit =
        suspendCancellableCoroutine { cont ->
            peerConnection.setLocalDescription(sdp = description.toIos(), completionHandler = cont.toSdpSetHandler())
        }

    override suspend fun setRemoteDescription(description: WebRtc.SessionDescription): Unit =
        suspendCancellableCoroutine { cont ->
            peerConnection.setRemoteDescription(sdp = description.toIos(), completionHandler = cont.toSdpSetHandler())
        }

    override suspend fun addIceCandidate(candidate: WebRtc.IceCandidate): Unit = suspendCancellableCoroutine { cont ->
        peerConnection.addIceCandidate(candidate.toIos()) { error ->
            when {
                error == null -> cont.resume(Unit)
                else -> cont.resumeWithException(WebRtc.IceException(error.toString()))
            }
        }
    }

    override suspend fun addTrack(track: WebRtcMedia.Track): WebRtc.RtpSender {
        val mediaTrack = (track as IosMediaTrack).nativeTrack
        val nativeSender = peerConnection.addTrack(mediaTrack, streamIds = listOf<String>())
            ?: error("Failed to add track.")
        return IosRtpSender(nativeSender)
    }

    override suspend fun removeTrack(track: WebRtcMedia.Track) {
        val track = track as IosMediaTrack
        // The native API removes by sender, so look the sender up by track id.
        val sender = peerConnection.senders.firstOrNull { (it as RTCRtpSender).track?.trackId == track.id }
            ?: error("Failed to find sender for the track.")
        peerConnection.removeTrack(sender as RTCRtpSender)
    }

    override suspend fun removeTrack(sender: WebRtc.RtpSender) {
        val rtpSender = sender as IosRtpSender
        if (!peerConnection.removeTrack(rtpSender.nativeSender)) {
            error("Failed to remove track.")
        }
    }

    override fun restartIce() {
        peerConnection.restartIce()
    }

    override fun close() {
        super.close()
        peerConnection.close()
    }
}
/**
* Returns implementation of the peer connection that is used under the hood. Use it with caution.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.getNative)
*/
@OptIn(ExperimentalForeignApi::class)
public fun WebRtcPeerConnection.getNative(): RTCPeerConnection =
    (this as IosWebRtcConnection).peerConnection
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { HybridComplete } from './hybrid';
import { CachedPrometheusClient, HTTPPrometheusClient, PrometheusClient, PrometheusConfig } from '../client/prometheus';
import { CompletionContext, CompletionResult } from '@codemirror/autocomplete';
// Complete is the interface that defines the simple method that returns a CompletionResult.
// Every different completion mode must implement this interface.
export interface CompleteStrategy {
  // Resolve completions for the given editor context; may be sync or async.
  promQL(context: CompletionContext): Promise<CompletionResult | null> | CompletionResult | null;
  // Optional cleanup hook for strategies holding resources.
  destroy?(): void;
}

// CompleteConfiguration should be used to customize the autocompletion.
export interface CompleteConfiguration {
  // Either a connection config or a ready-made Prometheus client.
  remote?: PrometheusConfig | PrometheusClient;
  // maxMetricsMetadata is the maximum number of metrics in Prometheus for which metadata is fetched.
  // If the number of metrics exceeds this limit, no metric metadata is fetched at all.
  maxMetricsMetadata?: number;
  // When providing this custom CompleteStrategy, the settings above will not be used.
  completeStrategy?: CompleteStrategy;
}
// Type guard: a remote config is treated as a full client when it exposes
// every method of the PrometheusClient interface as a function.
export function isPrometheusClient(remoteConfig: PrometheusConfig | PrometheusClient): remoteConfig is PrometheusClient {
  const client = remoteConfig as PrometheusClient;
  const requiredMethods = [client.labelNames, client.labelValues, client.metricMetadata, client.series, client.metricNames];
  return requiredMethods.every((method) => typeof method === 'function');
}
export function newCompleteStrategy(conf?: CompleteConfiguration): CompleteStrategy {
if (conf?.completeStrategy) {
return conf.completeStrategy;
}
if (conf?.remote) {
if (isPrometheusClient(conf.remote)) {
return new HybridComplete(conf.remote, conf.maxMetricsMetadata);
}
return new HybridComplete(new CachedPrometheusClient(new HTTPPrometheusClient(conf.remote), conf.remote.cache), conf.maxMetricsMetadata);
}
return new HybridComplete();
} | typescript | github | https://github.com/prometheus/prometheus | web/ui/module/codemirror-promql/src/complete/index.ts |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
def find_in_path(name, path):
    """Return the absolute path of *name* inside *path*, or None.

    *path* is an os.pathsep-separated list of directories, searched in
    order; the first directory containing *name* wins.
    """
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    return next((os.path.abspath(c) for c in candidates if os.path.exists(c)), None)
def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.

    :raises EnvironmentError: if nvcc cannot be found or any expected
        CUDA path does not exist
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # nvcc lives in <home>/bin, so home is two directory levels up.
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home':home, 'nvcc':nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Sanity-check that every expected path actually exists.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
# Test if cuda could be found; GPU extensions are only built when it is.
try:
    CUDA = locate_cuda()
except EnvironmentError:
    # No usable CUDA toolkit: fall back to CPU-only extensions.
    CUDA = None

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Very old numpy releases only provided get_numpy_include().
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on.

    ``self`` is the distutils compiler instance being patched, not a
    method receiver.
    """
    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _comple methods
    default_compiler_so = self.compiler_so
    # NOTE: deliberately shadows the builtin 'super'; it holds the
    # original _compile so the wrapper below can delegate to it.
    super = self._compile

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile
# Run customize_compiler_for_nvcc at build time via a build_ext subclass.
class custom_build_ext(build_ext):
    """build_ext that patches the compiler so .cu sources go through nvcc."""
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Cython extensions that are always built (CPU-only).
ext_modules = [
    Extension(
        "bbox",
        ["bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include]
    ),
    Extension(
        "cpu_nms",
        ["cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
]

# The GPU NMS extension is only added when a CUDA toolkit was located.
if CUDA is not None:
    ext_modules.append(
        Extension('gpu_nms',
            ['nms_kernel.cu', 'gpu_nms.pyx'],
            library_dirs=[CUDA['lib64']],
            libraries=['cudart'],
            language='c++',
            runtime_library_dirs=[CUDA['lib64']],
            # this syntax is specific to this build system
            # we're only going to use certain compiler args with nvcc and not with
            # gcc the implementation of this trick is in customize_compiler() below
            extra_compile_args={'gcc': ["-Wno-unused-function"],
                                'nvcc': ['-arch=sm_35',
                                         '--ptxas-options=-v',
                                         '-c',
                                         '--compiler-options',
                                         "'-fPIC'"]},
            include_dirs = [numpy_include, CUDA['include']]
        )
    )
else:
    print('Skipping GPU_NMS')
# Register the extensions; custom_build_ext routes .cu files to nvcc.
setup(
    name='frcnn_cython',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
) | unknown | codeparrot/codeparrot-clean | ||
---
title: Overview
---
Svelte 5 introduced some significant changes to Svelte's API, including [runes](what-are-runes), [snippets](snippet) and event attributes. As a result, some Svelte 3/4 features are deprecated (though supported for now, unless otherwise specified) and will eventually be removed. We recommend that you incrementally [migrate your existing code](v5-migration-guide).
The following pages document these features for
- people still using Svelte 3/4
- people using Svelte 5, but with components that haven't yet been migrated
Since Svelte 3/4 syntax still works in Svelte 5, we will distinguish between _legacy mode_ and _runes mode_. Once a component is in runes mode (which you can opt into by using runes, or by explicitly setting the `runes: true` compiler option), legacy mode features are no longer available.
If you're exclusively interested in the Svelte 3/4 syntax, you can browse its documentation at [v4.svelte.dev](https://v4.svelte.dev). | unknown | github | https://github.com/sveltejs/svelte | documentation/docs/99-legacy/00-legacy-overview.md |
# frozen_string_literal: true
module ActiveRecord
module Encryption
# An encryptor that can encrypt data but can't decrypt it.
class EncryptingOnlyEncryptor < Encryptor
  # Decryption is a deliberate no-op: the ciphertext is returned
  # unchanged. key_provider and cipher_options are accepted only to
  # keep the Encryptor#decrypt signature.
  def decrypt(encrypted_text, key_provider: nil, cipher_options: {})
    encrypted_text
  end
end
end
end | ruby | github | https://github.com/rails/rails | activerecord/lib/active_record/encryption/encrypting_only_encryptor.rb |
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc
import (
"bytes"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"math"
mrand "math/rand"
"reflect"
"sort"
"strconv"
"sync"
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)
// TestStoreRev checks that each Put bumps the store revision by one
// (the store starts at revision 1, so after the i-th put Rev() is i+1).
func TestStoreRev(t *testing.T) {
	be, _ := betesting.NewDefaultTmpBackend(t)
	store := NewStore(zaptest.NewLogger(t), be, &lease.FakeLessor{}, StoreConfig{})
	defer store.Close()

	for n := 1; n <= 3; n++ {
		store.Put([]byte("foo"), []byte("bar"), lease.NoLease)
		want := int64(n + 1)
		if got := store.Rev(); got != want {
			t.Errorf("#%d: rev = %d, want %d", n, got, want)
		}
	}
}
// TestStorePut drives store.Put against fake backend/index implementations
// and checks the exact backend and index action sequences plus the resulting
// revision for three cases: a brand-new key, a key already written in the
// same main revision, and an existing key from an earlier revision.
func TestStorePut(t *testing.T) {
	lg := zaptest.NewLogger(t)
	kv := mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 1,
		ModRevision:    2,
		Version:        1,
	}
	kvb, err := kv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	tests := []struct {
		rev Revision
		r   indexGetResp
		rr  *rangeResp

		wrev    Revision
		wkey    []byte
		wkv     mvccpb.KeyValue
		wputrev Revision
	}{
		{
			Revision{Main: 1},
			indexGetResp{Revision{}, Revision{}, 0, ErrRevisionNotFound},
			nil,

			Revision{Main: 2},
			newTestRevBytes(Revision{Main: 2}),
			mvccpb.KeyValue{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        1,
				Lease:          1,
			},
			Revision{Main: 2},
		},
		{
			Revision{Main: 1, Sub: 1},
			indexGetResp{Revision{Main: 2}, Revision{Main: 2}, 1, nil},
			&rangeResp{[][]byte{newTestRevBytes(Revision{Main: 2, Sub: 1})}, [][]byte{kvb}},

			Revision{Main: 2},
			newTestRevBytes(Revision{Main: 2}),
			mvccpb.KeyValue{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        2,
				Lease:          2,
			},
			Revision{Main: 2},
		},
		{
			Revision{Main: 2},
			indexGetResp{Revision{Main: 2, Sub: 1}, Revision{Main: 2}, 2, nil},
			&rangeResp{[][]byte{newTestRevBytes(Revision{Main: 2, Sub: 1})}, [][]byte{kvb}},

			Revision{Main: 3},
			newTestRevBytes(Revision{Main: 3}),
			mvccpb.KeyValue{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    3,
				Version:        3,
				Lease:          3,
			},
			Revision{Main: 3},
		},
	}
	for i, tt := range tests {
		s := newFakeStore(lg)
		b := s.b.(*fakeBackend)
		fi := s.kvindex.(*fakeIndex)

		// Prime the fakes with the canned index/backend responses.
		s.currentRev = tt.rev.Main
		fi.indexGetRespc <- tt.r
		if tt.rr != nil {
			b.tx.rangeRespc <- *tt.rr
		}

		s.Put([]byte("foo"), []byte("bar"), lease.LeaseID(i+1))

		data, err := tt.wkv.Marshal()
		if err != nil {
			t.Errorf("#%d: marshal err = %v, want nil", i, err)
		}

		// The backend must see exactly one sequential put of the marshaled kv.
		// (A redundant `if tt.rr != nil` branch that re-assigned wact to an
		// identical value has been removed as dead code.)
		wact := []testutil.Action{
			{Name: "seqput", Params: []any{schema.Key, tt.wkey, data}},
		}
		if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
		}
		// The index must be consulted for the key, then updated with the
		// revision of the new put.
		wact = []testutil.Action{
			{Name: "get", Params: []any{[]byte("foo"), tt.wputrev.Main}},
			{Name: "put", Params: []any{[]byte("foo"), tt.wputrev}},
		}
		if g := fi.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
		}
		if s.currentRev != tt.wrev.Main {
			t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
		}
		s.Close()
	}
}
// TestStoreRange verifies that Range consults the key index for matching
// revisions, fetches the corresponding key-value payloads from the backend,
// and reports the store's current revision alongside the results.
func TestStoreRange(t *testing.T) {
	lg := zaptest.NewLogger(t)
	key := newTestRevBytes(Revision{Main: 2})
	kv := mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 1,
		ModRevision:    2,
		Version:        1,
	}
	kvb, err := kv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	wrev := int64(2)
	tests := []struct {
		idxr indexRangeResp
		r    rangeResp
	}{
		{
			indexRangeResp{[][]byte{[]byte("foo")}, []Revision{{Main: 2}}},
			rangeResp{[][]byte{key}, [][]byte{kvb}},
		},
		{
			indexRangeResp{[][]byte{[]byte("foo"), []byte("foo1")}, []Revision{{Main: 2}, {Main: 3}}},
			rangeResp{[][]byte{key}, [][]byte{kvb}},
		},
	}
	ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
	for i, tt := range tests {
		s := newFakeStore(lg)
		b := s.b.(*fakeBackend)
		fi := s.kvindex.(*fakeIndex)
		s.currentRev = 2
		// Pre-load the canned responses the fake backend/index will serve.
		b.tx.rangeRespc <- tt.r
		fi.indexRangeRespc <- tt.idxr

		ret, err := s.Range(t.Context(), []byte("foo"), []byte("goo"), ro)
		if err != nil {
			t.Errorf("#%d: err = %v, want nil", i, err)
		}
		if w := []mvccpb.KeyValue{kv}; !reflect.DeepEqual(ret.KVs, w) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, ret.KVs, w)
		}
		if ret.Rev != wrev {
			t.Errorf("#%d: rev = %d, want %d", i, ret.Rev, wrev)
		}

		// The backend must have been asked for exactly the first matching revision.
		wstart := NewRevBytes()
		wstart = RevToBytes(tt.idxr.revs[0], wstart)
		wact := []testutil.Action{
			{Name: "range", Params: []any{schema.Key, wstart, []byte(nil), int64(0)}},
		}
		if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
		}
		// The index must have been ranged over [foo, goo) at the expected revision.
		wact = []testutil.Action{
			{Name: "range", Params: []any{[]byte("foo"), []byte("goo"), wrev}},
		}
		if g := fi.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
		}
		if s.currentRev != 2 {
			t.Errorf("#%d: current rev = %+v, want %+v", i, s.currentRev, 2)
		}
		s.Close()
	}
}
// TestStoreDeleteRange verifies that DeleteRange persists a tombstone record
// in the backend, records a tombstone in the key index, and bumps the
// store's current revision.
func TestStoreDeleteRange(t *testing.T) {
	lg := zaptest.NewLogger(t)
	key := newTestRevBytes(Revision{Main: 2})
	kv := mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 1,
		ModRevision:    2,
		Version:        1,
	}
	kvb, err := kv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	tests := []struct {
		rev Revision
		r   indexRangeResp
		rr  rangeResp

		wkey    []byte
		wrev    Revision
		wrrev   int64
		wdelrev Revision
	}{
		{
			Revision{Main: 2},
			indexRangeResp{[][]byte{[]byte("foo")}, []Revision{{Main: 2}}},
			rangeResp{[][]byte{key}, [][]byte{kvb}},

			newTestBucketKeyBytes(newBucketKey(3, 0, true)),
			Revision{Main: 3},
			2,
			Revision{Main: 3},
		},
	}
	for i, tt := range tests {
		s := newFakeStore(lg)
		b := s.b.(*fakeBackend)
		fi := s.kvindex.(*fakeIndex)
		s.currentRev = tt.rev.Main
		// Pre-load canned responses for the fake index and backend.
		fi.indexRangeRespc <- tt.r
		b.tx.rangeRespc <- tt.rr

		n, _ := s.DeleteRange([]byte("foo"), []byte("goo"))
		if n != 1 {
			t.Errorf("#%d: n = %d, want 1", i, n)
		}

		// A deletion is persisted as a key-value record carrying only the key.
		data, err := (&mvccpb.KeyValue{
			Key: []byte("foo"),
		}).Marshal()
		if err != nil {
			t.Errorf("#%d: marshal err = %v, want nil", i, err)
		}
		wact := []testutil.Action{
			{Name: "seqput", Params: []any{schema.Key, tt.wkey, data}},
		}
		if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
		}
		wact = []testutil.Action{
			{Name: "range", Params: []any{[]byte("foo"), []byte("goo"), tt.wrrev}},
			{Name: "tombstone", Params: []any{[]byte("foo"), tt.wdelrev}},
		}
		if g := fi.Action(); !reflect.DeepEqual(g, wact) {
			t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
		}
		if s.currentRev != tt.wrev.Main {
			t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
		}
		s.Close()
	}
}
// TestStoreCompact verifies the compaction flow against the fakes: the
// scheduled/finished compaction markers are read and written in the meta
// bucket, revisions kept by the index survive, and obsolete revisions are
// deleted from the backend key bucket.
func TestStoreCompact(t *testing.T) {
	lg := zaptest.NewLogger(t)
	s := newFakeStore(lg)
	defer s.Close()
	b := s.b.(*fakeBackend)
	fi := s.kvindex.(*fakeIndex)

	s.currentRev = 3
	// The index reports that only revision {Main: 1} must be kept.
	fi.indexCompactRespc <- map[Revision]struct{}{{Main: 1}: {}}
	key1 := newTestRevBytes(Revision{Main: 1})
	key2 := newTestRevBytes(Revision{Main: 2})
	b.tx.rangeRespc <- rangeResp{[][]byte{}, [][]byte{}}
	b.tx.rangeRespc <- rangeResp{[][]byte{}, [][]byte{}}
	b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, [][]byte{[]byte("alice"), []byte("bob")}}

	s.Compact(traceutil.TODO(), 3)
	// Compaction runs on the FIFO scheduler; wait for the job to finish.
	s.fifoSched.WaitFinish(1)

	if s.compactMainRev != 3 {
		t.Errorf("compact main rev = %d, want 3", s.compactMainRev)
	}
	end := make([]byte, 8)
	binary.BigEndian.PutUint64(end, uint64(4))
	wact := []testutil.Action{
		{Name: "range", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, []uint8(nil), int64(0)}},
		{Name: "range", Params: []any{schema.Meta, schema.FinishedCompactKeyName, []uint8(nil), int64(0)}},
		{Name: "put", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, newTestRevBytes(Revision{Main: 3})}},
		{Name: "range", Params: []any{schema.Key, make([]byte, 17), end, int64(10000)}},
		// key2 ({Main: 2}) is not in the keep set, so it is deleted; key1 survives.
		{Name: "delete", Params: []any{schema.Key, key2}},
		{Name: "put", Params: []any{schema.Meta, schema.FinishedCompactKeyName, newTestRevBytes(Revision{Main: 3})}},
	}
	if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
		t.Errorf("tx actions = %+v, want %+v", g, wact)
	}
	wact = []testutil.Action{
		{Name: "compact", Params: []any{int64(3)}},
	}
	if g := fi.Action(); !reflect.DeepEqual(g, wact) {
		t.Errorf("index action = %+v, want %+v", g, wact)
	}
}
// TestStoreRestore verifies that restore() replays backend state: it reads
// the finished/scheduled compaction markers, scans the key bucket in chunks,
// and rebuilds the key index including tombstoned generations.
func TestStoreRestore(t *testing.T) {
	lg := zaptest.NewLogger(t)
	s := newFakeStore(lg)
	b := s.b.(*fakeBackend)
	fi := s.kvindex.(*fakeIndex)
	defer s.Close()

	putkey := newTestRevBytes(Revision{Main: 3})
	putkv := mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 4,
		ModRevision:    4,
		Version:        1,
	}
	putkvb, err := putkv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	// A tombstone record carries only the key.
	delkey := newTestBucketKeyBytes(newBucketKey(5, 0, true))
	delkv := mvccpb.KeyValue{
		Key: []byte("foo"),
	}
	delkvb, err := delkv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	b.tx.rangeRespc <- rangeResp{[][]byte{schema.FinishedCompactKeyName}, [][]byte{newTestRevBytes(Revision{Main: 3})}}
	b.tx.rangeRespc <- rangeResp{[][]byte{schema.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(Revision{Main: 3})}}
	b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
	b.tx.rangeRespc <- rangeResp{nil, nil}

	s.restore()

	if s.compactMainRev != 3 {
		t.Errorf("compact rev = %d, want 3", s.compactMainRev)
	}
	if s.currentRev != 5 {
		t.Errorf("current rev = %v, want 5", s.currentRev)
	}
	wact := []testutil.Action{
		{Name: "range", Params: []any{schema.Meta, schema.FinishedCompactKeyName, []byte(nil), int64(0)}},
		{Name: "range", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, []byte(nil), int64(0)}},
		{Name: "range", Params: []any{schema.Key, newTestRevBytes(Revision{Main: 1}), newTestRevBytes(Revision{Main: math.MaxInt64, Sub: math.MaxInt64}), int64(restoreChunkKeys)}},
	}
	if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
		t.Errorf("tx actions = %+v, want %+v", g, wact)
	}
	// The rebuilt key index must contain one generation closed by the
	// tombstone at rev 5, plus the empty open generation.
	gens := []generation{
		{created: Revision{Main: 4}, ver: 2, revs: []Revision{{Main: 3}, {Main: 5}}},
		{created: Revision{Main: 0}, ver: 0, revs: nil},
	}
	ki := &keyIndex{key: []byte("foo"), modified: Revision{Main: 5}, generations: gens}
	wact = []testutil.Action{
		{Name: "keyIndex", Params: []any{ki}},
		{Name: "insert", Params: []any{ki}},
	}
	if g := fi.Action(); !reflect.DeepEqual(g, wact) {
		t.Errorf("index action = %+v, want %+v", g, wact)
	}
}
// TestRestoreDelete applies a randomized sequence of puts and deletes with a
// randomized restore chunk size, then reopens the store on the same backend
// and checks that exactly the surviving keys are still readable.
func TestRestoreDelete(t *testing.T) {
	oldChunk := restoreChunkKeys
	// Use a tiny random chunk size so restore exercises multi-chunk scans.
	restoreChunkKeys = mrand.Intn(3) + 2
	defer func() { restoreChunkKeys = oldChunk }()

	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer b.Close()

	// keys tracks the set of keys expected to exist after all mutations.
	keys := make(map[string]struct{})
	for i := 0; i < 20; i++ {
		ks := fmt.Sprintf("foo-%d", i)
		k := []byte(ks)
		s.Put(k, []byte("bar"), lease.NoLease)
		keys[ks] = struct{}{}
		switch mrand.Intn(3) {
		case 0:
			// put random key from past via random range on map
			ks = fmt.Sprintf("foo-%d", mrand.Intn(i+1))
			s.Put([]byte(ks), []byte("baz"), lease.NoLease)
			keys[ks] = struct{}{}
		case 1:
			// delete random key via random range on map
			for k := range keys {
				s.DeleteRange([]byte(k), nil)
				delete(keys, k)
				break
			}
		}
	}
	s.Close()

	// Reopen on the same backend; this triggers a restore from disk.
	s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer s.Close()
	for i := 0; i < 20; i++ {
		ks := fmt.Sprintf("foo-%d", i)
		r, err := s.Range(t.Context(), []byte(ks), nil, RangeOptions{})
		if err != nil {
			t.Fatal(err)
		}
		if _, ok := keys[ks]; ok {
			if len(r.KVs) == 0 {
				t.Errorf("#%d: expected %q, got deleted", i, ks)
			}
		} else if len(r.KVs) != 0 {
			t.Errorf("#%d: expected deleted, got %q", i, ks)
		}
	}
}
// TestRestoreContinueUnfinishedCompaction writes a scheduled-compaction
// marker without actually compacting, then reopens (or in-place restores)
// the store and checks that the pending compaction is resumed and completed.
func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
	tests := []string{"recreate", "restore"}
	for _, test := range tests {
		test := test
		t.Run(test, func(t *testing.T) {
			b, _ := betesting.NewDefaultTmpBackend(t)
			s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})

			s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
			s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
			s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease)

			// write scheduled compaction, but not do compaction
			tx := s0.b.BatchTx()
			tx.Lock()
			UnsafeSetScheduledCompact(tx, 2)
			tx.Unlock()

			var s *store
			switch test {
			case "recreate":
				s0.Close()
				s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
			case "restore":
				// TODO(fuweid): store doesn't support to restore
				// from a closed status because there is no lock
				// for `Close` or action to mark it is closed.
				s0.Restore(b)
				s = s0
			}
			defer cleanup(s, b)

			// wait for scheduled compaction to be finished
			time.Sleep(100 * time.Millisecond)

			if _, err := s.Range(t.Context(), []byte("foo"), nil, RangeOptions{Rev: 1}); !errors.Is(err, ErrCompacted) {
				t.Errorf("range on compacted rev error = %v, want %v", err, ErrCompacted)
			}
			// check the key in backend is deleted
			revbytes := NewRevBytes()
			revbytes = BucketKeyToBytes(newBucketKey(1, 0, false), revbytes)

			// The disk compaction is done asynchronously and requires more time on slow disk.
			// try 5 times for CI with slow IO.
			for i := 0; i < 5; i++ {
				tx := s.b.BatchTx()
				tx.Lock()
				ks, _ := tx.UnsafeRange(schema.Key, revbytes, nil, 0)
				tx.Unlock()
				if len(ks) != 0 {
					time.Sleep(100 * time.Millisecond)
					continue
				}
				return
			}
			t.Errorf("key for rev %+v still exists, want deleted", BytesToBucketKey(revbytes))
		})
	}
}
// hashKVResult pairs a KV hash with the compaction revision it was
// computed at, for cross-checking concurrent HashByRev calls.
type hashKVResult struct {
	hash       uint32
	compactRev int64
}
// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
func TestHashKVWhenCompacting(t *testing.T) {
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	rev := 10000
	for i := 2; i <= rev; i++ {
		s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
	}

	hashCompactc := make(chan hashKVResult, 1)
	var wg sync.WaitGroup
	donec := make(chan struct{})
	stopc := make(chan struct{})

	// Call HashByRev(10000) in multiple goroutines until donec is closed
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				hash, _, err := s.HashStorage().HashByRev(int64(rev))
				if err != nil {
					t.Error(err)
				}
				select {
				case <-stopc:
					return
				case <-donec:
					return
				case hashCompactc <- hashKVResult{hash.Hash, hash.CompactRevision}:
				}
			}
		}()
	}

	// Check computed hashes by HashByRev are correct in a goroutine, until donec is closed
	wg.Add(1)
	go func() {
		defer wg.Done()
		revHash := make(map[int64]uint32)
		for {
			select {
			case r := <-hashCompactc:
				// The hash seen at a given compact revision must never change.
				if revHash[r.compactRev] == 0 {
					revHash[r.compactRev] = r.hash
				}
				if r.hash != revHash[r.compactRev] {
					t.Errorf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev])
				}
			case <-stopc:
				return
			case <-donec:
				return
			}
		}
	}()

	// Compact the store in a goroutine, stepping the compaction revision
	// from 9900 up to 10000, and close donec when finished.
	wg.Add(1)
	go func() {
		defer func() {
			close(donec)
			wg.Done()
		}()
		for i := 100; i >= 0; i-- {
			select {
			case <-stopc:
				return
			default:
			}
			_, err := s.Compact(traceutil.TODO(), int64(rev-i))
			if err != nil {
				t.Error(err)
			}
			// Wait for the compaction job to finish
			s.fifoSched.WaitFinish(1)
			// Leave time for calls to HashByRev to take place after each compaction
			time.Sleep(10 * time.Millisecond)
		}
	}()

	select {
	case <-donec:
	case <-time.After(20 * time.Second):
		close(stopc)
		wg.Wait()
		testutil.FatalStack(t, "timeout")
	}

	close(stopc)
	wg.Wait()
}
// TestHashKVWithCompactedAndFutureRevisions ensures that HashKV returns a
// correct result when called with a past revision (lower than the compacted
// one), a future revision, and the exact compacted revision.
func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	rev := 10000
	compactRev := rev / 2

	for i := 2; i <= rev; i++ {
		s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
	}
	if _, err := s.Compact(traceutil.TODO(), int64(compactRev)); err != nil {
		t.Fatal(err)
	}

	// A revision beyond the latest must fail with ErrFutureRev.
	_, _, errFutureRev := s.HashStorage().HashByRev(int64(rev + 1))
	if !errors.Is(errFutureRev, ErrFutureRev) {
		t.Error(errFutureRev)
	}

	// A revision below the compaction point must fail with ErrCompacted.
	_, _, errPastRev := s.HashStorage().HashByRev(int64(compactRev - 1))
	if !errors.Is(errPastRev, ErrCompacted) {
		t.Error(errPastRev)
	}

	// The compacted revision itself is still hashable.
	_, _, errCompactRev := s.HashStorage().HashByRev(int64(compactRev))
	if errCompactRev != nil {
		t.Error(errCompactRev)
	}
}
// TestHashKVZeroRevision ensures that "HashByRev(0)" computes the correct
// hash value at the latest revision.
func TestHashKVZeroRevision(t *testing.T) {
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	rev := 10000
	for i := 2; i <= rev; i++ {
		s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
	}
	if _, err := s.Compact(traceutil.TODO(), int64(rev/2)); err != nil {
		t.Fatal(err)
	}

	// Hash at the explicit latest revision and at rev 0 must agree.
	hash1, _, err := s.HashStorage().HashByRev(int64(rev))
	if err != nil {
		t.Fatal(err)
	}
	var hash2 KeyValueHash
	hash2, _, err = s.HashStorage().HashByRev(0)
	if err != nil {
		t.Fatal(err)
	}
	if hash1 != hash2 {
		t.Errorf("hash %d (rev %d) != hash %d (rev 0)", hash1, rev, hash2)
	}
}
// TestTxnPut checks that each write transaction's Put reports the expected
// monotonically increasing revision (the first write lands on rev 2).
func TestTxnPut(t *testing.T) {
	// Arbitrary sizes for the generated key/value fixtures.
	const (
		bytesN = 30
		sliceN = 100
	)
	keys := createBytesSlice(bytesN, sliceN)
	vals := createBytesSlice(bytesN, sliceN)

	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	for i := range keys {
		wantRev := int64(i + 2)
		wtx := s.Write(traceutil.TODO())
		if rev := wtx.Put(keys[i], vals[i], lease.NoLease); rev != wantRev {
			t.Errorf("#%d: rev = %d, want %d", i, rev, wantRev)
		}
		wtx.End()
	}
}
// TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation
func TestConcurrentReadNotBlockingWrite(t *testing.T) {
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	// write something to read later
	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)

	// readTx simulates a long read request
	readTx1 := s.Read(ConcurrentReadTxMode, traceutil.TODO())

	// write should not be blocked by reads
	done := make(chan struct{}, 1)
	go func() {
		s.Put([]byte("foo"), []byte("newBar"), lease.NoLease) // this is a write Txn
		done <- struct{}{}
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Fatalf("write should not be blocked by read")
	}

	// readTx2 simulates a short read request
	readTx2 := s.Read(ConcurrentReadTxMode, traceutil.TODO())
	ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
	ret, err := readTx2.Range(t.Context(), []byte("foo"), nil, ro)
	if err != nil {
		t.Fatalf("failed to range: %v", err)
	}
	// readTx2 should see the result of new write
	w := mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("newBar"),
		CreateRevision: 2,
		ModRevision:    3,
		Version:        2,
	}
	if !reflect.DeepEqual(ret.KVs[0], w) {
		t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
	}
	readTx2.End()

	ret, err = readTx1.Range(t.Context(), []byte("foo"), nil, ro)
	if err != nil {
		t.Fatalf("failed to range: %v", err)
	}
	// readTx1 should not see the result of new write, because it holds a
	// snapshot taken before the concurrent Put above.
	w = mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 2,
		ModRevision:    2,
		Version:        1,
	}
	if !reflect.DeepEqual(ret.KVs[0], w) {
		t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
	}
	readTx1.End()
}
// TestConcurrentReadTxAndWrite creates random concurrent Reads and Writes, and ensures Reads always see latest Writes
func TestConcurrentReadTxAndWrite(t *testing.T) {
	var (
		numOfReads           = 100
		numOfWrites          = 100
		maxNumOfPutsPerWrite = 10
		committedKVs         kvs        // committedKVs records the key-value pairs written by the finished Write Txns
		mu                   sync.Mutex // mu protects committedKVs
	)
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	var wg sync.WaitGroup
	wg.Add(numOfWrites)
	for i := 0; i < numOfWrites; i++ {
		go func() {
			defer wg.Done()
			time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time

			tx := s.Write(traceutil.TODO())
			numOfPuts := mrand.Intn(maxNumOfPutsPerWrite) + 1
			var pendingKvs kvs
			for j := 0; j < numOfPuts; j++ {
				k := []byte(strconv.Itoa(mrand.Int()))
				v := []byte(strconv.Itoa(mrand.Int()))
				tx.Put(k, v, lease.NoLease)
				pendingKvs = append(pendingKvs, kv{k, v})
			}
			// reads should not see above Puts until write is finished
			// (End() is called while holding mu so committedKVs and the
			// store commit stay in sync).
			mu.Lock()
			committedKVs = merge(committedKVs, pendingKvs) // update shared data structure
			tx.End()
			mu.Unlock()
		}()
	}

	wg.Add(numOfReads)
	for i := 0; i < numOfReads; i++ {
		go func() {
			defer wg.Done()
			time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time

			// Snapshot the expected state and open the read tx atomically.
			mu.Lock()
			wKVs := make(kvs, len(committedKVs))
			copy(wKVs, committedKVs)
			tx := s.Read(ConcurrentReadTxMode, traceutil.TODO())
			mu.Unlock()
			// get all keys in backend store, and compare with wKVs
			ret, err := tx.Range(t.Context(), []byte("\x00000000"), []byte("\xffffffff"), RangeOptions{})
			tx.End()
			if err != nil {
				t.Errorf("failed to range keys: %v", err)
				return
			}
			if len(wKVs) == 0 && len(ret.KVs) == 0 { // no committed KVs yet
				return
			}
			var result kvs
			for _, keyValue := range ret.KVs {
				result = append(result, kv{keyValue.Key, keyValue.Value})
			}
			if !reflect.DeepEqual(wKVs, result) {
				t.Errorf("unexpected range result") // too many key value pairs, skip printing them
			}
		}()
	}

	// wait until goroutines finish or timeout
	doneC := make(chan struct{})
	go func() {
		wg.Wait()
		close(doneC)
	}()
	select {
	case <-doneC:
	case <-time.After(5 * time.Minute):
		testutil.FatalStack(t, "timeout")
	}
}
// kv is a lightweight key-value pair fixture used by the concurrency tests.
type kv struct {
	key []byte
	val []byte
}

// kvs implements sort.Interface, ordering pairs by key ascending.
type kvs []kv

func (kvs kvs) Len() int           { return len(kvs) }
func (kvs kvs) Less(i, j int) bool { return bytes.Compare(kvs[i].key, kvs[j].key) < 0 }
func (kvs kvs) Swap(i, j int)      { kvs[i], kvs[j] = kvs[j], kvs[i] }
// merge returns dst with src appended, sorted by key, and deduplicated so
// that only the most recently appended value survives for each key
// (mirroring the buffer-merge semantics; ref: tx_buffer.go).
func merge(dst, src kvs) kvs {
	dst = append(dst, src...)
	sort.Stable(dst)

	// Guard the empty case: the dedup pass below returns dst[:widx+1],
	// which would panic with an out-of-range slice on a zero-length input.
	if len(dst) == 0 {
		return dst
	}

	// Compact in place: within each run of equal keys, keep the last
	// (newest) entry; sort.Stable preserved the append order within runs.
	widx := 0
	for ridx := 1; ridx < len(dst); ridx++ {
		if !bytes.Equal(dst[widx].key, dst[ridx].key) {
			widx++
		}
		dst[widx] = dst[ridx]
	}
	return dst[:widx+1]
}
// TODO: test attach key to lessor

// newTestRevBytes encodes rev into a freshly allocated revision-key buffer.
func newTestRevBytes(rev Revision) []byte {
	bytes := NewRevBytes()
	return RevToBytes(rev, bytes)
}
// newTestBucketKeyBytes encodes a bucket key (revision plus tombstone
// marker) into a freshly allocated buffer.
func newTestBucketKeyBytes(rev BucketKey) []byte {
	bytes := NewRevBytes()
	return BucketKeyToBytes(rev, bytes)
}
// newFakeStore builds a store wired to a fake backend and a fake index whose
// responses are fed through buffered channels, so tests can script exactly
// what each backend/index call returns and assert the recorded actions.
func newFakeStore(lg *zap.Logger) *store {
	b := &fakeBackend{&fakeBatchTx{
		Recorder:   &testutil.RecorderBuffered{},
		rangeRespc: make(chan rangeResp, 5),
	}}
	s := &store{
		cfg: StoreConfig{
			CompactionBatchLimit:    10000,
			CompactionSleepInterval: defaultCompactionSleepInterval,
		},
		b:              b,
		le:             &lease.FakeLessor{},
		kvindex:        newFakeIndex(),
		currentRev:     0,
		compactMainRev: -1, // -1 marks "no compaction has happened yet"
		fifoSched:      schedule.NewFIFOScheduler(lg),
		stopc:          make(chan struct{}),
		lg:             lg,
	}
	s.ReadView, s.WriteView = &readView{s}, &writeView{s}
	s.hashes = NewHashStorage(lg, s)
	return s
}
// newFakeIndex returns a fakeIndex with single-slot buffered response
// channels; tests pre-load one response per expected index call.
func newFakeIndex() *fakeIndex {
	return &fakeIndex{
		Recorder:              &testutil.RecorderBuffered{},
		indexGetRespc:         make(chan indexGetResp, 1),
		indexRangeRespc:       make(chan indexRangeResp, 1),
		indexRangeEventsRespc: make(chan indexRangeEventsResp, 1),
		indexCompactRespc:     make(chan map[Revision]struct{}, 1),
	}
}
// rangeResp is a canned response served by fakeBatchTx.UnsafeRange.
type rangeResp struct {
	keys [][]byte
	vals [][]byte
}
// fakeBatchTx records every mutating call as a testutil Action and serves
// reads from the pre-loaded rangeRespc channel.  Locking and commit
// operations are deliberate no-ops.
type fakeBatchTx struct {
	testutil.Recorder
	rangeRespc chan rangeResp
}

func (b *fakeBatchTx) LockInsideApply()                          {}
func (b *fakeBatchTx) LockOutsideApply()                         {}
func (b *fakeBatchTx) Lock()                                     {}
func (b *fakeBatchTx) Unlock()                                   {}
func (b *fakeBatchTx) RLock()                                    {}
func (b *fakeBatchTx) RUnlock()                                  {}
func (b *fakeBatchTx) UnsafeCreateBucket(bucket backend.Bucket)  {}
func (b *fakeBatchTx) UnsafeDeleteBucket(bucket backend.Bucket)  {}
func (b *fakeBatchTx) UnsafePut(bucket backend.Bucket, key []byte, value []byte) {
	b.Recorder.Record(testutil.Action{Name: "put", Params: []any{bucket, key, value}})
}
func (b *fakeBatchTx) UnsafeSeqPut(bucket backend.Bucket, key []byte, value []byte) {
	b.Recorder.Record(testutil.Action{Name: "seqput", Params: []any{bucket, key, value}})
}
// UnsafeRange records the call and blocks on rangeRespc until a canned
// response is available.
func (b *fakeBatchTx) UnsafeRange(bucket backend.Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
	b.Recorder.Record(testutil.Action{Name: "range", Params: []any{bucket, key, endKey, limit}})
	r := <-b.rangeRespc
	return r.keys, r.vals
}
func (b *fakeBatchTx) UnsafeDelete(bucket backend.Bucket, key []byte) {
	b.Recorder.Record(testutil.Action{Name: "delete", Params: []any{bucket, key}})
}
func (b *fakeBatchTx) UnsafeForEach(bucket backend.Bucket, visitor func(k, v []byte) error) error {
	return nil
}
func (b *fakeBatchTx) Commit()        {}
func (b *fakeBatchTx) CommitAndStop() {}
// fakeBackend satisfies backend.Backend with a single fakeBatchTx serving
// as every transaction type; size/lifecycle methods return zero values.
type fakeBackend struct {
	tx *fakeBatchTx
}

func (b *fakeBackend) BatchTx() backend.BatchTx                                   { return b.tx }
func (b *fakeBackend) ReadTx() backend.ReadTx                                     { return b.tx }
func (b *fakeBackend) ConcurrentReadTx() backend.ReadTx                           { return b.tx }
func (b *fakeBackend) Hash(func(bucketName, keyName []byte) bool) (uint32, error) { return 0, nil }
func (b *fakeBackend) Size() int64                                                { return 0 }
func (b *fakeBackend) SizeInUse() int64                                           { return 0 }
func (b *fakeBackend) OpenReadTxN() int64                                         { return 0 }
func (b *fakeBackend) Snapshot() backend.Snapshot                                 { return nil }
func (b *fakeBackend) ForceCommit()                                               {}
func (b *fakeBackend) Defrag() error                                              { return nil }
func (b *fakeBackend) Close() error                                               { return nil }
func (b *fakeBackend) SetTxPostLockInsideApplyHook(func())                        {}
// indexGetResp is a canned response for fakeIndex.Get.
type indexGetResp struct {
	rev     Revision
	created Revision
	ver     int64
	err     error
}

// indexRangeResp is a canned response for fakeIndex.Range.
type indexRangeResp struct {
	keys [][]byte
	revs []Revision
}

// indexRangeEventsResp is a canned response for fakeIndex.RangeSince.
type indexRangeEventsResp struct {
	revs []Revision
}

// fakeIndex records every index call as a testutil Action and serves the
// responses pre-loaded on its channels.
type fakeIndex struct {
	testutil.Recorder
	indexGetRespc         chan indexGetResp
	indexRangeRespc       chan indexRangeResp
	indexRangeEventsRespc chan indexRangeEventsResp
	indexCompactRespc     chan map[Revision]struct{}
}
// Revisions delegates to Range and truncates the result to limit entries.
// NOTE(review): a non-positive limit truncates to an empty slice here --
// confirm callers in these tests always pass limit > 0.
func (i *fakeIndex) Revisions(key, end []byte, atRev int64, limit int) ([]Revision, int) {
	_, rev := i.Range(key, end, atRev)
	if len(rev) >= limit {
		rev = rev[:limit]
	}
	return rev, len(rev)
}

// CountRevisions returns the number of revisions Range reports for the span.
func (i *fakeIndex) CountRevisions(key, end []byte, atRev int64) int {
	_, rev := i.Range(key, end, atRev)
	return len(rev)
}

// Get records the call and serves the next canned indexGetResp.
func (i *fakeIndex) Get(key []byte, atRev int64) (rev, created Revision, ver int64, err error) {
	i.Recorder.Record(testutil.Action{Name: "get", Params: []any{key, atRev}})
	r := <-i.indexGetRespc
	return r.rev, r.created, r.ver, r.err
}

// Range records the call and serves the next canned indexRangeResp.
func (i *fakeIndex) Range(key, end []byte, atRev int64) ([][]byte, []Revision) {
	i.Recorder.Record(testutil.Action{Name: "range", Params: []any{key, end, atRev}})
	r := <-i.indexRangeRespc
	return r.keys, r.revs
}

func (i *fakeIndex) Put(key []byte, rev Revision) {
	i.Recorder.Record(testutil.Action{Name: "put", Params: []any{key, rev}})
}

func (i *fakeIndex) Tombstone(key []byte, rev Revision) error {
	i.Recorder.Record(testutil.Action{Name: "tombstone", Params: []any{key, rev}})
	return nil
}

// RangeSince records the call and serves the next canned events response.
func (i *fakeIndex) RangeSince(key, end []byte, rev int64) []Revision {
	i.Recorder.Record(testutil.Action{Name: "rangeEvents", Params: []any{key, end, rev}})
	r := <-i.indexRangeEventsRespc
	return r.revs
}

// Compact and Keep both serve from indexCompactRespc; pre-load one map per
// expected call.
func (i *fakeIndex) Compact(rev int64) map[Revision]struct{} {
	i.Recorder.Record(testutil.Action{Name: "compact", Params: []any{rev}})
	return <-i.indexCompactRespc
}

func (i *fakeIndex) Keep(rev int64) map[Revision]struct{} {
	i.Recorder.Record(testutil.Action{Name: "keep", Params: []any{rev}})
	return <-i.indexCompactRespc
}

func (i *fakeIndex) Equal(b index) bool { return false }

func (i *fakeIndex) Insert(ki *keyIndex) {
	i.Recorder.Record(testutil.Action{Name: "insert", Params: []any{ki}})
}

// KeyIndex records the call and always reports "not found" (nil).
func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex {
	i.Recorder.Record(testutil.Action{Name: "keyIndex", Params: []any{ki}})
	return nil
}
// createBytesSlice returns sliceN random byte slices of bytesN bytes each,
// panicking if the random source fails.
func createBytesSlice(bytesN, sliceN int) [][]byte {
	out := make([][]byte, 0, sliceN)
	for len(out) != sliceN {
		buf := make([]byte, bytesN)
		if _, err := rand.Read(buf); err != nil {
			panic(err)
		}
		out = append(out, buf)
	}
	return out
}
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_zplsc_c_dcl
@file mi-dataset/mi/dataset/parser/test/test_zplsc_c_dcl.py
@author Richard Han (Raytheon), Ronald Ronquillo (Raytheon)
@brief Test code for a zplsc_c_dcl data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.zplsc_c.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.parser.zplsc_c_dcl import ZplscCDclParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
MODULE_NAME = 'mi.dataset.parser.zplsc_c_dcl'
CLASS_NAME = 'ZplscCInstrumentDataParticle'
PARTICLE_TYPE = 'zplsc_c_instrument'
@attr('UNIT', group='mi')
class ZplscCDclParserUnitTestCase(ParserUnitTestCase):
    """
    Zplsc_c_dcl Parser unit test suite
    """

    def create_zplsc_c_dcl_parser(self, file_handle):
        """Construct a ZplscCDclParser over *file_handle* for recovered data."""
        return ZplscCDclParser(self.config, file_handle, self.rec_exception_callback)

    def file_path(self, filename):
        """Return the absolute path of *filename* inside the resource directory."""
        log.debug('resource path = %s, file name = %s', RESOURCE_PATH, filename)
        return os.path.join(RESOURCE_PATH, filename)

    def create_yml(self, particles, filename):
        """Serialize *particles* to a YAML file in the resource directory."""
        particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))

    def rec_exception_callback(self, exception):
        """Record an exception delivered through the parser's callback."""
        self.exception_callback_value.append(exception)
        self.exceptions_detected += 1

    def setUp(self):
        """Reset parser config and exception bookkeeping before each test."""
        ParserUnitTestCase.setUp(self)

        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
            DataSetDriverConfigKeys.PARTICLE_CLASS: CLASS_NAME
        }
        self.exception_callback_value = []
        self.exceptions_detected = 0

    def test_zplsc_c_dcl_parser(self):
        """
        Test Zplsc C DCL parser
        Just test that it is able to parse the file and records are generated.
        """
        log.debug('===== START TEST ZPLSC_C_DCL Parser =====')

        with open(self.file_path('20150406.zplsc.log')) as log_file:
            parser = self.create_zplsc_c_dcl_parser(log_file)

            # In a single read, get all particles in this file.
            records = parser.get_records(15)

            self.assertEqual(len(records), 1)
            self.assertListEqual(self.exception_callback_value, [])

        log.debug('===== END TEST ZPLSC_C_DCL Parser =====')

    def test_telem(self):
        """
        Read a file and pull out a data particle.
        Verify that the results are those we expected.
        """
        log.debug('===== START TEST TELEM =====')

        with open(self.file_path('20150407.zplsc.log')) as log_file:
            parser = self.create_zplsc_c_dcl_parser(log_file)

            # In a single read, get all particles for this file.
            records = parser.get_records(15)

            self.assertEqual(len(records), 15)
            self.assert_particles(records, '20150407.zplsc.yml', RESOURCE_PATH)
            self.assertListEqual(self.exception_callback_value, [])

        log.debug('===== END TEST TELEM =====')

    def test_variable_num_of_channels(self):
        """
        Read a file and pull out a data particle.
        Verify that the results are those we expected.
        All test log files usually contain 4 channels with 19 bins each.
        This tests a manually edited log file to exercise the logic for handling a variable
        number of channels and number of bins.
        """
        log.debug('===== START TEST VARIABLE NUM OF CHANNELS =====')

        with open(self.file_path('20150407.zplsc_var_channels.log')) as log_file:
            parser = self.create_zplsc_c_dcl_parser(log_file)

            # In a single read, get all particles for this file.
            records = parser.get_records(15)

            self.assertEqual(len(records), 15)
            self.assert_particles(records, '20150407.zplsc_var_channels.yml', RESOURCE_PATH)
            self.assertListEqual(self.exception_callback_value, [])

        log.debug('===== END TEST VARIABLE NUM OF CHANNELS =====')

    def test_bad_data(self):
        """
        Ensure that bad data is skipped when it exists.
        See '20150407.zplsc_corrupt.log' file for line by line details of expected errors.
        """
        log.debug('===== START TEST BAD DATA =====')

        with open(self.file_path('20150407.zplsc_corrupt.log')) as log_file:
            parser = self.create_zplsc_c_dcl_parser(log_file)

            # In a single read, get all particles for this file.
            records = parser.get_records(100)

            self.assertEqual(len(records), 1)
            self.assertEqual(len(self.exception_callback_value), 6)

            for error in self.exception_callback_value:
                log.debug('Exception: %s', error)

        log.debug('===== END TEST BAD DATA =====')

    def test_bug_9692(self):
        """
        Test to verify change made works with DCL
        timestamps containing seconds >59
        """
        with open(self.file_path('20150407A.zplsc.log')) as log_file:
            parser = self.create_zplsc_c_dcl_parser(log_file)

            # In a single read, get all particles for this file.
            records = parser.get_records(5)

            self.assertEqual(len(records), 3)
            self.assertListEqual(self.exception_callback_value, [])
"""
Support for Clementine Music Player as media player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.clementine/
"""
import asyncio
from datetime import timedelta
import logging
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, PLATFORM_SCHEMA,
SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE, SUPPORT_PLAY, MEDIA_TYPE_MUSIC,
SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_ACCESS_TOKEN,
STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_UNKNOWN)
REQUIREMENTS = ['python-clementine-remote==1.0.1']
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Clementine Remote'
DEFAULT_PORT = 5500
SUPPORT_CLEMENTINE = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_VOLUME_SET | \
SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN, default=None): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Clementine platform.

    Creates a reconnecting ClementineRemote client for the configured
    host/port (optionally with an access token) and registers a single
    media-player entity using the configured friendly name.
    """
    # Imported here so the dependency is only loaded when the platform is used.
    from clementineremote import ClementineRemote
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    token = config.get(CONF_ACCESS_TOKEN)

    client = ClementineRemote(host, port, token, reconnect=True)

    add_devices([ClementineDevice(client, config[CONF_NAME])])
class ClementineDevice(MediaPlayerDevice):
"""Representation of Clementine Player."""
    def __init__(self, client, name):
        """Initialize the Clementine device.

        :param client: connected ClementineRemote instance used for all I/O.
        :param name: friendly name shown in the Home Assistant UI.
        """
        self._client = client
        self._name = name
        self._muted = False
        self._volume = 0.0  # raw client volume (scaled to 0..1 in volume_level)
        self._track_id = 0
        self._last_track_id = 0
        self._track_name = ''
        self._track_artist = ''
        self._track_album_name = ''
        self._state = STATE_UNKNOWN  # refined on the first update() call
def update(self):
"""Retrieve the latest data from the Clementine Player."""
try:
client = self._client
if client.state == 'Playing':
self._state = STATE_PLAYING
elif client.state == 'Paused':
self._state = STATE_PAUSED
elif client.state == 'Disconnected':
self._state = STATE_OFF
else:
self._state = STATE_PAUSED
if client.last_update and (time.time() - client.last_update > 40):
self._state = STATE_OFF
self._volume = float(client.volume) if client.volume else 0.0
if client.current_track:
self._track_id = client.current_track['track_id']
self._track_name = client.current_track['title']
self._track_artist = client.current_track['track_artist']
self._track_album_name = client.current_track['track_album']
except:
self._state = STATE_OFF
raise
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return current source name."""
source_name = "Unknown"
client = self._client
if client.active_playlist_id in client.playlists:
source_name = client.playlists[client.active_playlist_id]['name']
return source_name
@property
def source_list(self):
"""List of available input sources."""
source_names = [s["name"] for s in self._client.playlists.values()]
return source_names
def select_source(self, source):
"""Select input source."""
client = self._client
sources = [s for s in client.playlists.values() if s['name'] == source]
if len(sources) == 1:
client.change_song(sources[0]['id'], 0)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CLEMENTINE
@property
def media_image_hash(self):
"""Hash value for media image."""
if self._client.current_track:
return self._client.current_track['track_id']
return None
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._client.current_track:
image = bytes(self._client.current_track['art'])
return (image, 'image/png')
return None, None
def volume_up(self):
"""Volume up the media player."""
newvolume = min(self._client.volume + 4, 100)
self._client.set_volume(newvolume)
def volume_down(self):
"""Volume down media player."""
newvolume = max(self._client.volume - 4, 0)
self._client.set_volume(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
self._client.set_volume(0)
def set_volume_level(self, volume):
"""Set volume level."""
self._client.set_volume(int(100 * volume))
def media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.next()
def media_previous_track(self):
"""Send the previous track command."""
self._client.previous() | unknown | codeparrot/codeparrot-clean | ||
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/snps,dma-spear1340.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Synopsys Designware DMA Controller
maintainers:
- Viresh Kumar <vireshk@kernel.org>
- Andy Shevchenko <andriy.shevchenko@linux.intel.com>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
oneOf:
- const: snps,dma-spear1340
- items:
- enum:
- renesas,r9a06g032-dma
- const: renesas,rzn1-dma
"#dma-cells":
minimum: 3
maximum: 4
description: |
First cell is a phandle pointing to the DMA controller. Second one is
the DMA request line number. Third cell is the memory master identifier
for transfers on dynamically allocated channel. Fourth cell is the
peripheral master identifier for transfers on an allocated channel. Fifth
cell is an optional mask of the DMA channels permitted to be allocated
for the corresponding client device.
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
clock-names:
description: AHB interface reference clock.
const: hclk
dma-channels:
description: |
Number of DMA channels supported by the controller. In case if
not specified the driver will try to auto-detect this and
the rest of the optional parameters.
minimum: 1
maximum: 8
dma-requests:
minimum: 1
maximum: 16
dma-masters:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
Number of DMA masters supported by the controller. In case if
not specified the driver will try to auto-detect this and
the rest of the optional parameters.
minimum: 1
maximum: 4
chan_allocation_order:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
DMA channels allocation order specifier. Zero means ascending order
(first free allocated), while one - descending (last free allocated).
default: 0
enum: [0, 1]
chan_priority:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
DMA channels priority order. Zero means ascending channels priority
so the very first channel has the highest priority. While 1 means
descending priority (the last channel has the highest priority).
default: 0
enum: [0, 1]
block_size:
$ref: /schemas/types.yaml#/definitions/uint32
description: Maximum block size supported by the DMA controller.
enum: [3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095]
data-width:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Data bus width per each DMA master in bytes.
minItems: 1
maxItems: 4
items:
enum: [4, 8, 16, 32]
data_width:
$ref: /schemas/types.yaml#/definitions/uint32-array
deprecated: true
description: |
  Data bus width per each DMA master in (2^n * 8) bits. This property is
  deprecated; its usage is discouraged in favor of the data-width property.
  Moreover this property incorrectly permits defining data-bus widths of 8
  and 16 bits, which is impossible according to the DW DMAC IP-core data
  book.
minItems: 1
maxItems: 4
items:
enum:
- 0 # 8 bits
- 1 # 16 bits
- 2 # 32 bits
- 3 # 64 bits
- 4 # 128 bits
- 5 # 256 bits
default: 0
multi-block:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: |
LLP-based multi-block transfer supported by hardware per
each DMA channel.
minItems: 1
maxItems: 8
items:
enum: [0, 1]
default: 1
snps,max-burst-len:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Maximum length of the burst transactions supported by the controller.
This property defines the upper limit of the run-time burst setting
(CTLx.SRC_MSIZE/CTLx.DST_MSIZE fields) so the allowed burst length
will be from 1 to max-burst-len words. It's an array property with one
cell per channel in the units determined by the value set in the
CTLx.SRC_TR_WIDTH/CTLx.DST_TR_WIDTH fields (data width).
minItems: 1
maxItems: 8
items:
enum: [4, 8, 16, 32, 64, 128, 256]
default: 256
snps,dma-protection-control:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
Bits one-to-one passed to the AHB HPROT[3:1] bus. Each bit setting
indicates the following features: bit 0 - privileged mode,
bit 1 - DMA is bufferable, bit 2 - DMA is cacheable.
default: 0
minimum: 0
maximum: 7
unevaluatedProperties: false
required:
- compatible
- "#dma-cells"
- reg
- interrupts
examples:
- |
dma-controller@fc000000 {
compatible = "snps,dma-spear1340";
reg = <0xfc000000 0x1000>;
interrupt-parent = <&vic1>;
interrupts = <12>;
dma-channels = <8>;
dma-requests = <16>;
dma-masters = <4>;
#dma-cells = <3>;
chan_allocation_order = <1>;
chan_priority = <1>;
block_size = <0xfff>;
data-width = <8 8>;
multi-block = <0 0 0 0 0 0 0 0>;
snps,max-burst-len = <16 16 4 4 4 4 4 4>;
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml |
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
# Shared x axis (0..12) for the bar charts; ``xrange`` pins this file to Python 2.
x_axis = xrange(13)
# Reusable vector of 13 zeros used to initialise the bar heights.
zeros_13 = zeros(13)
class Animation(object):
    """Animated visualisation of the pairwise relations between three
    temporal events while event B slides along the time axis.

    Figure rows: A*B bars, B*C bars, A*C bars, and the membership
    functions of A, B and C.
    """

    def __init__(self, event_a, event_b, event_c, plt=plt):
        self.event_a = event_a
        self.event_c = event_c
        # B is re-created every frame, so only its segment lengths are kept:
        # start->beginning, start->ending, and the trailing segment.
        self.event_b_length_beginning = event_b.beginning - event_b.a
        self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
        # NOTE(review): despite the name this is ``b - ending`` (the final
        # segment only), yet ``animate`` uses it as B's full length -- confirm.
        self.event_b_length_total = event_b.b - event_b.ending
        self.plt = plt

        # Four stacked subplots: three bar charts plus the membership lines.
        self.fig = plt.figure(1)
        self.ax_a_b = self.fig.add_subplot(4, 1, 1)
        self.ax_b_c = self.fig.add_subplot(4, 1, 2)
        self.ax_a_c = self.fig.add_subplot(4, 1, 3)
        self.ax_relations = self.fig.add_subplot(4, 1, 4)

        self.ax_a_b.set_xlim(0, 13)
        self.ax_a_b.set_ylim(0, 1)
        self.ax_b_c.set_xlim(0, 13)
        self.ax_b_c.set_ylim(0, 1)
        self.ax_a_c.set_xlim(0, 13)
        self.ax_a_c.set_ylim(0, 1)

        # One bar per relation component (13 in total), initially all zero.
        self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
        self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
        self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)

        self.line_a = Line2D([], [])
        self.line_b = Line2D([], [])
        self.line_c = Line2D([], [])
        self.ax_relations.add_line(self.line_a)
        self.ax_relations.add_line(self.line_b)
        self.ax_relations.add_line(self.line_c)

        # Time window wide enough for B to slide fully past both A and C.
        a = min(event_a.a, event_c.a) - self.event_b_length_total
        b = max(event_a.b, event_c.b)
        self.ax_relations.set_xlim(a, b + self.event_b_length_total)
        self.ax_relations.set_ylim(0, 1.1)

        # self.interval = TimeInterval(a, b, 150)
        # Only 2 animation steps; the 150-step version above is commented out.
        self.interval = TimeInterval(a, b, 2)

        # Label major/minor ticks with one letter per Allen-style relation
        # ('poDedOP' on majors, 'mFsSfM' on minors).
        self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
        self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_a_b.xaxis.set_ticklabels('poDedOP')
        self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)

        self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
        self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_b_c.xaxis.set_ticklabels('poDedOP')
        self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)

        self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
        self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_a_c.xaxis.set_ticklabels('poDedOP')
        self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)

    def init(self):
        """Reset all artists to the frame-zero state for FuncAnimation."""
        artists = []

        # A and C are static; B starts empty and is drawn per frame.
        self.line_a.set_data(self.event_a, self.event_a.membership_function)
        self.line_b.set_data([], [])
        self.line_c.set_data(self.event_c, self.event_c.membership_function)
        artists.append(self.line_a)
        artists.append(self.line_b)
        artists.append(self.line_c)

        for rect, h in zip(self.rects_a_b, zeros_13):
            rect.set_height(h)
            artists.append(rect)

        for rect, h in zip(self.rects_b_c, zeros_13):
            rect.set_height(h)
            artists.append(rect)

        # A*C does not depend on B, so it can be drawn once here.
        for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
            rect.set_height(h)
            artists.append(rect)

        return artists

    def animate(self, t):
        """Place B at ``interval[t]`` and redraw the B-dependent artists."""
        interval = self.interval
        B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
                                   interval[t] + self.event_b_length_beginning,
                                   interval[t] + self.event_b_length_middle)
        # NOTE(review): opening a new figure and showing B on every frame
        # looks like leftover debugging -- confirm before removing.
        plt.figure()
        B.plot().show()
        a_b = (self.event_a * B).to_list()
        b_c = (B * self.event_c).to_list()
        self.line_b.set_data(B, B.membership_function)

        artists = []

        for rect, h in zip(self.rects_a_b, a_b):
            rect.set_height(h)
            artists.append(rect)

        for rect, h in zip(self.rects_b_c, b_c):
            rect.set_height(h)
            artists.append(rect)

        artists.append(self.line_a)
        artists.append(self.line_b)
        artists.append(self.line_c)
        return artists

    def show(self):
        """Run the animation and block in ``plt.show()``."""
        fr = len(self.interval) - 1
        # NOTE(review): the FuncAnimation object must stay referenced while
        # showing, hence the local binding even though it looks unused.
        anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
                                       frames=fr, interval=fr, blit=True)
        self.plt.show()
if __name__ == '__main__':
    # Demo: three overlapping trapezium events; the second argument (B) is
    # the event that slides along the timeline during the animation.
    anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
                     TemporalEventTrapezium(0, 10, 6, 9),
                     TemporalEventTrapezium(0.5, 11, 1, 3))
    # anim.show()
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: metalink
Help: Process given URLs as metalink XML file
Added: 7.27.0
Category: deprecated
Multi: single
See-also:
- parallel
Example:
- --metalink file $URL
---
# `--metalink`
This option was previously used to specify a Metalink resource. Metalink
support has been disabled in curl for security reasons (since 7.78.0).
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\HttpFoundation;
/**
* ServerBag is a container for HTTP headers from the $_SERVER variable.
*
* @author Fabien Potencier <fabien@symfony.com>
* @author Bulat Shakirzyanov <mallluhuct@gmail.com>
* @author Robert Kiss <kepten@gmail.com>
*/
class ServerBag extends ParameterBag
{
    /**
     * Gets the HTTP headers.
     *
     * Extracts HTTP_* entries (prefix stripped) plus the CONTENT_* trio from
     * the raw $_SERVER parameters, then reconstructs authentication headers
     * that PHP SAPIs expose in several different ways.
     */
    public function getHeaders(): array
    {
        $headers = [];
        foreach ($this->parameters as $key => $value) {
            if (str_starts_with($key, 'HTTP_')) {
                $headers[substr($key, 5)] = $value;
            } elseif (\in_array($key, ['CONTENT_TYPE', 'CONTENT_LENGTH', 'CONTENT_MD5'], true) && '' !== $value) {
                // CONTENT_* have no HTTP_ prefix but are real headers too.
                $headers[$key] = $value;
            }
        }

        if (isset($this->parameters['PHP_AUTH_USER'])) {
            // The SAPI already decoded Basic auth for us.
            $headers['PHP_AUTH_USER'] = $this->parameters['PHP_AUTH_USER'];
            $headers['PHP_AUTH_PW'] = $this->parameters['PHP_AUTH_PW'] ?? '';
        } else {
            /*
             * php-cgi under Apache does not pass HTTP Basic user/pass to PHP by default
             * For this workaround to work, add these lines to your .htaccess file:
             * RewriteCond %{HTTP:Authorization} .+
             * RewriteRule ^ - [E=HTTP_AUTHORIZATION:%0]
             *
             * A sample .htaccess file:
             * RewriteEngine On
             * RewriteCond %{HTTP:Authorization} .+
             * RewriteRule ^ - [E=HTTP_AUTHORIZATION:%0]
             * RewriteCond %{REQUEST_FILENAME} !-f
             * RewriteRule ^(.*)$ index.php [QSA,L]
             */
            $authorizationHeader = null;
            if (isset($this->parameters['HTTP_AUTHORIZATION'])) {
                $authorizationHeader = $this->parameters['HTTP_AUTHORIZATION'];
            } elseif (isset($this->parameters['REDIRECT_HTTP_AUTHORIZATION'])) {
                $authorizationHeader = $this->parameters['REDIRECT_HTTP_AUTHORIZATION'];
            }

            if (null !== $authorizationHeader) {
                if (0 === stripos($authorizationHeader, 'basic ')) {
                    // Decode AUTHORIZATION header into PHP_AUTH_USER and PHP_AUTH_PW when authorization header is basic
                    $exploded = explode(':', base64_decode(substr($authorizationHeader, 6)), 2);
                    if (2 == \count($exploded)) {
                        [$headers['PHP_AUTH_USER'], $headers['PHP_AUTH_PW']] = $exploded;
                    }
                } elseif (empty($this->parameters['PHP_AUTH_DIGEST']) && (0 === stripos($authorizationHeader, 'digest '))) {
                    // In some circumstances PHP_AUTH_DIGEST needs to be set
                    $headers['PHP_AUTH_DIGEST'] = $authorizationHeader;
                    $this->parameters['PHP_AUTH_DIGEST'] = $authorizationHeader;
                } elseif (0 === stripos($authorizationHeader, 'bearer ')) {
                    /*
                     * XXX: Since there is no PHP_AUTH_BEARER in PHP predefined variables,
                     * I'll just set $headers['AUTHORIZATION'] here.
                     * https://php.net/reserved.variables.server
                     */
                    $headers['AUTHORIZATION'] = $authorizationHeader;
                }
            }
        }

        if (isset($headers['AUTHORIZATION'])) {
            return $headers;
        }

        // PHP_AUTH_USER/PHP_AUTH_PW
        // Re-synthesize AUTHORIZATION from the decoded credentials so
        // downstream consumers always see a single canonical header.
        if (isset($headers['PHP_AUTH_USER'])) {
            $headers['AUTHORIZATION'] = 'Basic '.base64_encode($headers['PHP_AUTH_USER'].':'.($headers['PHP_AUTH_PW'] ?? ''));
        } elseif (isset($headers['PHP_AUTH_DIGEST'])) {
            $headers['AUTHORIZATION'] = $headers['PHP_AUTH_DIGEST'];
        }

        return $headers;
    }
}
import numpy as np
import pytest
from pandas import (
MultiIndex,
Series,
date_range,
)
import pandas._testing as tm
def test_xs_datetimelike_wrapping():
    # GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
    raw = date_range("2016-01-01", periods=3)._data._ndarray
    ser = Series(raw, dtype=object)
    # Re-assign element-wise so the object column holds raw np.datetime64
    # values rather than Timestamps.
    for pos, value in enumerate(raw):
        ser.iloc[pos] = value
    assert ser.dtype == object
    assert isinstance(ser[0], np.datetime64)

    assert isinstance(ser.xs(0), np.datetime64)
class TestXSWithMultiIndex:
    """Tests for ``.xs`` cross-sections on MultiIndex-backed objects."""

    def test_xs_level_series(self, multiindex_dataframe_random_data):
        # Taking a level-1 cross-section of a column must match the partial
        # label lookup ``ser[:, "two"]`` on that column.
        df = multiindex_dataframe_random_data
        ser = df["A"]
        expected = ser[:, "two"]
        result = df.xs("two", level=1)["A"]
        tm.assert_series_equal(result, expected)

    def test_series_getitem_multiindex_xs_by_label(self):
        # GH#5684
        idx = MultiIndex.from_tuples(
            [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
        )
        ser = Series([1, 2, 3, 4], index=idx)
        # set_names(inplace=True) must return None, mutating in place.
        return_value = ser.index.set_names(["L1", "L2"], inplace=True)
        assert return_value is None
        expected = Series([1, 3], index=["a", "b"])
        return_value = expected.index.set_names(["L1"], inplace=True)
        assert return_value is None

        # xs by level name drops the selected level from the result index.
        result = ser.xs("one", level="L2")
        tm.assert_series_equal(result, expected)

    def test_series_getitem_multiindex_xs(self):
        # GH#6258
        dt = list(date_range("20130903", periods=3))
        idx = MultiIndex.from_product([list("AB"), dt])
        ser = Series([1, 3, 4, 1, 3, 4], index=idx)
        expected = Series([1, 1], index=list("AB"))

        # A date string is matched against the datetime level.
        result = ser.xs("20130903", level=1)
        tm.assert_series_equal(result, expected)

    def test_series_xs_droplevel_false(self):
        # GH: 19056
        mi = MultiIndex.from_tuples(
            [("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"]
        )
        ser = Series([1, 1, 1], index=mi)
        # drop_level=False keeps the full MultiIndex on the result.
        result = ser.xs("a", axis=0, drop_level=False)
        expected = Series(
            [1, 1],
            index=MultiIndex.from_tuples(
                [("a", "x"), ("a", "y")], names=["level1", "level2"]
            ),
        )
        tm.assert_series_equal(result, expected)

    def test_xs_key_as_list(self):
        # GH#41760
        # Lists are rejected as xs keys with an explicit TypeError.
        mi = MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"])
        ser = Series([1], index=mi)
        with pytest.raises(TypeError, match="list keys are not supported"):
            ser.xs(["a", "x"], axis=0, drop_level=False)

        with pytest.raises(TypeError, match="list keys are not supported"):
            ser.xs(["a"], axis=0, drop_level=False)
import os
import sys
from fmap import fmap
from tempfile import mkstemp
from subprocess import Popen, PIPE
DIR = os.path.abspath(os.path.dirname(__file__))
TESTDIR = os.path.join(DIR, 'tree')
#-------------------------------------------------------------------------------
def test_fmap():
    """Drive fmap() over the fixture tree and check which names are visited."""
    collected = []

    def record(name):
        collected.append(name)

    def visited(**kwargs):
        # Reset, run one fmap pass, and return the set of visited names.
        del collected[:]
        fmap(TESTDIR, record, **kwargs)
        return set(collected)

    # Files only, full depth.
    assert visited() == {'c', 'd', 'f', 'g', 'h'}
    # Files only, top level.
    assert visited(max_depth=0) == {'c', 'd'}
    # Directories included.
    assert visited(apply_dirs=True) == {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
    assert visited(apply_dirs=True, max_depth=0) == {'a', 'b', 'c', 'd'}
    # Exclusion prunes a whole subtree.
    assert visited(excludes=['a']) == {'c', 'd', 'h'}
    # Patterns and excludes combine.
    assert visited(patterns=['g', 'd', 'h'], excludes=['a']) == {'d', 'h'}
#-------------------------------------------------------------------------------
def test_main():
    """Exercise fmap's main() entry point via a shell command that appends
    each visited name to a temporary file."""
    from fmap import main
    main = main.main

    # Smoke test: excluding everything should visit nothing and not crash.
    main('-x', '*', 'echo')

    # mkstemp returns an open OS-level file descriptor that the caller is
    # responsible for closing; close it right away so it does not leak
    # (the file itself is reopened below through the normal API).
    fd, path = mkstemp()
    os.close(fd)
    tf = open(path, 'rt')
    cmd = 'echo {{}} >> {}'.format(path)

    def seen():
        """Return the basenames echoed so far, then truncate the file."""
        tf.seek(0)
        out = tf.read()
        names = map(os.path.basename, filter(bool, out.split('\n')))
        with open(path, 'wt') as f:
            f.write('')
        return set(names)

    main('-r', TESTDIR, cmd)
    assert seen() == {'c', 'd', 'f', 'g', 'h'}

    main('-r', TESTDIR, '-z0', cmd)
    assert seen() == {'c', 'd'}

    main('-r', TESTDIR, '-z0', '-p', cmd)
    assert seen() == set()

    # Calling main() with no arguments must fall back to sys.argv.
    argv = sys.argv
    sys.argv = ['', '-r', TESTDIR, cmd]
    main()
    assert seen() == {'c', 'd', 'f', 'g', 'h'}
    sys.argv = argv

    tf.close()
    os.remove(path)
#-------------------------------------------------------------------------------
def test_fmap_invocation():
    """End-to-end checks of the installed ``fmap`` command-line script and
    the ``python -m fmap`` module entry point."""
    def seen(p):
        # Collect the basenames echoed by the subprocess, one per line.
        out = p.communicate()[0].decode('utf-8')
        seen = map(os.path.basename, filter(bool, out.split('\n')))
        return set(seen)

    # Same scenarios as test_fmap, but through the real CLI.
    p = Popen('fmap -r {} echo'.format(TESTDIR), stdout=PIPE, shell=True)
    assert seen(p) == {'c', 'd', 'f', 'g', 'h'}

    p = Popen('fmap -r {} -z0 echo'.format(TESTDIR), stdout=PIPE, shell=True)
    assert seen(p) == {'c', 'd'}

    p = Popen('fmap -r {} -d echo'.format(TESTDIR), stdout=PIPE, shell=True)
    assert seen(p) == {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}

    p = Popen('fmap -r {} -d -z0 echo'.format(TESTDIR), stdout=PIPE, shell=True)
    assert seen(p) == {'a', 'b', 'c', 'd'}

    p = Popen('fmap -r {} -x a echo'.format(TESTDIR), stdout=PIPE, shell=True)
    assert seen(p) == {'c', 'd', 'h'}

    p = Popen('fmap -r {} -x a echo g d h'.format(TESTDIR), stdout=PIPE,
              shell=True)
    assert seen(p) == {'d', 'h'}

    # Module form must behave identically to the console script.
    p = Popen('python -m fmap -r {} -x a echo g d h'.format(TESTDIR),
              stdout=PIPE, shell=True)
    assert seen(p) == {'d', 'h'}

#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, \
intern, PY2
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
# single- or double-quoted string literals with backslash escapes
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')

# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    # Python without unicode identifier support: ASCII-only names.
    name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
    # Unicode identifiers supported: accept full XID character ranges.
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))

# lookbehind keeps integer followed by attribute access from matching
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Return a human readable description for a token type string."""
    # Operator tokens read best as their literal source text.
    if token_type in reverse_operators:
        return reverse_operators[token_type]

    descriptions = {
        TOKEN_COMMENT_BEGIN:        'begin of comment',
        TOKEN_COMMENT_END:          'end of comment',
        TOKEN_COMMENT:              'comment',
        TOKEN_LINECOMMENT:          'comment',
        TOKEN_BLOCK_BEGIN:          'begin of statement block',
        TOKEN_BLOCK_END:            'end of statement block',
        TOKEN_VARIABLE_BEGIN:       'begin of print statement',
        TOKEN_VARIABLE_END:         'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN:  'begin of line statement',
        TOKEN_LINESTATEMENT_END:    'end of line statement',
        TOKEN_DATA:                 'template data / text',
        TOKEN_EOF:                  'end of template',
    }
    # Anything without a nicer name falls back to the raw token type.
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    # Name tokens are best described by their actual value.
    return token.value if token.type == 'name' else _describe_token_type(token.type)
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' not in expr:
        return _describe_token_type(expr)
    type, value = expr.split(':', 1)
    # ``name:foo`` expressions are described by the expected value itself.
    if type == 'name':
        return value
    return _describe_token_type(type)
def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    # ``newline_re`` matches \r\n, \r and \n each as one newline token.
    return sum(1 for _ in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    # (length, tag-name, escaped-pattern) triples; the length component only
    # exists so the sort below puts the longest delimiters first.
    rules = [
        (len(environment.comment_start_string), 'comment',
         e(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         e(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         e(environment.variable_start_string))
    ]

    if environment.line_statement_prefix is not None:
        # line statements only match at the start of a line
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + e(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        # line comments may also begin mid-line after whitespace
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      e(environment.line_comment_prefix)))

    # drop the length component, keeping longest-delimiter-first order
    return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        # Stored and raised lazily: the error only fires when the lexer
        # actually reaches the offending position in the input.
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Token class.

    An immutable (lineno, type, value) triple; subclassing tuple keeps
    tokens lightweight and hashable.
    """
    __slots__ = ()
    # expose the tuple fields as named read-only properties
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # the type string is interned so token-type comparisons elsewhere
        # can use identity (``is``) instead of full string equality
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # operators render as their literal source text, names as their value
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        # the EOF token ends iteration and closes the underlying stream
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # push-back buffer used by look() and push()
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        # prime ``current`` by advancing past this initial sentinel token
        self.current = Token(1, TOKEN_INITIAL, '')
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        # the stream is truthy while tokens remain (pushed-back or upcoming)
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # advance, remember, then restore: next token goes back on the
        # push-back buffer and ``current`` is reset to the old token
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        # pushed-back tokens take precedence over the generator
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        # replace ``current`` with an EOF token and drop the generator
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        # return the matched token, then advance past it
        try:
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    # every environment setting that influences lexing is part of the key
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    cached = _lexer_cache.get(key)
    if cached is not None:
        return cached
    # cache miss: build a fresh lexer and remember it for later environments
    fresh = Lexer(environment)
    _lexer_cache[key] = fresh
    return fresh
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        """Compile all lexing regexes from the environment's delimiter
        settings and store them as a state-machine table in ``self.rules``.
        """
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                lstrip_re,
                e(environment.block_start_string),
                no_lstrip_re,
                e(environment.block_start_string),
            )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                lstrip_re,
                e(environment.comment_start_string),
                no_variable_re,
                e(environment.comment_start_string),
            )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules: maps a state name to a list of
        # (compiled regex, token(s), new state) triples consumed by tokeniter
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c('(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c('\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.

        This is the high-level entry point: raw tuples from
        :meth:`tokeniter` are converted into :class:`Token` objects and
        exposed through a :class:`TokenStream`.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.

        Raises :class:`TemplateSyntaxError` when a string literal contains
        an invalid escape sequence.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
                # if we can express it as bytestring (ascii only)
                # we do that for support of semi broken APIs
                # as datetime.datetime.strftime. On python 3 this
                # call becomes a noop thanks to 2to3
                if PY2:
                    try:
                        value = value.encode('ascii')
                    except UnicodeError:
                        pass
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.

        Yields ``(lineno, token_type, value)`` tuples.  *state* may be
        ``'variable'`` or ``'block'`` to start lexing inside a tag rather
        than at the template root.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            # splitlines() drops a trailing newline; re-add an empty line so
            # join() below restores it.
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']

        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'

        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars, get_unique_id
__all__ = ['Host']
class Host:
    ''' a single ansible host '''

    # __slots__ = [ 'name', 'vars', 'groups' ]

    def __getstate__(self):
        # Pickle support delegates to the explicit serialize/deserialize pair.
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def __eq__(self, other):
        # Equality is based on the generated uuid, not on the host name.
        if not isinstance(other, Host):
            return False
        return self._uuid == other._uuid

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # NOTE(review): hashes by name while __eq__ compares uuids -- two
        # hosts that compare equal could in principle hash differently.
        # Confirm this is intentional before relying on Host dict/set keys.
        return hash(self.name)

    def __str__(self):
        return self.get_name()

    def __repr__(self):
        return self.get_name()

    def serialize(self):
        """Return a plain-dict snapshot of this host (groups serialized
        recursively), suitable for pickling or transfer between workers."""
        groups = []
        for group in self.groups:
            groups.append(group.serialize())

        return dict(
            name=self.name,
            vars=self.vars.copy(),
            address=self.address,
            uuid=self._uuid,
            groups=groups,
            implicit=self.implicit,
        )

    def deserialize(self, data):
        """Rebuild state from a serialize() snapshot.

        Uses gen_uuid=False so the stored uuid (possibly None) is kept
        instead of generating a fresh one.
        """
        self.__init__(gen_uuid=False)

        self.name = data.get('name')
        self.vars = data.get('vars', dict())
        self.address = data.get('address', '')
        self._uuid = data.get('uuid', None)
        self.implicit = data.get('implicit', False)

        groups = data.get('groups', [])
        for group_data in groups:
            g = Group()
            g.deserialize(group_data)
            self.groups.append(g)

    def __init__(self, name=None, port=None, gen_uuid=True):
        # vars: host-level variables; groups: Group objects this host is in.
        self.vars = {}
        self.groups = []
        self._uuid = None

        self.name = name
        self.address = name

        if port:
            self.set_variable('ansible_port', int(port))

        if gen_uuid:
            self._uuid = get_unique_id()
        self.implicit = False

    def get_name(self):
        return self.name

    def populate_ancestors(self, additions=None):
        # populate ancestors: with no additions, re-add each current group so
        # add_group() pulls in its ancestors; with additions, only append the
        # given groups themselves.
        if additions is None:
            for group in self.groups:
                self.add_group(group)
        else:
            for group in additions:
                if group not in self.groups:
                    self.groups.append(group)

    def add_group(self, group):
        # populate ancestors first
        for oldg in group.get_ancestors():
            if oldg not in self.groups:
                self.groups.append(oldg)

        # actually add group
        if group not in self.groups:
            self.groups.append(group)

    def remove_group(self, group):
        if group in self.groups:
            self.groups.remove(group)

            # remove exclusive ancestors, xcept all!
            # (for/else: an ancestor is removed only when no remaining group
            # still reaches it)
            for oldg in group.get_ancestors():
                if oldg.name != 'all':
                    for childg in self.groups:
                        if oldg in childg.get_ancestors():
                            break
                    else:
                        self.remove_group(oldg)

    def set_variable(self, key, value):
        self.vars[key] = value

    def get_groups(self):
        return self.groups

    def get_magic_vars(self):
        # Variables implicitly provided for every host; 'all' is excluded
        # from group_names by convention.
        results = {}
        results['inventory_hostname'] = self.name
        results['inventory_hostname_short'] = self.name.split('.')[0]
        results['group_names'] = sorted([g.name for g in self.get_groups() if g.name != 'all'])

        return results

    def get_vars(self):
        # Magic vars win over host vars on key collision (combine_vars order).
        return combine_vars(self.vars, self.get_magic_vars())
/************************************************
constants.c -
created at: Thu Mar 31 12:21:29 JST 1994
Copyright (C) 1993-2007 Yukihiro Matsumoto
************************************************/
#include "rubysocket.h"

/* Anchor for the Socket::Constants module object; presumably assigned by
 * init_constants() in the generated constdefs.c -- verify there. */
static VALUE rb_mSockConst;

#include "constdefs.c"
/*
 * Resolve a Ruby argument (Symbol, String or Integer) to its numeric value.
 * Symbols and Strings are looked up through the supplied str_to_int
 * translation function; anything else is converted with NUM2INT.
 * Raises Socket::Error with errmsg when a name is not recognized.
 */
static int
constant_arg(VALUE arg, int (*str_to_int)(const char*, long, int*), const char *errmsg)
{
    VALUE str;
    int result;

    if (SYMBOL_P(arg)) {
        str = rb_sym2str(arg);
    }
    else {
        str = rb_check_string_type(arg);
    }
    if (!NIL_P(str)) {
        char *name = RSTRING_PTR(str);
        if (str_to_int(name, RSTRING_LEN(str), &result) == -1)
            rb_raise(rb_eSocket, "%s: %s", errmsg, name);
    }
    else {
        result = NUM2INT(arg);
    }
    return result;
}
/* Accept an address family given as AF_INET etc. (Symbol, String or Integer). */
int
rsock_family_arg(VALUE domain)
{
    int family = constant_arg(domain, rsock_family_to_int, "unknown socket domain");
    return family;
}
/* Accept a socket type given as SOCK_STREAM etc. (Symbol, String or Integer). */
int
rsock_socktype_arg(VALUE type)
{
    int socktype = constant_arg(type, rsock_socktype_to_int, "unknown socket type");
    return socktype;
}
/*
 * Resolve a protocol level such as SOL_SOCKET or IPPROTO_TCP.  Which name
 * table applies depends on whether the address family is an IP family.
 */
int
rsock_level_arg(int family, VALUE level)
{
    int (*table)(const char *, long, int *);

    table = IS_IP_FAMILY(family) ? rsock_ip_level_to_int
                                 : rsock_unknown_level_to_int;
    return constant_arg(level, table, "unknown protocol level");
}
/*
 * Resolve a socket option name for the given family/level.  SOL_SOCKET
 * option names are family independent; the IPPROTO_* tables only apply to
 * IP families.  Unknown levels fall back to a plain integer argument.
 */
int
rsock_optname_arg(int family, int level, VALUE optname)
{
    if (level == SOL_SOCKET)
        return constant_arg(optname, rsock_so_optname_to_int, "unknown socket level option name");
    if (IS_IP_FAMILY(family)) {
        switch (level) {
          case IPPROTO_IP:
            return constant_arg(optname, rsock_ip_optname_to_int, "unknown IP level option name");
#ifdef IPPROTO_IPV6
          case IPPROTO_IPV6:
            return constant_arg(optname, rsock_ipv6_optname_to_int, "unknown IPv6 level option name");
#endif
          case IPPROTO_TCP:
            return constant_arg(optname, rsock_tcp_optname_to_int, "unknown TCP level option name");
          case IPPROTO_UDP:
            return constant_arg(optname, rsock_udp_optname_to_int, "unknown UDP level option name");
        }
    }
    return NUM2INT(optname);
}
/*
 * Resolve an ancillary (control) message type for the given family/level.
 * SCM_* names at SOL_SOCKET apply regardless of family; the IPPROTO_*
 * tables only apply to IP families.  Unknown levels fall back to a plain
 * integer argument.
 */
int
rsock_cmsg_type_arg(int family, int level, VALUE type)
{
    if (level == SOL_SOCKET)
        return constant_arg(type, rsock_scm_optname_to_int, "unknown UNIX control message");
    if (IS_IP_FAMILY(family)) {
        switch (level) {
          case IPPROTO_IP:
            return constant_arg(type, rsock_ip_optname_to_int, "unknown IP control message");
#ifdef IPPROTO_IPV6
          case IPPROTO_IPV6:
            return constant_arg(type, rsock_ipv6_optname_to_int, "unknown IPv6 control message");
#endif
          case IPPROTO_TCP:
            return constant_arg(type, rsock_tcp_optname_to_int, "unknown TCP control message");
          case IPPROTO_UDP:
            return constant_arg(type, rsock_udp_optname_to_int, "unknown UDP control message");
        }
    }
    return NUM2INT(type);
}
/* Accept a shutdown direction given as SHUT_RD, SHUT_WR or SHUT_RDWR. */
int
rsock_shutdown_how_arg(VALUE how)
{
    int direction = constant_arg(how, rsock_shutdown_how_to_int, "unknown shutdown argument");
    return direction;
}
/*
 * Socket::Constants module
 */
void
rsock_init_socket_constants(void)
{
    /* constants: registered by the generated init_constants() from
     * constdefs.c (included above). */
    init_constants();
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_bgp
version_added: "2.8"
author: "Nilashish Chakraborty (@NilashishC)"
short_description: Configure global BGP protocol settings on Arista EOS.
description:
- This module provides configuration management of global BGP parameters
on Arista EOS devices.
notes:
- Tested against Arista vEOS Version 4.15.9M.
options:
config:
description:
- Specifies the BGP related configuration.
suboptions:
bgp_as:
description:
- Specifies the BGP Autonomous System (AS) number to configure on the device.
type: int
required: true
router_id:
description:
- Configures the BGP routing process router-id value.
default: null
log_neighbor_changes:
description:
- Enable/disable logging neighbor up/down and reset reason.
type: bool
neighbors:
description:
- Specifies BGP neighbor related configurations.
suboptions:
neighbor:
description:
- Neighbor router address.
required: True
remote_as:
description:
- Remote AS of the BGP neighbor to configure.
type: int
required: True
update_source:
description:
- Source of the routing updates.
password:
description:
- Password to authenticate the BGP peer connection.
description:
description:
- Neighbor specific description.
ebgp_multihop:
description:
- Specifies the maximum hop count for EBGP neighbors not on directly connected networks.
- The range is from 1 to 255.
type: int
peer_group:
description:
- Name of the peer group that the neighbor is a member of.
timers:
description:
- Specifies BGP neighbor timer related configurations.
suboptions:
keepalive:
description:
- Frequency (in seconds) with which the device sends keepalive messages to its peer.
- The range is from 0 to 3600.
type: int
required: True
holdtime:
description:
- Interval (in seconds) after not receiving a keepalive message that device declares a peer dead.
- The range is from 3 to 7200.
- Setting this value to 0 will not send keep-alives (hold forever).
type: int
required: True
route_reflector_client:
description:
- Specify a neighbor as a route reflector client.
type: bool
remove_private_as:
description:
- Remove the private AS number from outbound updates.
type: bool
enabled:
description:
- Administratively shutdown or enable a neighbor.
maximum_prefix:
description:
- Maximum number of prefixes to accept from this peer.
- The range is from 0 to 4294967294.
type: int
redistribute:
description:
- Specifies the redistribute information from another routing protocol.
suboptions:
protocol:
description:
- Specifies the protocol for configuring redistribute information.
required: True
route_map:
description:
- Specifies the route map reference.
networks:
description:
- Specify Networks to announce via BGP.
- For operation replace, this option is mutually exclusive with networks option under address_family.
- For operation replace, if the device already has an address family activated, this option is not allowed.
suboptions:
prefix:
description:
- Network ID to announce via BGP.
required: True
masklen:
description:
- Subnet mask length for the Network to announce(e.g, 8, 16, 24, etc.).
route_map:
description:
- Route map to modify the attributes.
address_family:
description:
- Specifies BGP address family related configurations.
suboptions:
afi:
description:
- Type of address family to configure.
choices:
- ipv4
- ipv6
required: True
redistribute:
description:
- Specifies the redistribute information from another routing protocol.
suboptions:
protocol:
description:
- Specifies the protocol for configuring redistribute information.
required: True
route_map:
description:
- Specifies the route map reference.
networks:
description:
- Specify Networks to announce via BGP.
- For operation replace, this option is mutually exclusive with root level networks option.
suboptions:
prefix:
description:
- Network ID to announce via BGP.
required: True
masklen:
description:
- Subnet mask length for the Network to announce(e.g, 8, 16, 24, etc.).
route_map:
description:
- Route map to modify the attributes.
neighbors:
description:
- Specifies BGP neighbor related configurations in Address Family configuration mode.
suboptions:
neighbor:
description:
- Neighbor router address.
required: True
activate:
description:
- Enable the Address Family for this Neighbor.
type: bool
default_originate:
description:
- Originate default route to this neighbor.
type: bool
graceful_restart:
description:
- Enable/disable graceful restart mode for this neighbor.
type: bool
weight:
description:
- Assign weight for routes learnt from this neighbor.
- The range is from 0 to 65535
type: int
operation:
description:
- Specifies the operation to be performed on the BGP process configured on the device.
- In case of merge, the input configuration will be merged with the existing BGP configuration on the device.
- In case of replace, if there is a diff between the existing configuration and the input configuration, the
existing configuration will be replaced by the input configuration for every option that has the diff.
- In case of override, all the existing BGP configuration will be removed from the device and replaced with
the input configuration.
- In case of delete the existing BGP configuration will be removed from the device.
default: merge
choices: ['merge', 'replace', 'override', 'delete']
"""
EXAMPLES = """
- name: configure global bgp as 64496
eos_bgp:
config:
bgp_as: 64496
router_id: 192.0.2.1
log_neighbor_changes: True
neighbors:
- neighbor: 203.0.113.5
remote_as: 64511
timers:
keepalive: 300
holdtime: 360
- neighbor: 198.51.100.2
remote_as: 64498
networks:
- prefix: 198.51.100.0
route_map: RMAP_1
- prefix: 192.0.2.0
masklen: 23
address_family:
- afi: ipv4
safi: unicast
redistribute:
- protocol: isis
route_map: RMAP_1
operation: merge
- name: Configure BGP neighbors
eos_bgp:
config:
bgp_as: 64496
neighbors:
- neighbor: 192.0.2.10
remote_as: 64496
description: IBGP_NBR_1
ebgp_multihop: 100
timers:
keepalive: 300
holdtime: 360
- neighbor: 192.0.2.15
remote_as: 64496
description: IBGP_NBR_2
ebgp_multihop: 150
operation: merge
- name: Configure root-level networks for BGP
eos_bgp:
config:
bgp_as: 64496
networks:
- prefix: 203.0.113.0
masklen: 27
route_map: RMAP_1
- prefix: 203.0.113.32
masklen: 27
route_map: RMAP_2
operation: merge
- name: Configure BGP neighbors under address family mode
eos_bgp:
config:
bgp_as: 64496
address_family:
- afi: ipv4
neighbors:
- neighbor: 203.0.113.10
activate: yes
default_originate: True
- neighbor: 192.0.2.15
activate: yes
graceful_restart: True
operation: merge
- name: remove bgp as 64496 from config
eos_bgp:
config:
bgp_as: 64496
operation: delete
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- router bgp 64496
- bgp router-id 192.0.2.1
- bgp log-neighbor-changes
- neighbor 203.0.113.5 remote-as 64511
- neighbor 203.0.113.5 timers 300 360
- neighbor 198.51.100.2 remote-as 64498
- network 198.51.100.0 route-map RMAP_1
- network 192.0.2.0 mask 255.255.254.0
- address-family ipv4
- redistribute isis route-map RMAP_1
- exit-address-family
"""
from ansible.module_utils._text import to_text
from ansible.module_utils.network.eos.providers.module import NetworkModule
from ansible.module_utils.network.eos.providers.cli.config.bgp.process import REDISTRIBUTE_PROTOCOLS
def main():
    """ main entry point for module execution

    Builds the nested argument spec mirroring the documented options and
    delegates the actual configuration work to the EOS BGP provider.
    """
    network_spec = {
        'prefix': dict(required=True),
        'masklen': dict(type='int'),
        'route_map': dict(),
    }

    redistribute_spec = {
        'protocol': dict(choices=REDISTRIBUTE_PROTOCOLS, required=True),
        'route_map': dict(),
    }

    timer_spec = {
        'keepalive': dict(type='int', required=True),
        'holdtime': dict(type='int', required=True),
    }

    neighbor_spec = {
        'neighbor': dict(required=True),
        'remote_as': dict(type='int', required=True),
        'update_source': dict(),
        'password': dict(no_log=True),
        'enabled': dict(type='bool'),
        'description': dict(),
        'ebgp_multihop': dict(type='int'),
        'timers': dict(type='dict', options=timer_spec),
        'peer_group': dict(),
        'maximum_prefix': dict(type='int'),
        # DOCUMENTATION declares this option as a boolean flag; it was
        # previously (and incorrectly) typed as 'int' here.
        'route_reflector_client': dict(type='bool'),
        'remove_private_as': dict(type='bool')
    }

    af_neighbor_spec = {
        'neighbor': dict(required=True),
        'activate': dict(type='bool'),
        'default_originate': dict(type='bool'),
        'graceful_restart': dict(type='bool'),
        'weight': dict(type='int'),
    }

    address_family_spec = {
        'afi': dict(choices=['ipv4', 'ipv6'], required=True),
        'networks': dict(type='list', elements='dict', options=network_spec),
        'redistribute': dict(type='list', elements='dict', options=redistribute_spec),
        'neighbors': dict(type='list', elements='dict', options=af_neighbor_spec),
    }

    config_spec = {
        'bgp_as': dict(type='int', required=True),
        'router_id': dict(),
        'log_neighbor_changes': dict(type='bool'),
        'neighbors': dict(type='list', elements='dict', options=neighbor_spec),
        'address_family': dict(type='list', elements='dict', options=address_family_spec),
        'redistribute': dict(type='list', elements='dict', options=redistribute_spec),
        'networks': dict(type='list', elements='dict', options=network_spec)
    }

    argument_spec = {
        'config': dict(type='dict', options=config_spec),
        'operation': dict(default='merge', choices=['merge', 'replace', 'override', 'delete'])
    }

    module = NetworkModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    try:
        # Only the BGP section of the running config is relevant.
        result = module.edit_config(config_filter='| section bgp')
    except Exception as exc:
        module.fail_json(msg=to_text(exc))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
# CloudFormation sample template (as a Python dict).  Fixes relative to the
# previous revision: the DataVolume now honors the VolumeSize parameter
# (it was hard-coded to "100", leaving the declared parameter unused), and
# the VolumeSize description no longer repeats the instance-type text.
template = {
    "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.",
    "Parameters": {
        "SSHLocation": {
            "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.",
            "Description": "The IP address range that can be used to SSH to the EC2 instances",
            "Default": "0.0.0.0/0",
            "MinLength": "9",
            "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            "MaxLength": "18",
            "Type": "String"
        },
        "KeyName": {
            "Type": "String",
            "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances",
            "MinLength": "1",
            "AllowedPattern": "[\\x20-\\x7E]*",
            "MaxLength": "255",
            "ConstraintDescription": "can contain only ASCII characters."
        },
        "InstanceType": {
            "Default": "m1.small",
            "ConstraintDescription": "must be a valid EC2 instance type.",
            "Type": "String",
            "Description": "WebServer EC2 instance type",
            "AllowedValues": [
                "t1.micro",
                "m1.small",
                "m1.medium",
                "m1.large",
                "m1.xlarge",
                "m2.xlarge",
                "m2.2xlarge",
                "m2.4xlarge",
                "m3.xlarge",
                "m3.2xlarge",
                "c1.medium",
                "c1.xlarge",
                "cc1.4xlarge",
                "cc2.8xlarge",
                "cg1.4xlarge"
            ]
        },
        "VolumeSize": {
            # Description fixed: it previously duplicated the InstanceType text.
            "Description": "Size of the EBS data volume in GB",
            "Default": "5",
            "Type": "Number",
            "MaxValue": "1024",
            "MinValue": "5",
            "ConstraintDescription": "must be between 5 and 1024 Gb."
        }
    },
    "AWSTemplateFormatVersion": "2010-09-09",
    "Outputs": {
        "WebsiteURL": {
            "Description": "URL for Gollum wiki",
            "Value": {
                "Fn::Join": [
                    "",
                    [
                        "http://",
                        {
                            "Fn::GetAtt": [
                                "WebServer",
                                "PublicDnsName"
                            ]
                        }
                    ]
                ]
            }
        }
    },
    "Resources": {
        "WebServerSecurityGroup": {
            "Type": "AWS::EC2::SecurityGroup",
            "Properties": {
                "SecurityGroupIngress": [
                    {
                        "ToPort": "80",
                        "IpProtocol": "tcp",
                        "CidrIp": "0.0.0.0/0",
                        "FromPort": "80"
                    },
                    {
                        "ToPort": "22",
                        "IpProtocol": "tcp",
                        "CidrIp": {
                            "Ref": "SSHLocation"
                        },
                        "FromPort": "22"
                    }
                ],
                "GroupDescription": "Enable SSH access and HTTP access on the inbound port"
            }
        },
        "WebServer": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                # NOTE(review): the bootstrap waits for /dev/sdh but
                # formats/mounts /dev/sdh1 -- confirm the device naming on
                # the target AMI.
                "UserData": {
                    "Fn::Base64": {
                        "Fn::Join": [
                            "",
                            [
                                "#!/bin/bash -v\n",
                                "yum update -y aws-cfn-bootstrap\n",
                                "# Helper function\n",
                                "function error_exit\n",
                                "{\n",
                                "  /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
                                {
                                    "Ref": "WaitHandle"
                                },
                                "'\n",
                                "  exit 1\n",
                                "}\n",
                                "# Install Rails packages\n",
                                "/opt/aws/bin/cfn-init -s ",
                                {
                                    "Ref": "AWS::StackId"
                                },
                                " -r WebServer ",
                                "    --region ",
                                {
                                    "Ref": "AWS::Region"
                                },
                                " || error_exit 'Failed to run cfn-init'\n",
                                "# Wait for the EBS volume to show up\n",
                                "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n",
                                "# Format the EBS volume and mount it\n",
                                "mkdir /var/wikidata\n",
                                "/sbin/mkfs -t ext3 /dev/sdh1\n",
                                "mount /dev/sdh1 /var/wikidata\n",
                                "# Initialize the wiki and fire up the server\n",
                                "cd /var/wikidata\n",
                                "git init\n",
                                "gollum --port 80 --host 0.0.0.0 &\n",
                                "# If all is well so signal success\n",
                                "/opt/aws/bin/cfn-signal -e $? -r \"Rails application setup complete\" '",
                                {
                                    "Ref": "WaitHandle"
                                },
                                "'\n"
                            ]
                        ]
                    }
                },
                "KeyName": {
                    "Ref": "KeyName"
                },
                "SecurityGroups": [
                    {
                        "Ref": "WebServerSecurityGroup"
                    }
                ],
                "InstanceType": {
                    "Ref": "InstanceType"
                },
                "ImageId": {
                    "Fn::FindInMap": [
                        "AWSRegionArch2AMI",
                        {
                            "Ref": "AWS::Region"
                        },
                        {
                            "Fn::FindInMap": [
                                "AWSInstanceType2Arch",
                                {
                                    "Ref": "InstanceType"
                                },
                                "Arch"
                            ]
                        }
                    ]
                }
            },
            "Metadata": {
                "AWS::CloudFormation::Init": {
                    "config": {
                        "packages": {
                            "rubygems": {
                                "nokogiri": [
                                    "1.5.10"
                                ],
                                "rdiscount": [],
                                "gollum": [
                                    "1.1.1"
                                ]
                            },
                            "yum": {
                                "libxslt-devel": [],
                                "gcc": [],
                                "git": [],
                                "rubygems": [],
                                "ruby-devel": [],
                                "ruby-rdoc": [],
                                "make": [],
                                "libxml2-devel": []
                            }
                        }
                    }
                }
            }
        },
        "DataVolume": {
            "Type": "AWS::EC2::Volume",
            "Properties": {
                "Tags": [
                    {
                        "Value": "Gollum Data Volume",
                        "Key": "Usage"
                    }
                ],
                "AvailabilityZone": {
                    "Fn::GetAtt": [
                        "WebServer",
                        "AvailabilityZone"
                    ]
                },
                # Size now references the VolumeSize parameter instead of a
                # hard-coded "100" that ignored the declared parameter.
                "Size": {
                    "Ref": "VolumeSize"
                }
            }
        },
        "MountPoint": {
            "Type": "AWS::EC2::VolumeAttachment",
            "Properties": {
                "InstanceId": {
                    "Ref": "WebServer"
                },
                "Device": "/dev/sdh",
                "VolumeId": {
                    "Ref": "DataVolume"
                }
            }
        },
        "WaitCondition": {
            "DependsOn": "MountPoint",
            "Type": "AWS::CloudFormation::WaitCondition",
            "Properties": {
                "Handle": {
                    "Ref": "WaitHandle"
                },
                "Timeout": "300"
            },
            "Metadata": {
                "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance",
                "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion"
            }
        },
        "WaitHandle": {
            "Type": "AWS::CloudFormation::WaitConditionHandle"
        }
    },
    "Mappings": {
        "AWSInstanceType2Arch": {
            "m3.2xlarge": {
                "Arch": "64"
            },
            "m2.2xlarge": {
                "Arch": "64"
            },
            "m1.small": {
                "Arch": "64"
            },
            "c1.medium": {
                "Arch": "64"
            },
            "cg1.4xlarge": {
                "Arch": "64HVM"
            },
            "m2.xlarge": {
                "Arch": "64"
            },
            "t1.micro": {
                "Arch": "64"
            },
            "cc1.4xlarge": {
                "Arch": "64HVM"
            },
            "m1.medium": {
                "Arch": "64"
            },
            "cc2.8xlarge": {
                "Arch": "64HVM"
            },
            "m1.large": {
                "Arch": "64"
            },
            "m1.xlarge": {
                "Arch": "64"
            },
            "m2.4xlarge": {
                "Arch": "64"
            },
            "c1.xlarge": {
                "Arch": "64"
            },
            "m3.xlarge": {
                "Arch": "64"
            }
        },
        "AWSRegionArch2AMI": {
            "ap-southeast-1": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-b4b0cae6",
                "64": "ami-beb0caec"
            },
            "ap-southeast-2": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-b3990e89",
                "64": "ami-bd990e87"
            },
            "us-west-2": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-38fe7308",
                "64": "ami-30fe7300"
            },
            "us-east-1": {
                "64HVM": "ami-0da96764",
                "32": "ami-31814f58",
                "64": "ami-1b814f72"
            },
            "ap-northeast-1": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-0644f007",
                "64": "ami-0a44f00b"
            },
            "us-west-1": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-11d68a54",
                "64": "ami-1bd68a5e"
            },
            "eu-west-1": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-973b06e3",
                "64": "ami-953b06e1"
            },
            "sa-east-1": {
                "64HVM": "NOT_YET_SUPPORTED",
                "32": "ami-3e3be423",
                "64": "ami-3c3be421"
            }
        }
    }
}
# -*- coding: utf-8 -*-
from sqlalchemy import (
Column,
Index,
Integer,
Text,
DateTime,
String,
Enum,
)
from base import Base
class SOrderLine(Base):
__tablename__ = 'sorder_line'
id = Column(Integer, primary_key=True)
order_id = Column(String)
product_id = Column(String)
user_id = Column(String)
create_date = Column(DateTime)
# like_type = Column(Enum(u'factory',
# u'brand',
# u'shop',
# u'product',
# name='like_type'))
# like_code = Column(String)
# remote_ip = Column(String)
# local_ip = Column(String)
# class EShopVisit(Base):
# __tablename__ = 'eshop_visit'
# id = Column(Integer, primary_key=True)
# factory_code = Column(String)
# visit_type = Column(Enum(u'factory',
# u'brand',
# u'shop',
# u'product',
# name='visit_type'))
# visit_code = Column(String)
# remote_ip = Column(String)
# local_ip = Column(String)
# start_time = Column(DateTime)
# ## end_time = Column(DateTime)
# Index('company_code_index', EShopVisit.factory_code) | unknown | codeparrot/codeparrot-clean | ||
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Google Cloud KMS hook.
"""
import base64
from typing import Optional, Sequence, Tuple
from google.api_core.retry import Retry
from google.cloud.kms_v1 import KeyManagementServiceClient
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
def _b64encode(s: bytes) -> str:
""" Base 64 encodes a bytes object to a string """
return base64.b64encode(s).decode("ascii")
def _b64decode(s: str) -> bytes:
""" Base 64 decodes a string to bytes. """
return base64.b64decode(s.encode("utf-8"))
# noinspection PyAbstractClass
class CloudKMSHook(CloudBaseHook):
"""
Hook for Google Cloud Key Management service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id=gcp_conn_id, delegate_to=delegate_to)
self._conn = None # type: Optional[KeyManagementServiceClient]
def get_conn(self) -> KeyManagementServiceClient:
"""
Retrieves connection to Cloud Key Management service.
:return: Cloud Key Management service object
:rtype: google.cloud.kms_v1.KeyManagementServiceClient
"""
if not self._conn:
self._conn = KeyManagementServiceClient(
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._conn
def encrypt(
self,
key_name: str,
plaintext: bytes,
authenticated_data: Optional[bytes] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> str:
"""
Encrypts a plaintext message using Google Cloud KMS.
:param key_name: The Resource Name for the key (or key version)
to be used for encyption. Of the form
``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:type key_name: str
:param plaintext: The message to be encrypted.
:type plaintext: bytes
:param authenticated_data: Optional additional authenticated data that
must also be provided to decrypt the message.
:type authenticated_data: bytes
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:return: The base 64 encoded ciphertext of the original message.
:rtype: str
"""
response = self.get_conn().encrypt(
name=key_name,
plaintext=plaintext,
additional_authenticated_data=authenticated_data,
retry=retry,
timeout=timeout,
metadata=metadata,
)
ciphertext = _b64encode(response.ciphertext)
return ciphertext
def decrypt(
self,
key_name: str,
ciphertext: str,
authenticated_data: Optional[bytes] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> bytes:
"""
Decrypts a ciphertext message using Google Cloud KMS.
:param key_name: The Resource Name for the key to be used for decyption.
Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:type key_name: str
:param ciphertext: The message to be decrypted.
:type ciphertext: str
:param authenticated_data: Any additional authenticated data that was
provided when encrypting the message.
:type authenticated_data: bytes
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:return: The original message.
:rtype: bytes
"""
response = self.get_conn().decrypt(
name=key_name,
ciphertext=_b64decode(ciphertext),
additional_authenticated_data=authenticated_data,
retry=retry,
timeout=timeout,
metadata=metadata,
)
plaintext = response.plaintext
return plaintext | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# (c) 2014, Pavel Antonov <antonov@adwz.ru>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
######################################################################
DOCUMENTATION = '''
---
module: docker_image
author: Pavel Antonov
version_added: "1.5"
short_description: manage docker images
description:
- Create, check and remove docker images
options:
path:
description:
- Path to directory with Dockerfile
required: false
default: null
aliases: []
dockerfile:
description:
- Dockerfile to use
required: false
default: Dockerfile
version_added: "2.0"
name:
description:
- Image name to work with
required: true
default: null
aliases: []
tag:
description:
- Image tag to work with
required: false
default: "latest"
aliases: []
nocache:
description:
- Do not use cache with building
required: false
default: false
aliases: []
docker_url:
description:
- URL of docker host to issue commands to
required: false
default: unix://var/run/docker.sock
aliases: []
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
specified by docker-py.
default: docker-py default remote API version
version_added: "2.0"
state:
description:
- Set the state of the image
required: false
default: present
choices: [ "present", "absent", "build" ]
aliases: []
timeout:
description:
- Set image operation timeout
required: false
default: 600
aliases: []
requirements:
- "python >= 2.6"
- "docker-py"
- "requests"
'''
EXAMPLES = '''
Build docker image if required. Path should contains Dockerfile to build image:
- hosts: web
sudo: yes
tasks:
- name: check or build image
docker_image: path="/path/to/build/dir" name="my/app" state=present
Build new version of image:
- hosts: web
sudo: yes
tasks:
- name: check or build image
docker_image: path="/path/to/build/dir" name="my/app" state=build
Remove image from local docker storage:
- hosts: web
sudo: yes
tasks:
- name: remove image
docker_image: name="my/app" state=absent
'''
import re
from urlparse import urlparse
try:
import json
except ImportError:
import simplejson as json
try:
from requests.exceptions import *
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
import docker.client
HAS_DOCKER_CLIENT = True
except ImportError:
HAS_DOCKER_CLIENT = False
if HAS_DOCKER_CLIENT:
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
class DockerImageManager:
def __init__(self, module):
self.module = module
self.path = self.module.params.get('path')
self.dockerfile = self.module.params.get('dockerfile')
self.name = self.module.params.get('name')
self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url'))
self.client = docker.Client(
base_url=docker_url.geturl(),
version=module.params.get('docker_api_version'),
timeout=module.params.get('timeout'))
self.changed = False
self.log = []
self.error_msg = None
def get_log(self, as_string=True):
return "".join(self.log) if as_string else self.log
def build(self):
stream = self.client.build(self.path, dockerfile=self.dockerfile, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True)
success_search = r'Successfully built ([0-9a-f]+)'
image_id = None
self.changed = True
for chunk in stream:
if not chunk:
continue
try:
chunk_json = json.loads(chunk)
except ValueError:
continue
if 'error' in chunk_json:
self.error_msg = chunk_json['error']
return None
if 'stream' in chunk_json:
output = chunk_json['stream']
self.log.append(output)
match = re.search(success_search, output)
if match:
image_id = match.group(1)
# Just in case we skipped evaluating the JSON returned from build
# during every iteration, add an error if the image_id was never
# populated
if not image_id:
self.error_msg = 'Unknown error encountered'
return image_id
def has_changed(self):
return self.changed
def get_images(self):
filtered_images = []
images = self.client.images()
for i in images:
# Docker-py version >= 0.3 (Docker API >= 1.8)
if 'RepoTags' in i:
repotag = ':'.join([self.name, self.tag])
if not self.name or repotag in i['RepoTags']:
filtered_images.append(i)
# Docker-py version < 0.3 (Docker API < 1.8)
elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']):
filtered_images.append(i)
return filtered_images
def remove_images(self):
images = self.get_images()
for i in images:
try:
self.client.remove_image(i['Id'])
self.changed = True
except DockerAPIError as e:
# image can be removed by docker if not used
pass
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=False, default=None),
dockerfile = dict(required=False, default="Dockerfile"),
name = dict(required=True),
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
docker_api_version = dict(required=False,
default=DEFAULT_DOCKER_API_VERSION,
type='str'),
timeout = dict(default=600, type='int'),
)
)
if not HAS_DOCKER_CLIENT:
module.fail_json(msg='docker-py is needed for this module')
if not HAS_REQUESTS:
module.fail_json(msg='requests is needed for this module')
try:
manager = DockerImageManager(module)
state = module.params.get('state')
failed = False
image_id = None
msg = ''
do_build = False
# build image if not exists
if state == "present":
images = manager.get_images()
if len(images) == 0:
do_build = True
# build image
elif state == "build":
do_build = True
# remove image or images
elif state == "absent":
manager.remove_images()
if do_build:
image_id = manager.build()
if image_id:
msg = "Image built: %s" % image_id
else:
failed = True
msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log())
module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id)
except DockerAPIError as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation)
except RequestException as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
'''
Created on 2015-01-19
@author: levi
'''
import unittest
import inspect
from t_core.messages import Packet
from t_core.iterator import Iterator
from t_core.matcher import Matcher
from t_core.rewriter import Rewriter
from himesis_utils import graph_to_dot, print_graph
from PyRamify import PyRamify
from tests.TestModules.HSM2SM_partial import HSM2SM_partial
from tests.TestModules.HSM2SM_complete import HSM2SM_complete
class Test(unittest.TestCase):
def testName(self):
i = Iterator()
p = Packet()
pyramify = PyRamify()
[self.rules, self.ruleTraceCheckers, backwardPatterns2Rules, backwardPatternsComplete, self.matchRulePatterns, self.ruleCombinators] = \
pyramify.ramify_directory("dir_for_ramify_1/", True)
HSM2SM_py = self.ruleCombinators["HSM2SM"]
print(HSM2SM_py)
matcher = HSM2SM_py[0][0]
rewriter = HSM2SM_py[0][1]
# print rewriter.condition.vs[3]["MT_label__"]
# rewriter.condition.vs[newnode]["MT_label__"] = """100"""
# rewriter.condition.vs[newnode]["mm__"] = """MT_post__Station_T"""
# print rewriter.condition.vs[1]["MT_label__"]
# print rewriter.condition.vs[1]["mm__"]
# graph_to_dot("rewriter", rewriter.condition)
# for v in range(len(rewriter.condition.vs)):
# print rewriter.condition.vs[v]
# for v in range(len(rewriter.condition.vs)):
# rewriter.condition.vs[v]["MT_label__"] = """100"""
# print_graph(rewriter.condition)
p.graph = HSM2SM_partial()
# comb_match = matcher
# comb_rewrite = rewriter
#
# graph_to_dot("test_SM2SM_graph", p.graph)
graph_to_dot("comb_SM2SM_matcher", matcher.condition)
graph_to_dot("comb_SM2SM_rewriter", rewriter.condition)
# for v in range(len(comb_rewrite.condition.vs)):
# print comb_rewrite.condition.vs[v]
#
p = matcher.packet_in(p)
if matcher.is_success:
print "Yes!"
else:
print "no"
p = i.packet_in(p)
p = rewriter.packet_in(p)
if rewriter.is_success:
print "Yes!"
else:
print "no"
graph_to_dot("test_after_SM2SM", p.graph)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Contracts\Validation;
/**
* @deprecated see ValidationRule
*/
interface ImplicitRule extends Rule
{
//
} | php | github | https://github.com/laravel/framework | src/Illuminate/Contracts/Validation/ImplicitRule.php |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
default: null
cidr:
description:
- cidr of the network being created
default: null
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_network(module, state, label, cidr):
changed = False
network = None
networks = []
if not pyrax.cloud_networks:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not cidr:
module.fail_json(msg='missing required arguments: cidr')
try:
network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound:
try:
network = pyrax.cloud_networks.create(label, cidr=cidr)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
network = pyrax.cloud_networks.find_network_by_label(label)
network.delete()
changed = True
except pyrax.exceptions.NetworkNotFound:
pass
except Exception, e:
module.fail_json(msg='%s' % e.message)
if network:
instance = dict(id=network.id,
label=network.label,
cidr=network.cidr)
networks.append(instance)
module.exit_json(changed=changed, networks=networks)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present',
choices=['present', 'absent']),
label=dict(required=True),
cidr=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
cidr = module.params.get('cidr')
setup_rax_module(module, pyrax)
cloud_network(module, state, label, cidr)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main() | unknown | codeparrot/codeparrot-clean | ||
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..person import RegExpName
from ._memberbase import father_base
#-------------------------------------------------------------------------
#
# RegExpFatherName
#
#-------------------------------------------------------------------------
class RegExpFatherName(RegExpName):
"""Rule that checks for full or partial name matches"""
name = _('Families with father matching the <regex_name>')
description = _("Matches families whose father has a name "
"matching a specified regular expression")
category = _('Father filters')
base_class = RegExpName
apply = father_base | unknown | codeparrot/codeparrot-clean | ||
## Input
```javascript
import {useFragment} from 'shared-runtime';
function Component(props) {
const post = useFragment(
graphql`
fragment F on T {
id
}
`,
props.post
);
const allUrls = [];
// `media` and `urls` are exported from the scope that will wrap this code,
// but `comments` is not (it doesn't need to be memoized, bc the callback
// only checks `comments.length`)
// because of the scope, the let declaration for media and urls are lifted
// out of the scope, and the destructure statement ends up turning into
// a reassignment, instead of a const declaration. this means we try to
// reassign `comments` when there's no declaration for it.
const {media, comments, urls} = post;
const onClick = e => {
if (!comments.length) {
return;
}
console.log(comments.length);
};
allUrls.push(...urls);
return <Media media={media} onClick={onClick} />;
}
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
import { useFragment } from "shared-runtime";
function Component(props) {
const $ = _c(4);
const post = useFragment(
graphql`
fragment F on T {
id
}
`,
props.post,
);
let t0;
if ($[0] !== post) {
const allUrls = [];
const { media, comments, urls } = post;
let t1;
if ($[2] !== comments.length) {
t1 = (e) => {
if (!comments.length) {
return;
}
console.log(comments.length);
};
$[2] = comments.length;
$[3] = t1;
} else {
t1 = $[3];
}
const onClick = t1;
allUrls.push(...urls);
t0 = <Media media={media} onClick={onClick} />;
$[0] = post;
$[1] = t0;
} else {
t0 = $[1];
}
return t0;
}
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/destructuring-mixed-scope-declarations-and-locals.expect.md |
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import boto3
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
@mock_ec2_deprecated
def test_console_output():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance_id = reservation.instances[0].id
output = conn.get_console_output(instance_id)
output.output.should_not.equal(None)
@mock_ec2_deprecated
def test_console_output_without_instance():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_console_output('i-1234abcd')
cm.exception.code.should.equal('InvalidInstanceID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_console_output_boto3():
conn = boto3.resource('ec2', 'us-east-1')
instances = conn.create_instances(ImageId='ami-1234abcd',
MinCount=1,
MaxCount=1)
output = instances[0].console_output()
output.get('Output').should_not.equal(None) | unknown | codeparrot/codeparrot-clean | ||
/*
* cash.c
* Written by D'Arcy J.M. Cain
* darcy@druid.net
* http://www.druid.net/darcy/
*
* Functions to allow input and output of money normally but store
* and handle it as 64 bit ints
*
* A slightly modified version of this file and a discussion of the
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7 except that
* this version handles 64 bit numbers and so can hold values up to
* $92,233,720,368,547,758.07.
*
* src/backend/utils/adt/cash.c
*/
#include "postgres.h"
#include <limits.h>
#include <ctype.h>
#include <math.h>
#include "common/int.h"
#include "libpq/pqformat.h"
#include "utils/builtins.h"
#include "utils/cash.h"
#include "utils/float.h"
#include "utils/numeric.h"
#include "utils/pg_locale.h"
/*************************************************************************
* Private routines
************************************************************************/
static void
append_num_word(StringInfo buf, Cash value)
{
static const char *const small[] = {
"zero", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
"fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty",
"thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"
};
const char *const *big = small + 18;
int tu = value % 100;
/* deal with the simple cases first */
if (value <= 20)
{
appendStringInfoString(buf, small[value]);
return;
}
/* is it an even multiple of 100? */
if (!tu)
{
appendStringInfo(buf, "%s hundred", small[value / 100]);
return;
}
/* more than 99? */
if (value > 99)
{
/* is it an even multiple of 10 other than 10? */
if (value % 10 == 0 && tu > 10)
appendStringInfo(buf, "%s hundred %s",
small[value / 100], big[tu / 10]);
else if (tu < 20)
appendStringInfo(buf, "%s hundred and %s",
small[value / 100], small[tu]);
else
appendStringInfo(buf, "%s hundred %s %s",
small[value / 100], big[tu / 10], small[tu % 10]);
}
else
{
/* is it an even multiple of 10 other than 10? */
if (value % 10 == 0 && tu > 10)
appendStringInfoString(buf, big[tu / 10]);
else if (tu < 20)
appendStringInfoString(buf, small[tu]);
else
appendStringInfo(buf, "%s %s", big[tu / 10], small[tu % 10]);
}
}
static inline Cash
cash_pl_cash(Cash c1, Cash c2)
{
Cash res;
if (unlikely(pg_add_s64_overflow(c1, c2, &res)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("money out of range")));
return res;
}
static inline Cash
cash_mi_cash(Cash c1, Cash c2)
{
Cash res;
if (unlikely(pg_sub_s64_overflow(c1, c2, &res)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("money out of range")));
return res;
}
static inline Cash
cash_mul_float8(Cash c, float8 f)
{
float8 res = rint(float8_mul((float8) c, f));
if (unlikely(isnan(res) || !FLOAT8_FITS_IN_INT64(res)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("money out of range")));
return (Cash) res;
}
static inline Cash
cash_div_float8(Cash c, float8 f)
{
float8 res = rint(float8_div((float8) c, f));
if (unlikely(isnan(res) || !FLOAT8_FITS_IN_INT64(res)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("money out of range")));
return (Cash) res;
}
static inline Cash
cash_mul_int64(Cash c, int64 i)
{
Cash res;
if (unlikely(pg_mul_s64_overflow(c, i, &res)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("money out of range")));
return res;
}
static inline Cash
cash_div_int64(Cash c, int64 i)
{
if (unlikely(i == 0))
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
return c / i;
}
/* cash_in()
* Convert a string to a cash data type.
* Format is [$]###[,]###[.##]
* Examples: 123.45 $123.45 $123,456.78
*
*/
Datum
cash_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
Node *escontext = fcinfo->context;
Cash result;
Cash value = 0;
Cash dec = 0;
Cash sgn = 1;
bool seen_dot = false;
const char *s = str;
int fpoint;
char dsymbol;
const char *ssymbol,
*psymbol,
*nsymbol,
*csymbol;
struct lconv *lconvert = PGLC_localeconv();
/*
* frac_digits will be CHAR_MAX in some locales, notably C. However, just
* testing for == CHAR_MAX is risky, because of compilers like gcc that
* "helpfully" let you alter the platform-standard definition of whether
* char is signed or not. If we are so unfortunate as to get compiled
* with a nonstandard -fsigned-char or -funsigned-char switch, then our
* idea of CHAR_MAX will not agree with libc's. The safest course is not
* to test for CHAR_MAX at all, but to impose a range check for plausible
* frac_digits values.
*/
fpoint = lconvert->frac_digits;
if (fpoint < 0 || fpoint > 10)
fpoint = 2; /* best guess in this case, I think */
/* we restrict dsymbol to be a single byte, but not the other symbols */
if (*lconvert->mon_decimal_point != '\0' &&
lconvert->mon_decimal_point[1] == '\0')
dsymbol = *lconvert->mon_decimal_point;
else
dsymbol = '.';
if (*lconvert->mon_thousands_sep != '\0')
ssymbol = lconvert->mon_thousands_sep;
else /* ssymbol should not equal dsymbol */
ssymbol = (dsymbol != ',') ? "," : ".";
csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";
psymbol = (*lconvert->positive_sign != '\0') ? lconvert->positive_sign : "+";
nsymbol = (*lconvert->negative_sign != '\0') ? lconvert->negative_sign : "-";
#ifdef CASHDEBUG
printf("cashin- precision '%d'; decimal '%c'; thousands '%s'; currency '%s'; positive '%s'; negative '%s'\n",
fpoint, dsymbol, ssymbol, csymbol, psymbol, nsymbol);
#endif
/* we need to add all sorts of checking here. For now just */
/* strip all leading whitespace and any leading currency symbol */
while (isspace((unsigned char) *s))
s++;
if (strncmp(s, csymbol, strlen(csymbol)) == 0)
s += strlen(csymbol);
while (isspace((unsigned char) *s))
s++;
#ifdef CASHDEBUG
printf("cashin- string is '%s'\n", s);
#endif
/* a leading minus or paren signifies a negative number */
/* again, better heuristics needed */
/* XXX - doesn't properly check for balanced parens - djmc */
if (strncmp(s, nsymbol, strlen(nsymbol)) == 0)
{
sgn = -1;
s += strlen(nsymbol);
}
else if (*s == '(')
{
sgn = -1;
s++;
}
else if (strncmp(s, psymbol, strlen(psymbol)) == 0)
s += strlen(psymbol);
#ifdef CASHDEBUG
printf("cashin- string is '%s'\n", s);
#endif
/* allow whitespace and currency symbol after the sign, too */
while (isspace((unsigned char) *s))
s++;
if (strncmp(s, csymbol, strlen(csymbol)) == 0)
s += strlen(csymbol);
while (isspace((unsigned char) *s))
s++;
#ifdef CASHDEBUG
printf("cashin- string is '%s'\n", s);
#endif
/*
* We accumulate the absolute amount in "value" and then apply the sign at
* the end. (The sign can appear before or after the digits, so it would
* be more complicated to do otherwise.) Because of the larger range of
* negative signed integers, we build "value" in the negative and then
* flip the sign at the end, catching most-negative-number overflow if
* necessary.
*/
for (; *s; s++)
{
/*
* We look for digits as long as we have found less than the required
* number of decimal places.
*/
if (isdigit((unsigned char) *s) && (!seen_dot || dec < fpoint))
{
int8 digit = *s - '0';
if (pg_mul_s64_overflow(value, 10, &value) ||
pg_sub_s64_overflow(value, digit, &value))
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("value \"%s\" is out of range for type %s",
str, "money")));
if (seen_dot)
dec++;
}
/* decimal point? then start counting fractions... */
else if (*s == dsymbol && !seen_dot)
{
seen_dot = true;
}
/* ignore if "thousands" separator, else we're done */
else if (strncmp(s, ssymbol, strlen(ssymbol)) == 0)
s += strlen(ssymbol) - 1;
else
break;
}
/* round off if there's another digit */
if (isdigit((unsigned char) *s) && *s >= '5')
{
/* remember we build the value in the negative */
if (pg_sub_s64_overflow(value, 1, &value))
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("value \"%s\" is out of range for type %s",
str, "money")));
}
/* adjust for less than required decimal places */
for (; dec < fpoint; dec++)
{
if (pg_mul_s64_overflow(value, 10, &value))
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("value \"%s\" is out of range for type %s",
str, "money")));
}
/*
* should only be trailing digits followed by whitespace, right paren,
* trailing sign, and/or trailing currency symbol
*/
while (isdigit((unsigned char) *s))
s++;
while (*s)
{
if (isspace((unsigned char) *s) || *s == ')')
s++;
else if (strncmp(s, nsymbol, strlen(nsymbol)) == 0)
{
sgn = -1;
s += strlen(nsymbol);
}
else if (strncmp(s, psymbol, strlen(psymbol)) == 0)
s += strlen(psymbol);
else if (strncmp(s, csymbol, strlen(csymbol)) == 0)
s += strlen(csymbol);
else
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type %s: \"%s\"",
"money", str)));
}
/*
* If the value is supposed to be positive, flip the sign, but check for
* the most negative number.
*/
if (sgn > 0)
{
if (value == PG_INT64_MIN)
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("value \"%s\" is out of range for type %s",
str, "money")));
result = -value;
}
else
result = value;
#ifdef CASHDEBUG
printf("cashin- result is " INT64_FORMAT "\n", result);
#endif
PG_RETURN_CASH(result);
}
/* cash_out()
 * Function to convert cash to a dollars and cents representation, using
 * the lc_monetary locale's formatting.
 *
 * The digits (plus decimal point and thousands separators) are generated
 * right-to-left into buf[], then the currency symbol and sign are attached
 * according to the POSIX lconv fields (see the big comment before the
 * switch below).
 */
Datum
cash_out(PG_FUNCTION_ARGS)
{
    Cash        value = PG_GETARG_CASH(0);
    uint64      uvalue;
    char       *result;
    char        buf[128];       /* workspace for right-to-left digit build */
    char       *bufptr;
    int         digit_pos;
    int         points,
                mon_group;
    char        dsymbol;
    const char *ssymbol,
               *csymbol,
               *signsymbol;
    char        sign_posn,
                cs_precedes,
                sep_by_space;
    struct lconv *lconvert = PGLC_localeconv();

    /* see comments about frac_digits in cash_in() */
    points = lconvert->frac_digits;
    if (points < 0 || points > 10)
        points = 2;             /* best guess in this case, I think */

    /*
     * As with frac_digits, must apply a range check to mon_grouping to avoid
     * being fooled by variant CHAR_MAX values.
     */
    mon_group = *lconvert->mon_grouping;
    if (mon_group <= 0 || mon_group > 6)
        mon_group = 3;

    /* we restrict dsymbol to be a single byte, but not the other symbols */
    if (*lconvert->mon_decimal_point != '\0' &&
        lconvert->mon_decimal_point[1] == '\0')
        dsymbol = *lconvert->mon_decimal_point;
    else
        dsymbol = '.';
    if (*lconvert->mon_thousands_sep != '\0')
        ssymbol = lconvert->mon_thousands_sep;
    else                        /* ssymbol should not equal dsymbol */
        ssymbol = (dsymbol != ',') ? "," : ".";
    csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";

    if (value < 0)
    {
        /* set up formatting data */
        signsymbol = (*lconvert->negative_sign != '\0') ? lconvert->negative_sign : "-";
        sign_posn = lconvert->n_sign_posn;
        cs_precedes = lconvert->n_cs_precedes;
        sep_by_space = lconvert->n_sep_by_space;
    }
    else
    {
        signsymbol = lconvert->positive_sign;
        sign_posn = lconvert->p_sign_posn;
        cs_precedes = lconvert->p_cs_precedes;
        sep_by_space = lconvert->p_sep_by_space;
    }

    /* make the amount positive for digit-reconstruction loop */
    uvalue = pg_abs_s64(value);

    /* we build the digits+decimal-point+sep string right-to-left in buf[] */
    bufptr = buf + sizeof(buf) - 1;
    *bufptr = '\0';

    /*
     * Generate digits till there are no non-zero digits left and we emitted
     * at least one to the left of the decimal point.  digit_pos is the
     * current digit position, with zero as the digit just left of the decimal
     * point, increasing to the right.
     */
    digit_pos = points;
    do
    {
        if (points && digit_pos == 0)
        {
            /* insert decimal point, but not if value cannot be fractional */
            *(--bufptr) = dsymbol;
        }
        else if (digit_pos < 0 && (digit_pos % mon_group) == 0)
        {
            /* insert thousands sep, but only to left of radix point */
            bufptr -= strlen(ssymbol);
            memcpy(bufptr, ssymbol, strlen(ssymbol));
        }
        *(--bufptr) = (uvalue % 10) + '0';
        uvalue = uvalue / 10;
        digit_pos--;
    } while (uvalue || digit_pos >= 0);

    /*----------
     * Now, attach currency symbol and sign symbol in the correct order.
     *
     * The POSIX spec defines these values controlling this code:
     *
     * p/n_sign_posn:
     *  0   Parentheses enclose the quantity and the currency_symbol.
     *  1   The sign string precedes the quantity and the currency_symbol.
     *  2   The sign string succeeds the quantity and the currency_symbol.
     *  3   The sign string precedes the currency_symbol.
     *  4   The sign string succeeds the currency_symbol.
     *
     * p/n_cs_precedes: 0 means currency symbol after value, else before it.
     *
     * p/n_sep_by_space:
     *  0   No <space> separates the currency symbol and value.
     *  1   If the currency symbol and sign string are adjacent, a <space>
     *      separates them from the value; otherwise, a <space> separates
     *      the currency symbol from the value.
     *  2   If the currency symbol and sign string are adjacent, a <space>
     *      separates them; otherwise, a <space> separates the sign string
     *      from the value.
     *----------
     */
    switch (sign_posn)
    {
        case 0:
            if (cs_precedes)
                result = psprintf("(%s%s%s)",
                                  csymbol,
                                  (sep_by_space == 1) ? " " : "",
                                  bufptr);
            else
                result = psprintf("(%s%s%s)",
                                  bufptr,
                                  (sep_by_space == 1) ? " " : "",
                                  csymbol);
            break;
        case 1:
        default:
            if (cs_precedes)
                result = psprintf("%s%s%s%s%s",
                                  signsymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  csymbol,
                                  (sep_by_space == 1) ? " " : "",
                                  bufptr);
            else
                result = psprintf("%s%s%s%s%s",
                                  signsymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  bufptr,
                                  (sep_by_space == 1) ? " " : "",
                                  csymbol);
            break;
        case 2:
            if (cs_precedes)
                result = psprintf("%s%s%s%s%s",
                                  csymbol,
                                  (sep_by_space == 1) ? " " : "",
                                  bufptr,
                                  (sep_by_space == 2) ? " " : "",
                                  signsymbol);
            else
                result = psprintf("%s%s%s%s%s",
                                  bufptr,
                                  (sep_by_space == 1) ? " " : "",
                                  csymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  signsymbol);
            break;
        case 3:
            if (cs_precedes)
                result = psprintf("%s%s%s%s%s",
                                  signsymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  csymbol,
                                  (sep_by_space == 1) ? " " : "",
                                  bufptr);
            else
                result = psprintf("%s%s%s%s%s",
                                  bufptr,
                                  (sep_by_space == 1) ? " " : "",
                                  signsymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  csymbol);
            break;
        case 4:
            if (cs_precedes)
                result = psprintf("%s%s%s%s%s",
                                  csymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  signsymbol,
                                  (sep_by_space == 1) ? " " : "",
                                  bufptr);
            else
                result = psprintf("%s%s%s%s%s",
                                  bufptr,
                                  (sep_by_space == 1) ? " " : "",
                                  csymbol,
                                  (sep_by_space == 2) ? " " : "",
                                  signsymbol);
            break;
    }

    PG_RETURN_CSTRING(result);
}
/*
 * cash_recv - converts external binary format to cash
 *
 * The wire representation is simply the int64 value in network byte order.
 */
Datum
cash_recv(PG_FUNCTION_ARGS)
{
    StringInfo  buf = (StringInfo) PG_GETARG_POINTER(0);

    PG_RETURN_CASH((Cash) pq_getmsgint64(buf));
}

/*
 * cash_send - converts cash to binary format
 *
 * Inverse of cash_recv: emits the raw int64 value.
 */
Datum
cash_send(PG_FUNCTION_ARGS)
{
    Cash        arg1 = PG_GETARG_CASH(0);
    StringInfoData buf;

    pq_begintypsend(&buf);
    pq_sendint64(&buf, arg1);
    PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*
 * Comparison functions
 *
 * Cash is a plain int64 under the hood, so ordinary integer comparison
 * gives the correct ordering for all of these operators.
 */
Datum
cash_eq(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 == c2);
}

Datum
cash_ne(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 != c2);
}

Datum
cash_lt(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 < c2);
}

Datum
cash_le(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 <= c2);
}

Datum
cash_gt(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 > c2);
}

Datum
cash_ge(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_BOOL(c1 >= c2);
}

/* three-way comparison support function (for btree, ORDER BY, etc.) */
Datum
cash_cmp(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    if (c1 > c2)
        PG_RETURN_INT32(1);
    else if (c1 == c2)
        PG_RETURN_INT32(0);
    else
        PG_RETURN_INT32(-1);
}
/* cash_pl()
 * Add two cash values.  Overflow handling is delegated to cash_pl_cash().
 */
Datum
cash_pl(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_pl_cash(c1, c2));
}

/* cash_mi()
 * Subtract two cash values.  Overflow handling is delegated to cash_mi_cash().
 */
Datum
cash_mi(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mi_cash(c1, c2));
}
/* cash_div_cash()
 * Divide cash by cash, returning float8.
 *
 * The result is a dimensionless ratio, hence float8 rather than money.
 * Division by zero is reported as an error rather than returning Inf/NaN.
 */
Datum
cash_div_cash(PG_FUNCTION_ARGS)
{
    Cash        dividend = PG_GETARG_CASH(0);
    Cash        divisor = PG_GETARG_CASH(1);
    float8      quotient;

    if (divisor == 0)
        ereport(ERROR,
                (errcode(ERRCODE_DIVISION_BY_ZERO),
                 errmsg("division by zero")));

    quotient = (float8) dividend / (float8) divisor;
    PG_RETURN_FLOAT8(quotient);
}
/* cash_mul_flt8()
 * Multiply cash by float8.
 *
 * All float multiply/divide variants funnel into cash_mul_float8 /
 * cash_div_float8, which own the rounding and overflow behavior.
 */
Datum
cash_mul_flt8(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    float8      f = PG_GETARG_FLOAT8(1);

    PG_RETURN_CASH(cash_mul_float8(c, f));
}

/* flt8_mul_cash()
 * Multiply float8 by cash (commutes to the same helper).
 */
Datum
flt8_mul_cash(PG_FUNCTION_ARGS)
{
    float8      f = PG_GETARG_FLOAT8(0);
    Cash        c = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mul_float8(c, f));
}

/* cash_div_flt8()
 * Divide cash by float8.
 */
Datum
cash_div_flt8(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    float8      f = PG_GETARG_FLOAT8(1);

    PG_RETURN_CASH(cash_div_float8(c, f));
}

/* cash_mul_flt4()
 * Multiply cash by float4 (widened to float8 first).
 */
Datum
cash_mul_flt4(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    float4      f = PG_GETARG_FLOAT4(1);

    PG_RETURN_CASH(cash_mul_float8(c, (float8) f));
}

/* flt4_mul_cash()
 * Multiply float4 by cash.
 */
Datum
flt4_mul_cash(PG_FUNCTION_ARGS)
{
    float4      f = PG_GETARG_FLOAT4(0);
    Cash        c = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mul_float8(c, (float8) f));
}

/* cash_div_flt4()
 * Divide cash by float4 (widened to float8 first).
 */
Datum
cash_div_flt4(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    float4      f = PG_GETARG_FLOAT4(1);

    PG_RETURN_CASH(cash_div_float8(c, (float8) f));
}
/* cash_mul_int8()
 * Multiply cash by int8.
 *
 * All integer multiply/divide variants funnel into cash_mul_int64 /
 * cash_div_int64, which own the overflow and division-by-zero checks.
 */
Datum
cash_mul_int8(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int64       i = PG_GETARG_INT64(1);

    PG_RETURN_CASH(cash_mul_int64(c, i));
}

/* int8_mul_cash()
 * Multiply int8 by cash (commutes to the same helper).
 */
Datum
int8_mul_cash(PG_FUNCTION_ARGS)
{
    int64       i = PG_GETARG_INT64(0);
    Cash        c = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mul_int64(c, i));
}

/* cash_div_int8()
 * Divide cash by 8-byte integer.
 */
Datum
cash_div_int8(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int64       i = PG_GETARG_INT64(1);

    PG_RETURN_CASH(cash_div_int64(c, i));
}

/* cash_mul_int4()
 * Multiply cash by int4 (widened to int64 first).
 */
Datum
cash_mul_int4(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int32       i = PG_GETARG_INT32(1);

    PG_RETURN_CASH(cash_mul_int64(c, (int64) i));
}

/* int4_mul_cash()
 * Multiply int4 by cash.
 */
Datum
int4_mul_cash(PG_FUNCTION_ARGS)
{
    int32       i = PG_GETARG_INT32(0);
    Cash        c = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mul_int64(c, (int64) i));
}

/* cash_div_int4()
 * Divide cash by 4-byte integer.
 */
Datum
cash_div_int4(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int32       i = PG_GETARG_INT32(1);

    PG_RETURN_CASH(cash_div_int64(c, (int64) i));
}

/* cash_mul_int2()
 * Multiply cash by int2 (widened to int64 first).
 */
Datum
cash_mul_int2(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int16       s = PG_GETARG_INT16(1);

    PG_RETURN_CASH(cash_mul_int64(c, (int64) s));
}

/* int2_mul_cash()
 * Multiply int2 by cash.
 */
Datum
int2_mul_cash(PG_FUNCTION_ARGS)
{
    int16       s = PG_GETARG_INT16(0);
    Cash        c = PG_GETARG_CASH(1);

    PG_RETURN_CASH(cash_mul_int64(c, (int64) s));
}

/* cash_div_int2()
 * Divide cash by int2.
 */
Datum
cash_div_int2(PG_FUNCTION_ARGS)
{
    Cash        c = PG_GETARG_CASH(0);
    int16       s = PG_GETARG_INT16(1);

    PG_RETURN_CASH(cash_div_int64(c, (int64) s));
}
/* cashlarger()
 * Return larger of two cash values (support for max() aggregate).
 */
Datum
cashlarger(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);
    Cash        result;

    result = (c1 > c2) ? c1 : c2;

    PG_RETURN_CASH(result);
}

/* cashsmaller()
 * Return smaller of two cash values (support for min() aggregate).
 */
Datum
cashsmaller(PG_FUNCTION_ARGS)
{
    Cash        c1 = PG_GETARG_CASH(0);
    Cash        c2 = PG_GETARG_CASH(1);
    Cash        result;

    result = (c1 < c2) ? c1 : c2;

    PG_RETURN_CASH(result);
}
/* cash_words()
 * Convert a cash value to an English-words representation
 * ("One dollar and five cents").  Obviously way North American centric -
 * sorry.  Handles the full int64 range (up to quadrillions of dollars).
 */
Datum
cash_words(PG_FUNCTION_ARGS)
{
    Cash        value = PG_GETARG_CASH(0);
    uint64      val;
    StringInfoData buf;
    text       *res;
    Cash        dollars;
    Cash        m0;             /* cents */
    Cash        m1;             /* hundreds group (ones..hundreds of dollars) */
    Cash        m2;             /* thousands group */
    Cash        m3;             /* millions group */
    Cash        m4;             /* billions group */
    Cash        m5;             /* trillions group */
    Cash        m6;             /* quadrillions group */

    initStringInfo(&buf);

    /* work with positive numbers */
    if (value < 0)
    {
        value = -value;
        appendStringInfoString(&buf, "minus ");
    }

    /* Now treat as unsigned, to avoid trouble at INT_MIN */
    val = (uint64) value;

    dollars = val / INT64CONST(100);
    m0 = val % INT64CONST(100); /* cents */
    m1 = (val / INT64CONST(100)) % 1000;    /* hundreds */
    m2 = (val / INT64CONST(100000)) % 1000; /* thousands */
    m3 = (val / INT64CONST(100000000)) % 1000;  /* millions */
    m4 = (val / INT64CONST(100000000000)) % 1000;   /* billions */
    m5 = (val / INT64CONST(100000000000000)) % 1000;    /* trillions */
    m6 = (val / INT64CONST(100000000000000000)) % 1000; /* quadrillions */

    /* emit non-zero groups, largest first */
    if (m6)
    {
        append_num_word(&buf, m6);
        appendStringInfoString(&buf, " quadrillion ");
    }
    if (m5)
    {
        append_num_word(&buf, m5);
        appendStringInfoString(&buf, " trillion ");
    }
    if (m4)
    {
        append_num_word(&buf, m4);
        appendStringInfoString(&buf, " billion ");
    }
    if (m3)
    {
        append_num_word(&buf, m3);
        appendStringInfoString(&buf, " million ");
    }
    if (m2)
    {
        append_num_word(&buf, m2);
        appendStringInfoString(&buf, " thousand ");
    }
    if (m1)
        append_num_word(&buf, m1);
    if (dollars == 0)
        appendStringInfoString(&buf, "zero");

    appendStringInfoString(&buf, dollars == 1 ? " dollar and " : " dollars and ");
    append_num_word(&buf, m0);
    appendStringInfoString(&buf, m0 == 1 ? " cent" : " cents");

    /* capitalize output */
    buf.data[0] = pg_ascii_toupper((unsigned char) buf.data[0]);

    /* return as text datum */
    res = cstring_to_text_with_len(buf.data, buf.len);
    pfree(buf.data);
    PG_RETURN_TEXT_P(res);
}
/* cash_numeric()
 * Convert cash to numeric, scaling by the locale's frac_digits so that
 * the stored integer (e.g. cents) becomes a fractional numeric value.
 */
Datum
cash_numeric(PG_FUNCTION_ARGS)
{
    Cash        money = PG_GETARG_CASH(0);
    Datum       result;
    int         fpoint;
    struct lconv *lconvert = PGLC_localeconv();

    /* see comments about frac_digits in cash_in() */
    fpoint = lconvert->frac_digits;
    if (fpoint < 0 || fpoint > 10)
        fpoint = 2;

    /* convert the integral money value to numeric */
    result = NumericGetDatum(int64_to_numeric(money));

    /* scale appropriately, if needed */
    if (fpoint > 0)
    {
        int64       scale;
        int         i;
        Datum       numeric_scale;
        Datum       quotient;

        /* compute required scale factor (10^fpoint) */
        scale = 1;
        for (i = 0; i < fpoint; i++)
            scale *= 10;
        numeric_scale = NumericGetDatum(int64_to_numeric(scale));

        /*
         * Given integral inputs approaching INT64_MAX, select_div_scale()
         * might choose a result scale of zero, causing loss of fractional
         * digits in the quotient.  We can ensure an exact result by setting
         * the dscale of either input to be at least as large as the desired
         * result scale.  numeric_round() will do that for us.
         */
        numeric_scale = DirectFunctionCall2(numeric_round,
                                            numeric_scale,
                                            Int32GetDatum(fpoint));

        /* Now we can safely divide ... */
        quotient = DirectFunctionCall2(numeric_div, result, numeric_scale);

        /* ... and forcibly round to exactly the intended number of digits */
        result = DirectFunctionCall2(numeric_round,
                                     quotient,
                                     Int32GetDatum(fpoint));
    }

    PG_RETURN_DATUM(result);
}
/* numeric_cash()
 * Convert numeric to cash: multiply by 10^frac_digits and round to the
 * nearest integral "cents" value.
 */
Datum
numeric_cash(PG_FUNCTION_ARGS)
{
    Datum       amount = PG_GETARG_DATUM(0);
    Cash        result;
    int         fpoint;
    int64       scale;
    int         i;
    Datum       numeric_scale;
    struct lconv *lconvert = PGLC_localeconv();

    /* see comments about frac_digits in cash_in() */
    fpoint = lconvert->frac_digits;
    if (fpoint < 0 || fpoint > 10)
        fpoint = 2;

    /* compute required scale factor (10^fpoint) */
    scale = 1;
    for (i = 0; i < fpoint; i++)
        scale *= 10;

    /* multiply the input amount by scale factor */
    numeric_scale = NumericGetDatum(int64_to_numeric(scale));
    amount = DirectFunctionCall2(numeric_mul, amount, numeric_scale);

    /* note that numeric_int8 will round to nearest integer for us */
    result = DatumGetInt64(DirectFunctionCall1(numeric_int8, amount));

    PG_RETURN_CASH(result);
}
/* int4_cash()
 * Convert int4 (int) to cash: the input is whole currency units, so it is
 * multiplied by 10^frac_digits to get the stored "cents" value.
 */
Datum
int4_cash(PG_FUNCTION_ARGS)
{
    int32       amount = PG_GETARG_INT32(0);
    Cash        result;
    int         fpoint;
    int64       scale;
    int         i;
    struct lconv *lconvert = PGLC_localeconv();

    /* see comments about frac_digits in cash_in() */
    fpoint = lconvert->frac_digits;
    if (fpoint < 0 || fpoint > 10)
        fpoint = 2;

    /* compute required scale factor (10^fpoint) */
    scale = 1;
    for (i = 0; i < fpoint; i++)
        scale *= 10;

    /* compute amount * scale, checking for overflow via int8mul */
    result = DatumGetInt64(DirectFunctionCall2(int8mul, Int64GetDatum(amount),
                                               Int64GetDatum(scale)));

    PG_RETURN_CASH(result);
}
/* int8_cash()
 * Convert int8 (bigint) to cash; same scaling logic as int4_cash().
 */
Datum
int8_cash(PG_FUNCTION_ARGS)
{
    int64       amount = PG_GETARG_INT64(0);
    Cash        result;
    int         fpoint;
    int64       scale;
    int         i;
    struct lconv *lconvert = PGLC_localeconv();

    /* see comments about frac_digits in cash_in() */
    fpoint = lconvert->frac_digits;
    if (fpoint < 0 || fpoint > 10)
        fpoint = 2;

    /* compute required scale factor (10^fpoint) */
    scale = 1;
    for (i = 0; i < fpoint; i++)
        scale *= 10;

    /* compute amount * scale, checking for overflow via int8mul */
    result = DatumGetInt64(DirectFunctionCall2(int8mul, Int64GetDatum(amount),
                                               Int64GetDatum(scale)));

    PG_RETURN_CASH(result);
}
#!/usr/bin/env python
"""create_min_chi2_table.py.
Create Table of minimum Chi_2 values and save to a table.
"""
import argparse
import os
import sys
import corner
import matplotlib.pyplot as plt
import pandas as pd
import sqlalchemy as sa
from joblib import Parallel, delayed
from pandas.plotting import scatter_matrix
import simulators
from mingle.utilities.param_file import get_host_params
from mingle.utilities.phoenix_utils import closest_model_params
from mingle.utilities.scatter_corner import scatter_corner
from mingle.utilities.db_utils import decompose_database_name
def parse_args(args):
    """Build and run the command-line parser for this script.

    :param args: list of argument strings, typically ``sys.argv[1:]``.
    :returns: parsed :class:`argparse.Namespace` with ``star`` and ``suffix``.
    """
    cli = argparse.ArgumentParser(description='Minimum chi-squared table.')
    cli.add_argument('star', help='Star name')
    cli.add_argument('--suffix', help='Suffix to add to the file names.', default="")
    return cli.parse_args(args)
def main(star, obsnum, chip, suffix="", echo=False):
    """Find the minimum chi-squared row for one observation/chip and append it to a TSV table.

    Queries the per-observation iam chi-squared sqlite database, restricted to the
    host star's closest model parameters, and appends the single best (lowest chi2)
    row to a cumulative per-star TSV file.

    :param star: Star name, used to locate the database and output paths.
    :param obsnum: Observation number identifier (string).
    :param chip: Detector chip number.
    :param suffix: Optional suffix on database/output filenames.
    :param echo: Pass-through to sqlalchemy engine for SQL logging.
    :returns: Path of the TSV file the row was appended to.
    :raises IOError: If the expected database file does not exist.
    :raises ValueError: If the database does not contain exactly one table.
    """
    database = os.path.join(simulators.paths["output_dir"], star, "iam",
                            "{0}-{1}_{2}_iam_chisqr_results{3}.db".format(star, obsnum, chip, suffix))
    path, star, obsnum, chip = decompose_database_name(database)
    os.makedirs(os.path.join(path, "plots"), exist_ok=True)
    save_name = os.path.join(path, "{0}_iam_all_observation_min_chi2{1}.tsv".format(star, suffix))

    teff, logg, fe_h = closest_model_params(*get_host_params(star))
    params = {"path": path, "star": star, "obsnum": obsnum, "chip": chip,
              "teff": teff, "logg": logg, "fe_h": fe_h}

    # Hack to run from editor
    if os.getcwd().endswith("companion_simulations/bin"):
        database = "../" + database
        save_name = "../" + save_name

    if os.path.exists(database):
        engine = sa.create_engine('sqlite:///{0}'.format(database), echo=echo)
    else:
        raise IOError("Database does not exist.")

    table_names = engine.table_names()
    if len(table_names) == 1:
        tb_name = table_names[0]
    else:
        # Fixed typo in error message ("two many" -> "too many").
        raise ValueError("Database has too many tables {0}".format(table_names))

    # Best (lowest chi2) row for the host's fixed model parameters.
    query = """SELECT * FROM {0}
               WHERE (teff_1 = {1} AND logg_1 = {2} AND feh_1 = {3})
               ORDER BY chi2 LIMIT 1
               """.format(tb_name, params["teff"], params["logg"], params["fe_h"])
    df = pd.read_sql(sa.text(query), engine)

    df["obsnum"] = obsnum
    df["chip"] = chip
    columns = ["obsnum", "chip", "teff_1", "logg_1", "feh_1", "teff_2",
               "logg_2", "feh_2", "alpha", "rv", "gamma", "chi2"]

    # Append to the cumulative table; write the header only on first creation.
    if os.path.exists(save_name):
        df.to_csv(save_name, columns=columns, sep='\t', mode="a", index=False, header=False)
    else:
        df.to_csv(save_name, columns=columns, sep='\t', mode="a", index=False, header=True)
    return save_name
def scatter_plots(star, filename):
    """Load the minimum-chi2 table and save a scatter-matrix plot across chips.

    :param star: Star name used in the plot title.
    :param filename: Path of the TSV table produced by ``main``.
    """
    df = pd.read_table(filename, sep="\t")
    df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
    # Removed unused `fig, axes = plt.subplots(5, 2)` - scatter_matrix creates
    # its own figure, so the subplots figure was never used and leaked.
    subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]]  # "logg_2", "feh_2"
    scatter_matrix(subdf, alpha=1, figsize=(12, 12), diagonal='hist')
    plt.suptitle("{0} Observation/chip variations".format(star))
    path, fname = os.path.split(filename)
    figname = os.path.join(path, "plots", "{0}_scatter.pdf".format(fname.split(".")[0]))
    plt.savefig(figname)
    figname = os.path.join(path, "plots", "{0}_scatter.png".format(fname.split(".")[0]))
    plt.savefig(figname)
    plt.close()
def scatter_corner_plots(star, filename):
    """Load the minimum-chi2 table and save a lower-corner scatter plot across chips.

    :param star: Star name used in the plot title.
    :param filename: Path of the TSV table produced by ``main``.
    """
    df = pd.read_table(filename, sep="\t")
    df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
    # Removed unused `fig, axes = plt.subplots(5, 2)` - scatter_corner creates
    # its own figure, so the subplots figure was never used and leaked.
    subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]]  # "logg_2", "feh_2"
    scatter_corner(subdf, alpha=1, figsize=(12, 12), diagonal='hist', corner="lower")
    plt.suptitle("{0} Observation/chip variations".format(star))
    path, fname = os.path.split(filename)
    figname = os.path.join(path, "plots", "{0}_scatter_corner.pdf".format(fname.split(".")[0]))
    plt.savefig(figname)
    figname = os.path.join(path, "plots", "{0}_scatter_corner.png".format(fname.split(".")[0]))
    plt.savefig(figname)
    plt.close()
# Corner.corner
def min_chi2_corner_plot(star, filename):
    """Save two corner.corner plots (with and without contours) of the min-chi2 table.

    :param star: Star name used in the plot titles.
    :param filename: Path of the TSV table produced by ``main``.
    """
    df = pd.read_table(filename, sep="\t")
    df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
    subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]]  # "logg_2", "feh_2"
    corner.corner(subdf.values, labels=subdf.columns.values)
    plt.suptitle("{0} Observation/chip variations".format(star))
    path, fname = os.path.split(filename)
    figname = os.path.join(path, "plots", "{0}_corner_corner.png".format(fname.split(".")[0]))
    plt.savefig(figname)
    # Close the first figure before creating the second; previously it was
    # left open (figure leak) until interpreter exit.
    plt.close()
    corner.corner(subdf.values, labels=subdf.columns.values, plot_contours=False)
    plt.suptitle("{0} Observation/chip variations".format(star))
    figname = os.path.join(path, "plots", "{0}_corner_contoured.png".format(fname.split(".")[0]))
    plt.savefig(figname)
    plt.close()
# TODO common function to determine observations and chips for different stars (like here)
if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    star = args.star
    # Known observation numbers per star.
    obs_nums = {"HD30501": ["1", "2a", "2b", "3"], "HD211847": ["1", "2"], "HD4747": ["1"],
                "HDSIM": ["1", "2", "3"], "HDSIM2": ["1", "2", "3"], "HDSIM3": ["1", "2", "3"]}
    chips = range(1, 5)

    def paralleled_main(star, obsnum):
        """Run table creation and plotting for every chip of one observation."""
        for chip in chips:
            try:
                save_name = main(star, obsnum, chip, suffix=args.suffix)
            except Exception as e:
                print(e)
                print("Table creation failed for {0}-{1}_{2}".format(star, obsnum, chip))
                continue
        try:
            scatter_plots(star, save_name)
            scatter_corner_plots(star, save_name)
            min_chi2_corner_plot(star, save_name)
        except Exception as e:
            print(" Corner plots did not work.")
            raise e

    # Run in parallel.  Bug fix: the dict is named `obs_nums`, but this line
    # previously referenced the undefined name `obsnums` (NameError at runtime).
    star_obsnums = obs_nums[star]
    Parallel(n_jobs=-1)(delayed(paralleled_main)(star, obsnum) for obsnum in star_obsnums)
__author__ = 'dcline'

import os
import sys

# Configure Django settings module and extend sys.path so sibling
# packages resolve when run as a script.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../toNetCDF"))  # lrauvNc4ToNetcdf.py is in sister toNetCDF dir
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../"))  # settings.py is two dirs up
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "./"))

import pytz
# NOTE(review): the second import shadows the first - `Contour` ends up bound
# to the class from .Contour, making the module import redundant; confirm
# before removing either line.
from . import Contour
from .Contour import Contour
from datetime import datetime, timedelta
class makeContour(object):
    '''
    Create contour plots for visualizing data from LRAUV vehicles
    '''

    def process_command_line(self):
        '''Parse command-line options and derive the UTC query time window.

        Populates ``self.args``, ``self.commandline``, ``self.startDatetimeUTC``
        and ``self.endDatetimeUTC``.  The argparse library is included in
        Python 2.7 and is an added package for STOQS.
        '''
        import argparse
        from argparse import RawTextHelpFormatter
        parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
                                         description='Script to create contour plots of CTD data collected by the LRAUV')
        parser.add_argument('-d', '--database', action='store', help='database', default='stoqs')
        parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format', default='20150310T210000', required=False)
        parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format', default='20150311T21000', required=False)
        parser.add_argument('--daily', action='store', help='True to generate a daily plot', default=True, required=False)
        parser.add_argument('--animate', action='store', help='if True will create frames to make animation from', default=False, required=False)
        parser.add_argument('--zoom', action='store', help='time window in hours to zoom animation', default=8, required=False)
        parser.add_argument('--overlap', action='store', help='time window in hours to overlap animation', default=2, required=False)
        parser.add_argument('--title', action='store', help='Title for plots, will override default title created if --start specified', default='MBARI LRAUV Survey')
        parser.add_argument('-v', '--verbose', nargs='?', choices=[1, 2, 3], type=int, help='Turn on verbose output. Higher number = more output.', const=1)
        parser.add_argument('--minDepth', action='store', help='Minimum depth for data queries', default=0, type=float)
        parser.add_argument('--maxDepth', action='store', help='Maximum depth for data queries', default=80, type=float)
        parser.add_argument('-o', '--outDir', action='store', help='output directory to store contour image file', default='/tmp', required=False)
        parser.add_argument('--parms', action='store', help='List of space separated parameters to contour plot', nargs='*', default=
                            ['sea_water_temperature', 'sea_water_salinity', 'mass_concentration_of_chlorophyll_in_sea_water'])
        parser.add_argument('--platformName', action='store', help='Filename to store output image to', default='daphne', required=False)
        parser.add_argument('-t', '--contourUrl', action='store', help='base url to store cross referenced contour plot resources', default='http://elvis.shore.mbari.org/thredds/catalog/LRAUV/stoqs', required=False)

        self.args = parser.parse_args()
        self.commandline = ' '.join(sys.argv)

        startDatetime = datetime.strptime(self.args.start, '%Y%m%dT%H%M%S')
        endDatetime = datetime.strptime(self.args.end, '%Y%m%dT%H%M%S')
        self.endDatetimeUTC = pytz.utc.localize(endDatetime)
        endDatetimeLocal = self.endDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
        self.startDatetimeUTC = pytz.utc.localize(startDatetime)
        startDatetimeLocal = self.startDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))

        # If daily image round the UTC time to the local time and do the query for the 24 hour period
        if self.args.daily:
            startDatetimeLocal = startDatetimeLocal.replace(hour=0, minute=0, second=0, microsecond=0)
            endDatetimeLocal = startDatetimeLocal.replace(hour=23, minute=0, second=0, microsecond=0)
            self.startDatetimeUTC = startDatetimeLocal.astimezone(pytz.utc)
            self.endDatetimeUTC = endDatetimeLocal.astimezone(pytz.utc)

    def run(self):
        '''Generate the contour plot for the configured time window and copy it
        to the STOQS server.
        '''
        title = 'MBARI LRAUV Survey'
        outFile = self.args.outDir + '/' + self.args.platformName + '_log_' + self.startDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '_' + self.endDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '.png'
        c = Contour(self.startDatetimeUTC, self.endDatetimeUTC, self.args.database, self.args.platformName, self.args.parms, title, outFile, False)
        c.run()

        cmd = r'scp %s stoqsadm@elvis.shore.mbari.org:/mbari/LRAUV/stoqs' % (outFile)
        # logger.debug('%s', cmd)
        # Bug fix: removed leftover `import pdb; pdb.set_trace()` which halted
        # every run at an interactive debugger prompt.
        os.system(cmd)
if __name__ == '__main__':
    # Script entry point: parse arguments, then build and upload the plot.
    contour_maker = makeContour()
    contour_maker.process_command_line()
    contour_maker.run()
# -*- coding: utf-8 -*-
import fcntl
import json
import os
import psutil
import stat
import signal
import sys
import time
import threading
class Monitor:
    """WSGI middleware that records per-worker status into a shared JSON file.

    Each pre-forked worker process updates its own entry (pid, request count,
    busy/idle status, current uri/method) in ``filename`` under an fcntl
    exclusive lock.  Worker *threads* are not supported - in that case the
    file is overwritten with an explanatory message and the middleware
    becomes a pass-through.
    """

    def __init__(self, app, filename):
        self.app = app
        self.pid = os.getpid()
        self.filename = filename
        self.thread = False
        # Per-worker status record; commented-out keys are placeholders for
        # metrics not yet collected.
        self.worker = {
            "pid": self.pid,
            "requests": 0,
            "status": "idle",
            # "vss": 0,
            # "rss": 0,
            "last_spawn": int(time.time()),
            # "tx": 0,
            # "avg_rt": 0,
            "uri": "",
            "method": "",
        }
        if self.is_threadmodel():
            # Thread model unsupported: leave a message and disable monitoring.
            self.thread = True
            with open(filename, mode="w") as fp:
                fp.write("{}{}".format(
                    "WSGI status does not support worker thread model. ",
                    "Work only worker pre-fork."))
            return
        # Remember the handlers installed before us so we can chain to them.
        self.pre_sigint_handler = signal.getsignal(signal.SIGINT)
        self.pre_sigterm_handler = signal.getsignal(signal.SIGTERM)
        self.pre_sigabrt_handler = signal.getsignal(signal.SIGABRT)
        # Create status file for own process permissions.
        # If the status file predates the current master (parent) process it
        # is stale from a previous run - recreate it.
        ppid_ctime = psutil.Process(os.getppid()).create_time()
        file_ctime = 0.0
        if os.path.exists(self.filename):
            file_ctime = os.stat(self.filename).st_ctime
            if ppid_ctime > file_ctime:
                os.remove(self.filename)
        if ppid_ctime > file_ctime:
            with open(filename, mode="w") as f:
                obj = {
                    "workers": [],
                }
                json.dump(obj, f)
            os.chown(filename, os.getuid(), os.getgid())
            # Allow group write so sibling workers can update the file.
            statinfo = os.stat(filename)
            mode = statinfo.st_mode + stat.S_IWGRP
            os.chmod(filename, mode=mode)
        # Handler for receiving termination signal
        signal.signal(signal.SIGINT, self.handler)
        signal.signal(signal.SIGTERM, self.handler)
        signal.signal(signal.SIGABRT, self.handler)
        with open(self.filename, mode="r+") as self.fp:
            self.update_status(fp=self.fp, init=True)

    def __call__(self, environ, start_response):
        """WSGI entry point: mark busy before the app, idle after the response starts."""
        if self.thread is True:
            # Monitoring disabled (thread model): plain pass-through.
            resp = self.app(environ, start_response)
            return resp
        self.pre_request(environ)

        def post_request(status_code, headers, exc_info=None):
            # Wraps start_response so we flip back to idle exactly when the
            # application begins sending its response.
            self.worker["status"] = "idle"
            self.worker["uri"] = ""
            self.worker["method"] = ""
            with open(self.filename, mode="r+") as self.fp:
                self.update_status(fp=self.fp, init=False)
            return start_response(status_code, headers, exc_info)

        return self.app(environ, post_request)

    def pre_request(self, environ):
        """Record that this worker is busy handling the given request."""
        self.worker["requests"] += 1
        self.worker["status"] = "busy"
        self.worker["uri"] = environ["PATH_INFO"]
        self.worker["method"] = environ["REQUEST_METHOD"]
        with open(self.filename, mode="r+") as self.fp:
            self.update_status(fp=self.fp, init=False)

    def handler(self, signum, stack):
        """Signal handler: record the signal as status, then chain to the
        handler that was installed before this middleware."""
        self.worker["status"] = str(signum)
        self.worker["uri"] = ""
        self.worker["method"] = ""
        if not self.fp.closed:
            # Release any lock held at the moment the signal arrived to avoid
            # deadlocking against our own update below.
            fcntl.flock(self.fp, fcntl.LOCK_UN)
        with open(self.filename, mode="r+") as fp:
            self.update_status(fp=fp, init=False)
        if signum == signal.SIGINT:
            self.pre_sigint_handler(signum, stack)
        elif signum == signal.SIGTERM:
            self.pre_sigterm_handler(signum, stack)
        elif signum == signal.SIGABRT:
            self.pre_sigabrt_handler(signum, stack)

    def is_threadmodel(self):
        """Return True if the server already spawned worker threads."""
        if threading.active_count() > 1:
            return True
        return False

    def update_status(self, fp, init):
        """Rewrite this worker's entry in the shared JSON file under an
        exclusive fcntl lock.

        :param fp: open ``r+`` file object for the status file.
        :param init: True on the first write after fork (suppresses the
            missing-pid warning).
        """
        fcntl.flock(fp, fcntl.LOCK_EX)
        try:
            obj = {}
            try:
                obj = json.load(fp)
            except ValueError:
                # Failed to json parse - start from an empty structure.
                obj = {
                    "workers": [],
                }
            workers = [(i, v) for i, v in enumerate(obj["workers"]) if v["pid"] == self.pid]
            if len(workers) == 1:
                index = workers[0][0]
                obj["workers"][index] = self.worker
            else:
                obj["workers"].append(self.worker)
                if not init:
                    sys.stderr.write("not find self.pid: {} in workers key".format(self.pid))
            # Truncate before dump: the new JSON may be shorter than the old.
            fp.seek(0)
            fp.truncate(0)
            json.dump(obj, fp)
            fp.flush()
        finally:
            fcntl.flock(fp, fcntl.LOCK_UN)
kind: Deployment
apiVersion: apps/v1
metadata:
name: calico-typha-vertical-autoscaler
namespace: kube-system
labels:
k8s-app: calico-typha-autoscaler
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
matchLabels:
k8s-app: calico-typha-autoscaler
template:
metadata:
labels:
k8s-app: calico-typha-autoscaler
spec:
priorityClassName: system-cluster-critical
containers:
- image: registry.k8s.io/cpvpa-amd64:v0.8.3
name: autoscaler
command:
- /cpvpa
- --target=deployment/calico-typha
- --namespace=kube-system
- --logtostderr=true
- --poll-period-seconds=30
- --v=2
- --config-file=/etc/config/typha-autoscaler
volumeMounts:
- name: config
mountPath: /etc/config
volumes:
- name: config
configMap:
name: calico-typha-vertical-autoscaler
serviceAccountName: calico-cpva | unknown | github | https://github.com/kubernetes/kubernetes | cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import logging
import re
from . import DistlibException
from .compat import StringIO, string_types
from .markers import interpret
from .version import get_scheme
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
    """A required metadata is missing"""


class MetadataConflictError(DistlibException):
    """Attempt to read or write metadata fields that are conflictual."""


class MetadataUnrecognizedVersionError(DistlibException):
    """Unknown metadata version number."""
try:
    # docutils is installed - enable reST description validation support.
    from docutils.utils import Reporter
    from docutils.parsers.rst import Parser
    from docutils import frontend
    from docutils import nodes

    class SilentReporter(Reporter, object):
        """Reporter that collects system messages instead of writing them
        to a stream, so reST problems can be inspected programmatically."""

        def __init__(self, source, report_level, halt_level, stream=None,
                     debug=0, encoding='ascii', error_handler='replace'):
            self.messages = []
            super(SilentReporter, self).__init__(
                source, report_level, halt_level, stream,
                debug, encoding, error_handler)

        def system_message(self, level, message, *children, **kwargs):
            # Record the message and return the node docutils expects.
            self.messages.append((level, message, children, kwargs))
            return nodes.system_message(message, level=level,
                                        type=self.levels[level],
                                        *children, **kwargs)

    _HAS_DOCUTILS = True
except ImportError:
    # docutils is not installed
    _HAS_DOCUTILS = False
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
    """Return the tuple of legal field names for metadata *version*.

    Raises MetadataUnrecognizedVersionError for an unknown version string.
    """
    fields_by_version = {
        '1.0': _241_FIELDS,
        '1.1': _314_FIELDS,
        '1.2': _345_FIELDS,
        '2.0': _426_FIELDS,
    }
    try:
        return fields_by_version[version]
    except KeyError:
        raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
    """Detect the best version depending on the fields used.

    Given a mapping of field names to values, return the metadata version
    string ('1.0', '1.1', '1.2' or '2.0') best able to represent all
    non-empty fields.  Raises MetadataConflictError when no version fits,
    or when mutually-exclusive marker fields from several versions are mixed.
    """
    def _has_marker(keys, markers):
        # True when any version-specific marker field appears in keys.
        for marker in markers:
            if marker in keys:
                return True
        return False
    # Collect only the fields that actually carry a value.
    keys = []
    for key, value in fields.items():
        if value in ([], 'UNKNOWN', None):
            continue
        keys.append(key)
    possible_versions = ['1.0', '1.1', '1.2', '2.0']
    # first let's try to see if a field is not part of one of the version
    for key in keys:
        if key not in _241_FIELDS and '1.0' in possible_versions:
            possible_versions.remove('1.0')
        if key not in _314_FIELDS and '1.1' in possible_versions:
            possible_versions.remove('1.1')
        if key not in _345_FIELDS and '1.2' in possible_versions:
            possible_versions.remove('1.2')
        if key not in _426_FIELDS and '2.0' in possible_versions:
            possible_versions.remove('2.0')
    # possible_version contains qualified versions
    if len(possible_versions) == 1:
        return possible_versions[0]   # found !
    elif len(possible_versions) == 0:
        raise MetadataConflictError('Unknown metadata set')
    # let's see if one unique marker is found
    is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
    is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
    is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
    if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
        raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
    # we have the choice, 1.0, or 1.2, or 2.0
    # - 1.0 has a broken Summary field but works with all tools
    # - 1.1 is to avoid
    # - 1.2 fixes Summary but has little adoption
    # - 2.0 adds more features and is very new
    if not is_1_1 and not is_1_2 and not is_2_0:
        # we couldn't find any specific marker
        if PKG_INFO_PREFERRED_VERSION in possible_versions:
            return PKG_INFO_PREFERRED_VERSION
    if is_1_1:
        return '1.1'
    if is_1_2:
        return '1.2'
    return '2.0'
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
class Metadata(object):
"""The metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a METADATA file
- *fileobj* give a file-like object with METADATA as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document that execution_context and platform_dependent are used
# to filter on query, not when setting a key
# also document the mapping API and UNKNOWN default key
def __init__(self, path=None, platform_dependent=False,
execution_context=None, fileobj=None, mapping=None,
scheme='default'):
self._fields = {}
self.requires_files = []
self.docutils_support = _HAS_DOCUTILS
self.platform_dependent = platform_dependent
self.execution_context = execution_context
self._dependencies = None
self.scheme = scheme
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, file, name, value):
file.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _check_rst_data(self, data):
"""Return warnings when the provided data has syntax errors."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
def _platform(self, value):
if not self.platform_dependent or ';' not in value:
return True, value
value, marker = value.split(';')
return interpret(marker, self.execution_context), value
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
def _get_dependencies(self):
def handle_req(req, rlist, extras):
if ';' not in req:
rlist.append(req)
else:
r, marker = req.split(';')
m = EXTRA_RE.search(marker)
if m:
extra = m.groups()[0][1:-1]
extras.setdefault(extra, []).append(r)
result = self._dependencies
if result is None:
self._dependencies = result = {}
extras = {}
setup_reqs = self['Setup-Requires-Dist']
if setup_reqs:
result['setup'] = setup_reqs
install_reqs = []
for req in self['Requires-Dist']:
handle_req(req, install_reqs, extras)
if install_reqs:
result['install'] = install_reqs
if extras:
result['extras'] = extras
return result
def _set_dependencies(self, value):
if 'test' in value:
value = dict(value) # don't change value passed in
value.setdefault('extras', {})['test'] = value.pop('test')
self._dependencies = value
setup_reqs = value.get('setup', [])
install_reqs = value.get('install', [])
klist = []
for k, rlist in value.get('extras', {}).items():
klist.append(k)
for r in rlist:
install_reqs.append('%s; extra == "%s"' % (r, k))
if setup_reqs:
self['Setup-Requires-Dist'] = setup_reqs
if install_reqs:
self['Requires-Dist'] = install_reqs
if klist:
self['Provides-Extra'] = klist
#
# Public API
#
dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
name, version = self['Name'], self['Version']
if filesafe:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
for field in _version2fieldlist(self['Metadata-Version']):
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
valid, val = self._platform(val)
if not valid:
continue
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
valid, value = self._platform(self._fields[name])
if not valid:
return []
if isinstance(value, string_types):
return value.split(',')
valid, value = self._platform(self._fields[name])
if not valid:
return None
return value
def check(self, strict=False, restructuredtext=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
if _HAS_DOCUTILS and restructuredtext:
warnings.extend(self._check_rst_data(self['Description']))
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<Metadata %s %s>' % (self.name, self.version) | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
module ActiveRecord
module Encryption
# A +KeyProvider+ serves keys:
#
# * An encryption key
# * A list of potential decryption keys. Serving multiple decryption keys supports rotation-schemes
# where new keys are added but old keys need to continue working
class KeyProvider
def initialize(keys)
@keys = Array(keys)
end
# Returns the last key in the list as the active key to perform encryptions
#
# When +ActiveRecord::Encryption.config.store_key_references+ is true, the key will include
# a public tag referencing the key itself. That key will be stored in the public
# headers of the encrypted message
def encryption_key
@encryption_key ||= @keys.last.tap do |key|
key.public_tags.encrypted_data_key_id = key.id if ActiveRecord::Encryption.config.store_key_references
end
@encryption_key
end
# Returns the list of decryption keys
#
# When the message holds a reference to its encryption key, it will return an array
# with that key. If not, it will return the list of keys.
def decryption_keys(encrypted_message)
if encrypted_message.headers.encrypted_data_key_id
keys_grouped_by_id[encrypted_message.headers.encrypted_data_key_id]
else
@keys
end
end
private
def keys_grouped_by_id
@keys_grouped_by_id ||= @keys.group_by(&:id)
end
end
end
end | ruby | github | https://github.com/rails/rails | activerecord/lib/active_record/encryption/key_provider.rb |
# -*- encoding: utf-8 -*-
import urllib2
import csv
from util import *
BGCOLOR_1 = '#ffffff'
BGCOLOR_2 = '#eafcf2'
def macro_ListEquipsInCategory(macro, _trailing_args=[]):
    """Render a wiki table listing equipment items of one category.

    The master data is fetched as TSV from a published Google Sheet; the
    first trailing macro argument selects the category (装備種類) to show.
    Rows are sorted by crafting cost (制作費) ascending.

    NOTE(review): ``_trailing_args=[]`` is a mutable default argument; it is
    only read here, so this is harmless, but a tuple default would be safer.
    """
    request = macro.request
    formatter = macro.formatter
    parser = macro.parser
    requested_cat = _trailing_args[0] if len(_trailing_args) else u''
    # NOTE(review): urllib2.urlopen raises on failure rather than returning a
    # falsy value, so the ``if not url`` guard below is likely dead code, and
    # the response object is never closed — confirm and consider try/except.
    url = urllib2.urlopen('https://docs.google.com/spreadsheets/d/1AVKHCcHz-Y6S5UAocDXDgf4fasbz-AanJb9rX_GiB6M/pub?gid=2023316326&single=true&output=tsv')
    if not url:
        return u'マスターユニットデータの取得に失敗'
    equips = list(csv.DictReader(url, dialect=csv.excel_tab))
    # Python 2: the TSV cells are byte strings, so the requested category is
    # encoded to UTF-8 before comparison.
    equips = filter(lambda equip: equip.get('装備種類', '') == requested_cat.encode('utf-8'), equips)
    equips = filter(lambda equip: equip.get('名前', ''), equips)
    # Missing cost sorts last via the large sentinel default.
    equips.sort(key=lambda equip: safe_toint(equip.get('制作費', '999999999')))
    f = formatter
    output = u''
    output += f.table(True)
    # Header row: name, rarity, four stats, two skill columns, count, price.
    output += u''.join([
        f.table_row(True),
        cell(f, '名前', header=True),
        cell(f, 'R', header=True),
        cell(f, '攻撃', header=True),
        cell(f, '防御', header=True),
        cell(f, '速度', header=True),
        cell(f, '知力', header=True),
        cell(f, 'スキル', header=True, colspan=2),
        #
        cell(f, '所持数', header=True),
        cell(f, '価格', header=True),
        f.table_row(False),
    ])
    for e in equips:
        # Alternate the row background colour by rarity parity.
        bgcolor = BGCOLOR_1 if safe_toint(e.get('レアリティ', '0')) % 2 else BGCOLOR_2
        output += u''.join([
            f.table_row(True, style='background-color: %s;' % bgcolor),
            cell(f, e.get('名前', '')),
            cell(f, e.get('レアリティ', ''), num=True),
            cell(f, e.get('攻撃', ''), num=True),
            cell(f, e.get('防御', ''), num=True),
            cell(f, e.get('速度', ''), num=True),
            cell(f, e.get('知力', ''), num=True),
            cell(f, skill_pair_to_str(e.get('スキル1', ''), e.get('ス値1', 0))),
            cell(f, skill_pair_to_str(e.get('スキル2', ''), e.get('ス値2', 0))),
            cell(f, e.get('所持数', ''), num=True),
            cell(f, e.get('制作費', ''), num=True),
        ])
    output += f.table(False)
    return output
r"""
If I repeat a paragraph using the following formula::
do text for pp in properties_list()
and if `properties_list()` is a generator function,
then appy.pod works only correctly
if the generator function actually yields values.
The following test works:
>>> run_test(1,[
... Property("Computer","good"),
... Property("Singing","very good")])
[u'Computer', u'Singing']
Generated file result1.odt
If it is an empty generator
But the following expected result doesn't work:
>>> run_test(2,[])
[]
Generated file result1.odt
It gives the following traceback instead:
Traceback (most recent call last):
File "test.py", line 44, in <module>
run_test(2,[])
File "test.py", line 33, in run_test
renderer.run()
File "l:\snapshots\appy-0.6.7\appy\pod\renderer.py", line 347, in run
self.currentParser.parse(self.contentXml)
File "l:\snapshots\appy-0.6.7\appy\shared\xml_parser.py", line 195, in parse
self.parser.parse(inputSource)
File "c:\Python27\lib\xml\sax\expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "c:\Python27\lib\xml\sax\xmlreader.py", line 123, in parse
self.feed(buffer)
File "c:\Python27\lib\xml\sax\expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "c:\Python27\lib\xml\sax\expatreader.py", line 304, in end_element
self._cont_handler.endElement(name)
File "l:\snapshots\appy-0.6.7\appy\pod\pod_parser.py", line 279, in endElement
e.currentBuffer.action.execute()
File "l:\snapshots\appy-0.6.7\appy\pod\actions.py", line 76, in execute
self.do()
File "l:\snapshots\appy-0.6.7\appy\pod\actions.py", line 198, in do
del context[self.iter]
KeyError: u'pp'
Tested also on 0.7.0
A workaround is to change the formula in the .odt template
to wrap the iterator manually into `list`:
do text for pp in list(properties_list())
Not urgent, but worth a closer look.
"""
import os
import os.path
from appy.pod.renderer import Renderer
class Property():
    """A simple property/value pair iterated over by the .odt template."""
    def __init__(self,property,value):
        # NOTE(review): the parameter name shadows the builtin `property`;
        # harmless in this repro script.
        self.property = property
        self.value = value
    def __unicode__(self):
        # Python 2 unicode conversion, used when the value is rendered.
        return unicode(self.value)
def run_test(n,pp_list):
    """Render cv.odt with a generator over *pp_list* into result<n>.odt.

    Reproduces the reported appy.pod bug: the template's 'do ... for pp in
    properties_list()' only works when the generator actually yields values.
    """
    tpl = os.path.join(os.path.abspath(os.path.dirname(__file__)),'cv.odt')
    # A real generator function (not a list) — the subject of the bug report.
    def properties_list():
        for p in pp_list:
            yield p
    context = dict(properties_list=properties_list)
    print [unicode(pp.property) for pp in properties_list()]
    target = 'result%s.odt' % n
    # Remove a stale result so the Renderer can (re)create it.
    if os.path.exists(target):
        os.remove(target)
    renderer = Renderer(tpl, context, target)
    renderer.run()
    print "Generated file", target
def _test():
    """Execute the doctests embedded in this module's docstring."""
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test() | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
from .models import ColumnTypes
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
# Inspecting Oracle DB doesn't produce correct results (#19884):
# - it gets max_length wrong: it returns a number of bytes.
# - it reports fields as blank=True when they aren't.
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('email_field', "models.CharField(max_length=254)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.features.can_introspect_ip_address_field:
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
elif (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('slug_field', "models.CharField(max_length=50)")
if not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('text_field', "models.TextField()")
if connection.features.can_introspect_time_field:
assertFieldType('time_field', "models.TimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
if connection.features.can_introspect_big_integer_field:
assertFieldType('big_int_field', "models.BigIntegerField()")
else:
assertFieldType('big_int_field', "models.IntegerField()")
bool_field = ColumnTypes._meta.get_field('bool_field')
bool_field_type = connection.features.introspected_boolean_field_type(bool_field)
assertFieldType('bool_field', "models.{}()".format(bool_field_type))
null_bool_field = ColumnTypes._meta.get_field('null_bool_field')
null_bool_field_type = connection.features.introspected_boolean_field_type(null_bool_field)
if 'BooleanField' in null_bool_field_type:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
else:
if connection.features.can_introspect_null:
assertFieldType('null_bool_field', "models.{}(blank=True, null=True)".format(null_bool_field_type))
else:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
if connection.features.can_introspect_decimal_field:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
else: # Guessed arguments on SQLite, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
else:
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
else:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.features.can_introspect_small_integer_field:
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output)
self.assertNotIn(
"from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)",
output,
msg=error_message,
)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn(
"from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, db_column='from_id')",
output,
)
self.assertIn(
"people_pk = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, primary_key=True)",
output,
)
self.assertIn(
"people_unique = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, unique=True)",
output,
)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
    def test_special_column_name_introspection(self):
        """
        Introspection of column names containing special characters,
        unsuitable for Python identifiers
        """
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        output = out.getvalue()
        # Some backends report upper-cased column names; pick the spelling
        # this connection will produce for the db_column checks below.
        base_name = 'Field' if not connection.features.uppercases_column_names else 'field'
        self.assertIn("field = models.IntegerField()", output)
        # Underscore-mangled duplicates get numeric suffixes, with
        # db_column preserving the original column spelling.
        self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
        self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
        self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
        self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
        if PY3:
            # Python 3 allows non-ASCII identifiers
            self.assertIn("tamaño = models.IntegerField()", output)
        else:
            # Python 2 must fall back to an ASCII name plus escaped db_column.
            self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_table_name_introspection(self):
"""
Introspection of table names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
self.assertIn("class InspectdbSpecialTableName(models.Model):", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
def test_unique_together_meta(self):
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_uniquetogether'),
stdout=out)
output = out.getvalue()
self.assertIn(
" unique_together = (('field1', 'field2'),)", output,
msg='inspectdb should generate unique_together.'
)
    @skipUnless(connection.vendor == 'sqlite',
                "Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
    def test_custom_fields(self):
        """
        Introspection of columns with a custom field (#21090)
        """
        out = StringIO()
        # Patch the type-reverse mapping so known column types resolve to a
        # dotted custom field path / a bare field name; restore afterwards.
        orig_data_types_reverse = connection.introspection.data_types_reverse
        try:
            connection.introspection.data_types_reverse = {
                'text': 'myfields.TextField',
                'bigint': 'BigIntegerField',
            }
            call_command('inspectdb',
                         table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
                         stdout=out)
            output = out.getvalue()
            # Dotted names are emitted verbatim; bare names get 'models.' prefixed.
            self.assertIn("text_field = myfields.TextField()", output)
            self.assertIn("big_int_field = models.BigIntegerField()", output)
        finally:
            connection.introspection.data_types_reverse = orig_data_types_reverse
# Scan an Apple header file, generating a Python file of generator calls.
import sys
import os
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
# Framework header name, generated-module short name, and the object type
# used for method dispatch (unused for LaunchServices).
LONG = "LaunchServices"
SHORT = "launch"
OBJECT = "NOTUSED"
def main():
    """Scan LaunchServices.h and generate the bgen glue module.

    Writes <SHORT>gen.py (generator calls), a constants file under
    TOOLBOXDIR, and a type-test script, then smoke-tests both outputs by
    executing/importing them.
    """
    input = LONG + ".h"
    output = SHORT + "gen.py"
    defsoutput = TOOLBOXDIR + LONG + ".py"
    scanner = MyScanner(input, output, defsoutput)
    scanner.scan()
    scanner.close()
    scanner.gentypetest(SHORT+"typetest.py")
    print "=== Testing definitions output code ==="
    # Execute the generated constants file to verify it is valid Python.
    execfile(defsoutput, {}, {})
    print "=== Done scanning and generating, now importing the generated code... ==="
    # Import the generated support module as a final smoke test.
    exec "import " + SHORT + "support"
    print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
    """Scanner specialization for the LaunchServices framework header."""

    def destination(self, type, name, arglist):
        """Pick the generator class and output list for one declaration."""
        if arglist:
            first_type, first_name, first_mode = arglist[0]
            # This is non-functional today (OBJECT is "NOTUSED").
            if first_type == OBJECT and first_mode == "InMode":
                return "Method", "methods"
        return "Function", "functions"

    def writeinitialdefs(self):
        """Emit hand-maintained definitions the scanner cannot derive."""
        for line in (
            "def FOUR_CHAR_CODE(x): return x",
            "from Carbon.Files import *",
            "kLSRequestAllInfo = -1",
            "kLSRolesAll = -1",
            "kLSUnknownType = FOUR_CHAR_CODE('\\0\\0\\0\\0')",
            "kLSUnknownCreator = FOUR_CHAR_CODE('\\0\\0\\0\\0')",
            "kLSInvalidExtensionIndex = -1",
        ):
            self.defsfile.write(line + "\n")

    def makeblacklistnames(self):
        """Names that must not be wrapped automatically."""
        return ["LSInit", "LSTerm", "kLSRequestAllInfo", "kLSRolesAll",
                "kLSInvalidExtensionIndex", "kLSUnknownType",
                "kLSUnknownCreator"]

    def makeblacklisttypes(self):
        """Argument types the generator cannot handle."""
        return ["LSLaunchFSRefSpec_ptr", "LSLaunchURLSpec_ptr"]

    def makerepairinstructions(self):
        """Argument-pattern rewrites applied before generation."""
        return [
            # LSGetApplicationForInfo: the extension is optional.
            ([('CFStringRef', 'inExtension', 'InMode')],
             [('OptCFStringRef', 'inExtension', 'InMode')]),
            # LSFindApplicationForInfo: bundle id and name are optional.
            ([('CFStringRef', 'inBundleID', 'InMode')],
             [('OptCFStringRef', 'inBundleID', 'InMode')]),
            ([('CFStringRef', 'inName', 'InMode')],
             [('OptCFStringRef', 'inName', 'InMode')]),
            # LSGetExtensionInfo: Unicode filenames passed as length, buffer.
            ([('UniCharCount', '*', 'InMode'),
              ('UniChar_ptr', '*', 'InMode')],
             [('UnicodeReverseInBuffer', '*', 'InMode')]),
        ]
# Allow running the scanner directly as a script.
if __name__ == "__main__":
    main()
from win32inet import *
from win32inetcon import *
import winerror
from pywin32_testutil import str2bytes # py3k-friendly helper
import unittest
class CookieTests(unittest.TestCase):
def testCookies(self):
data = "TestData=Test"
InternetSetCookie("http://www.python.org", None, data)
got = InternetGetCookie("http://www.python.org", None)
self.assertEqual(got, data)
def testCookiesEmpty(self):
try:
InternetGetCookie("http://site-with-no-cookie.python.org", None)
self.fail("expected win32 exception")
except error, exc:
self.failUnlessEqual(exc.winerror, winerror.ERROR_NO_MORE_ITEMS)
class UrlTests(unittest.TestCase):
    """InternetCanonicalizeUrl escaping behaviour."""

    def testSimpleCanonicalize(self):
        # Spaces are percent-encoded.
        self.assertEqual(InternetCanonicalizeUrl("foo bar"), "foo%20bar")

    def testLongCanonicalize(self):
        # A 4k URL forces the underlying API to request a bigger buffer.
        big = "x" * 2048
        expected = big + "%20" + big
        self.assertEqual(InternetCanonicalizeUrl(big + " " + big), expected)
class TestNetwork(unittest.TestCase):
def setUp(self):
self.hi = InternetOpen("test", INTERNET_OPEN_TYPE_DIRECT, None, None, 0)
def tearDown(self):
self.hi.Close()
def testPythonDotOrg(self):
hdl = InternetOpenUrl(self.hi, "http://www.python.org", None,
INTERNET_FLAG_EXISTING_CONNECT)
chunks = []
while 1:
chunk = InternetReadFile(hdl, 1024)
if not chunk:
break
chunks.append(chunk)
data = str2bytes('').join(chunks)
assert data.find(str2bytes("Python"))>0, repr(data) # This must appear somewhere on the main page!
def testFtpCommand(self):
# ftp.python.org doesn't exist. ftp.gnu.org is what Python's urllib
# test code uses.
hcon = InternetConnect(self.hi, "ftp.gnu.org", INTERNET_INVALID_PORT_NUMBER,
None, None, # username/password
INTERNET_SERVICE_FTP, 0, 0)
try:
try:
hftp = FtpCommand(hcon, True, FTP_TRANSFER_TYPE_ASCII, 'NLST', 0)
except error:
print "Error info is", InternetGetLastResponseInfo()
InternetReadFile(hftp, 2048)
hftp.Close()
finally:
hcon.Close()
# Run the whole suite when executed directly.
if __name__=='__main__':
    unittest.main()
{
"GENPASS": {
"summary": "Generates a pseudorandom, secure password that can be used to identify ACL users.",
"complexity": "O(1)",
"group": "server",
"since": "6.0.0",
"arity": -2,
"container": "ACL",
"function": "aclCommand",
"command_flags": [
"NOSCRIPT",
"LOADING",
"STALE",
"SENTINEL"
],
"reply_schema": {
"type": "string",
"description": "Pseudorandom data. By default it contains 64 bytes, representing 256 bits of data. If `bits` was given, the output string length is the number of specified bits (rounded to the next multiple of 4) divided by 4."
},
"arguments": [
{
"name": "bits",
"type": "integer",
"optional": true
}
]
}
} | json | github | https://github.com/redis/redis | src/commands/acl-genpass.json |
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caper.objects import CaptureMatch
from logr import Logr
class CaptureStep(object):
    """One capture attempt (regex, callable, or raw) inside a CaptureGroup."""

    # Attributes shown by __repr__ when they are set and truthy.
    REPR_KEYS = ['regex', 'func', 'single']

    def __init__(self, capture_group, tag, source, regex=None, func=None, single=None, **kwargs):
        #: @type: CaptureGroup
        self.capture_group = capture_group
        #: @type: str
        self.tag = tag
        #: @type: str
        self.source = source
        #: @type: str
        self.regex = regex
        #: @type: function
        self.func = func
        #: @type: bool
        self.single = single

        self.kwargs = kwargs
        self.matched = False

    def execute(self, fragment):
        """Execute step on fragment

        :type fragment: CaperFragment
        :rtype : CaptureMatch
        """
        capture = CaptureMatch(self.tag, self)

        if self.regex:
            # Delegate fragment matching to the parser's matcher.
            weight, result, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, self.regex)
            Logr.debug('(execute) [regex] tag: "%s"', self.tag)
            if result:
                capture.success = True
                capture.weight = weight
                capture.result = result
                capture.num_fragments = num_fragments
            return capture

        if self.func:
            result = self.func(fragment)
            Logr.debug('(execute) [func] %s += "%s"', self.tag, capture)
            if result:
                capture.success = True
                capture.weight = 1.0
                capture.result = result
            return capture

        # Raw capture: always succeeds with the fragment's value.
        Logr.debug('(execute) [raw] %s += "%s"', self.tag, fragment.value)
        capture.success = True
        capture.weight = 1.0
        if self.kwargs.get('include_separators', False):
            capture.result = (fragment.left_sep, fragment.value, fragment.right_sep)
        else:
            capture.result = fragment.value
        return capture

    def __repr__(self):
        details = []
        for key in self.REPR_KEYS:
            if hasattr(self, key) and getattr(self, key):
                details.append('%s=%r' % (key, getattr(self, key)))
        if details:
            return "CaptureStep('%s', %s)" % (self.tag, ', '.join(details))
        return "CaptureStep('%s')" % self.tag
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from kobo.shortcuts import random_string
from kobo.types import Enum
# Lifecycle states for FileUpload.state (stored as a positive integer).
UPLOAD_STATES = Enum(
    "NEW",
    "STARTED",
    "FINISHED",
    "FAILED",
)
class FileUpload(models.Model):
owner = models.ForeignKey(User)
name = models.CharField(max_length=255)
checksum = models.CharField(max_length=255)
size = models.PositiveIntegerField()
target_dir = models.CharField(max_length=255)
upload_key = models.CharField(max_length=255)
state = models.PositiveIntegerField(default=0, choices=UPLOAD_STATES.get_mapping())
dt_created = models.DateTimeField(auto_now_add=True)
dt_finished = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ("-dt_created", "name", )
# unique_together = (
# ("name", "target_dir")
# )
def export(self):
result = {
"owner": self.owner_id,
"name": self.name,
"checksum": self.checksum,
"size": self.size,
"target_dir": self.target_dir,
"state": self.state,
}
return result
def get_full_path(self):
return os.path.abspath(os.path.join(self.target_dir, self.name))
def __unicode__(self):
return unicode(os.path.join(self.target_dir, self.name))
def save(self, *args, **kwargs):
if not self.upload_key:
self.upload_key = random_string(64)
if self.state == UPLOAD_STATES['FINISHED']:
if FileUpload.objects.filter(state = UPLOAD_STATES['FINISHED'], name = self.name).exclude(id = self.id).count() != 0:
# someone created same upload faster
self.state == UPLOAD_STATES['FAILED']
super(FileUpload, self).save(*args, **kwargs)
def delete(self):
super(FileUpload, self).delete()
# if file was successfully uploaded it should be removed from
# filesystem, otherwise it shouldn't be there
if self.state == UPLOAD_STATES['FINISHED']:
try:
os.unlink(self.get_full_path())
except OSError, ex:
if ex.errno != 2:
raise
upload_dir = getattr(settings, "UPLOAD_DIR", None)
if upload_dir is not None:
upload_dir = os.path.abspath(upload_dir)
file_dir = os.path.dirname(self.get_full_path())
while 1:
if not file_dir.startswith(upload_dir):
break
if file_dir == upload_dir:
break
try:
os.rmdir(file_dir)
except OSError, ex:
break
file_dir = os.path.split(file_dir)[0] | unknown | codeparrot/codeparrot-clean | ||
from django.db import models
from django.contrib.auth.models import User
class Test(models.Model):
    # Unique path-like identifier for the object.
    path = models.CharField(max_length=255, unique=True)
    rating = models.IntegerField(default=50)
    # NOTE(review): this field and date_only below share the verbose name
    # 'date' -- presumably intentional for these test models; confirm.
    date = models.DateTimeField('date', null=True)
    title = models.CharField(max_length=255, null=True)
    text = models.TextField(null=True, blank=True)
    created_by = models.OneToOneField(User, null=True)
    date_only = models.DateField('date', null=True)
class Tree(models.Model):
    # Explicit auto primary key named 'number' instead of the default 'id'.
    number = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, unique=True)
    # Self-referential FK forming the tree; root nodes have parent=NULL.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class TestContainer(models.Model):
    # Optional one-to-one wrapper around a Test instance.
    test = models.OneToOneField(Test, null=True)
class InvalidField(models.Model):
    name = models.CharField(max_length=255, unique=True)
    # Required integer with no default; the class name suggests this model
    # exists to exercise invalid-field handling -- confirm against the tests.
    limit = models.IntegerField()
class NoUniqueInitField(models.Model):
    # Deliberately has no unique field, per the class name.
    name = models.CharField(max_length=255)
    num = models.IntegerField()
# (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard files documentation fragment
    # Shared YAML describing the common file-attribute options (mode, owner,
    # group and the SELinux context parts).  The string below is parsed as
    # YAML at documentation-build time -- do not reflow or re-indent it.
    DOCUMENTATION = """
options:
  mode:
    required: false
    default: null
    description:
      - mode the file or directory should be. For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers (like 0644). Leaving off the leading zero will likely have unexpected results. As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
  owner:
    required: false
    default: null
    description:
      - name of the user that should own the file/directory, as would be fed to I(chown)
  group:
    required: false
    default: null
    description:
      - name of the group that should own the file/directory, as would be fed to I(chown)
  seuser:
    required: false
    default: null
    description:
      - user part of SELinux file context. Will default to system policy, if
        applicable. If set to C(_default), it will use the C(user) portion of the
        policy if available
  serole:
    required: false
    default: null
    description:
      - role part of SELinux file context, C(_default) feature works as for I(seuser).
  setype:
    required: false
    default: null
    description:
      - type part of SELinux file context, C(_default) feature works as for I(seuser).
  selevel:
    required: false
    default: "s0"
    description:
      - level part of the SELinux file context. This is the MLS/MCS attribute,
        sometimes known as the C(range). C(_default) feature works as for
        I(seuser).
"""
"use strict";

/** @type {import("webpack").Configuration} */
const config = {
	output: {
		// Emitted asset files are written under images/, named by content hash.
		assetModuleFilename: "images/[hash][ext]"
	},
	module: {
		rules: [
			{
				// "asset" lets webpack choose between inlining as a data URI
				// and emitting a separate file for matching images.
				test: /file\.(png|jpg|svg)$/,
				type: "asset"
			}
		]
	}
};

module.exports = config;
import unittest
from p2pool.bitcoin import data, networks
from p2pool.util import pack
class Test(unittest.TestCase):
    """Sanity checks for p2pool's bitcoin packing/hashing helpers against
    known mainnet Bitcoin and Litecoin data."""

    def test_header_hash(self):
        # A known Bitcoin block header must double-SHA256 to its block hash.
        assert data.hash256(data.block_header_type.pack(dict(
            version=1,
            previous_block=0x000000000000038a2a86b72387f93c51298298a732079b3b686df3603d2f6282,
            merkle_root=0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44,
            timestamp=1323752685,
            bits=data.FloatingInteger(437159528),
            nonce=3658685446,
        ))) == 0x000000000000003aaaf7638f9f9c0d0c60e8b0eb817dcdb55fd2b1964efc5175

    def test_header_hash_litecoin(self):
        # Litecoin's PoW function on a known header must be under the target.
        assert networks.nets['litecoin'].POW_FUNC(data.block_header_type.pack(dict(
            version=1,
            previous_block=0xd928d3066613d1c9dd424d5810cdd21bfeef3c698977e81ec1640e1084950073,
            merkle_root=0x03f4b646b58a66594a182b02e425e7b3a93c8a52b600aa468f1bc5549f395f16,
            timestamp=1327807194,
            bits=data.FloatingInteger(0x1d01b56f),
            nonce=20736,
        ))) < 2**256//2**30

    def test_tx_hash(self):
        # A known coinbase transaction must pack to its expected txid.
        assert data.hash256(data.tx_type.pack(dict(
            version=1,
            tx_ins=[dict(
                previous_output=None,
                sequence=None,
                script='70736a0468860e1a0452389500522cfabe6d6d2b2f33cf8f6291b184f1b291d24d82229463fcec239afea0ee34b4bfc622f62401000000000000004d696e656420627920425443204775696c6420ac1eeeed88'.decode('hex'),
            )],
            tx_outs=[dict(
                value=5003880250,
                script=data.pubkey_hash_to_script2(pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))),
            )],
            lock_time=0,
        ))) == 0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c

    def test_address_to_pubkey_hash(self):
        # A base58 mainnet address must decode to its 160-bit pubkey hash.
        assert data.address_to_pubkey_hash('1KUCp7YP5FP8ViRxhfszSUJCTAajK6viGy', networks.nets['bitcoin']) == pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))

    def test_merkle_hash(self):
        # The merkle root computed from a block's txids must match the
        # merkle_root embedded in that block's header.
        assert data.merkle_hash([
            0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c,
            0x326dfe222def9cf571af37a511ccda282d83bedcc01dabf8aa2340d342398cf0,
            0x5d2e0541c0f735bac85fa84bfd3367100a3907b939a0c13e558d28c6ffd1aea4,
            0x8443faf58aa0079760750afe7f08b759091118046fe42794d3aca2aa0ff69da2,
            0x4d8d1c65ede6c8eab843212e05c7b380acb82914eef7c7376a214a109dc91b9d,
            0x1d750bc0fa276f89db7e6ed16eb1cf26986795121f67c03712210143b0cb0125,
            0x5179349931d714d3102dfc004400f52ef1fed3b116280187ca85d1d638a80176,
            0xa8b3f6d2d566a9239c9ad9ae2ed5178dee4a11560a8dd1d9b608fd6bf8c1e75,
            0xab4d07cd97f9c0c4129cff332873a44efdcd33bdbfc7574fe094df1d379e772f,
            0xf54a7514b1de8b5d9c2a114d95fba1e694b6e3e4a771fda3f0333515477d685b,
            0x894e972d8a2fc6c486da33469b14137a7f89004ae07b95e63923a3032df32089,
            0x86cdde1704f53fce33ab2d4f5bc40c029782011866d0e07316d695c41e32b1a0,
            0xf7cf4eae5e497be8215778204a86f1db790d9c27fe6a5b9f745df5f3862f8a85,
            0x2e72f7ddf157d64f538ec72562a820e90150e8c54afc4d55e0d6e3dbd8ca50a,
            0x9f27471dfbc6ce3cbfcf1c8b25d44b8d1b9d89ea5255e9d6109e0f9fd662f75c,
            0x995f4c9f78c5b75a0c19f0a32387e9fa75adaa3d62fba041790e06e02ae9d86d,
            0xb11ec2ad2049aa32b4760d458ee9effddf7100d73c4752ea497e54e2c58ba727,
            0xa439f288fbc5a3b08e5ffd2c4e2d87c19ac2d5e4dfc19fabfa33c7416819e1ec,
            0x3aa33f886f1357b4bbe81784ec1cf05873b7c5930ab912ee684cc6e4f06e4c34,
            0xcab9a1213037922d94b6dcd9c567aa132f16360e213c202ee59f16dde3642ac7,
            0xa2d7a3d2715eb6b094946c6e3e46a88acfb37068546cabe40dbf6cd01a625640,
            0x3d02764f24816aaa441a8d472f58e0f8314a70d5b44f8a6f88cc8c7af373b24e,
            0xcc5adf077c969ebd78acebc3eb4416474aff61a828368113d27f72ad823214d0,
            0xf2d8049d1971f02575eb37d3a732d46927b6be59a18f1bd0c7f8ed123e8a58a,
            0x94ffe8d46a1accd797351894f1774995ed7df3982c9a5222765f44d9c3151dbb,
            0x82268fa74a878636261815d4b8b1b01298a8bffc87336c0d6f13ef6f0373f1f0,
            0x73f441f8763dd1869fe5c2e9d298b88dc62dc8c75af709fccb3622a4c69e2d55,
            0xeb78fc63d4ebcdd27ed618fd5025dc61de6575f39b2d98e3be3eb482b210c0a0,
            0x13375a426de15631af9afdf00c490e87cc5aab823c327b9856004d0b198d72db,
            0x67d76a64fa9b6c5d39fde87356282ef507b3dec1eead4b54e739c74e02e81db4,
        ]) == 0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.