code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
name: Cygwin

on:
  push:
    paths-ignore:
      - 'doc/**'
      - '**/man/*'
      - '**.md'
      - '**.rdoc'
      - '**/.document'
      - '.*.yml'
  pull_request:
    paths-ignore:
      - 'doc/**'
      - '**/man/*'
      - '**.md'
      - '**.rdoc'
      - '**/.document'
      - '.*.yml'
  merge_group:

# One run per ref for PRs (newer pushes cancel older runs); pushes to
# branches are keyed by SHA so they never cancel each other.
concurrency:
  group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}

permissions:
  contents: read

jobs:
  make:
    runs-on: windows-2022

    # Skip documentation-only changes and dependabot pushes.
    if: >-
      ${{!(false
      || contains(github.event.head_commit.message, '[DOC]')
      || contains(github.event.pull_request.title, '[DOC]')
      || contains(github.event.pull_request.labels.*.name, 'Documentation')
      || (github.event_name == 'push' && github.event.pull_request.user.login == 'dependabot[bot]')
      )}}

    steps:
      # Cygwin tools expect LF line endings in the checkout.
      - run: git config --global core.autocrlf input

      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

      - name: Setup Cygwin
        uses: cygwin/cygwin-install-action@master
        with:
          packages: ruby gcc-core make autoconf libtool libssl-devel libyaml-devel libffi-devel zlib-devel rubygems
          site: |
            https://cygwin.osuosl.org/

      # All build steps run under Cygwin bash; `-o igncr` ignores CRs so the
      # scripts work regardless of checkout line endings.
      - name: configure
        run: |
          ./autogen.sh
          ./configure --disable-install-doc
        shell: C:\cygwin\bin\bash.EXE --noprofile --norc -e -o igncr -o pipefail {0}

      - name: Extract bundled gems
        run: |
          make ruby -j5
          make extract-gems
        shell: C:\cygwin\bin\bash.EXE --noprofile --norc -e -o igncr -o pipefail {0}

      - name: make all
        timeout-minutes: 30
        run: make -j4 V=1
        shell: C:\cygwin\bin\bash.EXE --noprofile --norc -e -o igncr -o pipefail {0}

      # Notify on failure only.
      - uses: ./.github/actions/slack
        with:
          label: Cygwin
          SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
        if: ${{ failure() }}
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    Verifies that an XlsxWriter workbook containing a line chart with a
    formatted series matches the reference file ``chart_format02.xlsx``.
    """

    def setUp(self):
        # Show the full diff when a comparison assertion fails.
        self.maxDiff = None

        filename = 'chart_format02.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # File produced by this test vs. the Excel-generated reference file.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        # No sub-files or XML elements are excluded from the comparison.
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})

        # Fixed axis IDs so the output matches the reference file exactly.
        chart.axis_ids = [46335872, 46365696]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        # First series carries the line formatting under test.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'line': {'color': 'red'},
        })

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
## Shared page layout for the MiniLIMS web client.
## ``self.attr.page_path`` (set by the inheriting page) selects which
## navigation tab gets the "active" CSS class.
<%
    executions_class = ""
    files_class = ""
    if self.attr.page_path == "executions":
        executions_class = "active"
    elif self.attr.page_path == "files":
        files_class = "active"
%>
## Pagination links. ``older_newer`` is either None (render nothing) or a
## pair of booleans: (has_newer_page, has_older_page). ``page`` is expected
## in the template context.
<%def name="older_newer_links(older_newer)">
% if older_newer != None:
<p class="older_newer_links">
% if older_newer[0]:
<a href="${self.attr.page_path}?page=${page-1}"><< Newer</a>
% endif
% if older_newer[1]:
<a href="${self.attr.page_path}?page=${page+1}">Older >></a>
% endif
</p>
% endif
</%def>
## When ``wrapped`` is true, emit the full XHTML shell plus the tab bar;
## otherwise only the inner tab container is rendered (partial page loads).
% if wrapped:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>MiniLIMS Client</title>
<link rel="stylesheet" href="htminilims.css" type="text/css">
<script src="jquery.js" type="text/javascript"></script>
<script src="htminilims.js" type="text/javascript"></script>
</head>
<body>
<div id="tabs">
<ul class="tabs">
<li class="${executions_class}"><a href="executions">Executions</a></li>
<li class="${files_class}"><a href="files">Files</a></li>
</ul>
</div>
% endif # Wrapped
## Page body (from the inheriting template) sandwiched between two
## identical pagination link rows.
<div class="tab_container">
${older_newer_links(older_newer)}
${self.body()}
${older_newer_links(older_newer)}
</div>
% if wrapped:
</body>
</html>
% endif
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Donut."""
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
convert_to_rgb,
get_resize_output_image_size,
pad,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
from ...utils.import_utils import is_vision_available, requires
# Module-level logger used for the preprocessing warnings below.
logger = logging.get_logger(__name__)

# PIL is an optional dependency; import it only when the vision backend is
# available (the processor class itself is gated by @requires below).
if is_vision_available():
    import PIL
class DonutImageProcessorKwargs(ImagesKwargs, total=False):
    """
    Donut-specific preprocessing kwargs, accepted in addition to the common
    `ImagesKwargs`.

    do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
        Whether to resize the image using thumbnail method.
    do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
        Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
    """

    # Both keys are optional (total=False); defaults come from the processor.
    do_thumbnail: bool
    do_align_long_axis: bool
@requires(backends=("vision",))
class DonutImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Donut image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]` *optional*, defaults to `{"height": 2560, "width": 1920}`):
            Size of the image after resizing. The shortest edge of the image is resized to
            min(size["height"], size["width"]), with the longest edge resized to keep the input aspect ratio. Can be
            overridden by `size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_thumbnail (`bool`, *optional*, defaults to `True`):
            Whether to resize the image using thumbnail method.
        do_align_long_axis (`bool`, *optional*, defaults to `False`):
            Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a
            random amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
            padded to the largest image size in the batch.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Image standard deviation.
    """

    model_input_names = ["pixel_values"]
    valid_kwargs = DonutImageProcessorKwargs

    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_thumbnail: bool = True,
        do_align_long_axis: bool = False,
        do_pad: bool = True,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        size = size if size is not None else {"height": 2560, "width": 1920}
        if isinstance(size, (tuple, list)):
            # The previous feature extractor size parameter was in (width, height) format
            size = size[::-1]
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_thumbnail = do_thumbnail
        self.do_align_long_axis = do_align_long_axis
        self.do_pad = do_pad
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def align_long_axis(
        self,
        image: np.ndarray,
        size: dict[str, int],
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
    ) -> np.ndarray:
        """
        Align the long axis of the image to the longest axis of the specified size.

        Args:
            image (`np.ndarray`):
                The image to be aligned.
            size (`dict[str, int]`):
                The size `{"height": h, "width": w}` to align the long axis to.
            data_format (`str` or `ChannelDimension`, *optional*):
                The data format of the output image. If unset, the same format as the input image is used.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Returns:
            `np.ndarray`: The aligned image.
        """
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)
        output_height, output_width = size["height"], size["width"]

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(image)

        # Rotate within the spatial (height, width) axes only, leaving the
        # channel axis untouched.
        if input_data_format == ChannelDimension.LAST:
            rot_axes = (0, 1)
        elif input_data_format == ChannelDimension.FIRST:
            rot_axes = (1, 2)
        else:
            raise ValueError(f"Unsupported data format: {input_data_format}")

        if (output_width < output_height and input_width > input_height) or (
            output_width > output_height and input_width < input_height
        ):
            # k=3 quarter-turns counter-clockwise == one 90° clockwise rotation.
            image = np.rot90(image, 3, axes=rot_axes)

        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)

        return image

    def pad_image(
        self,
        image: np.ndarray,
        size: dict[str, int],
        random_padding: bool = False,
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
    ) -> np.ndarray:
        """
        Pad the image to the specified size.

        Args:
            image (`np.ndarray`):
                The image to be padded.
            size (`dict[str, int]`):
                The size `{"height": h, "width": w}` to pad the image to.
            random_padding (`bool`, *optional*, defaults to `False`):
                Whether to use random padding or not.
            data_format (`str` or `ChannelDimension`, *optional*):
                The data format of the output image. If unset, the same format as the input image is used.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        output_height, output_width = size["height"], size["width"]
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)

        # NOTE(review): assumes the input already fits within `size` (both
        # deltas non-negative); np.random.randint raises if delta < 0.
        delta_width = output_width - input_width
        delta_height = output_height - input_height

        if random_padding:
            pad_top = np.random.randint(low=0, high=delta_height + 1)
            pad_left = np.random.randint(low=0, high=delta_width + 1)
        else:
            # Center the image when padding deterministically.
            pad_top = delta_height // 2
            pad_left = delta_width // 2

        pad_bottom = delta_height - pad_top
        pad_right = delta_width - pad_left

        padding = ((pad_top, pad_bottom), (pad_left, pad_right))
        return pad(image, padding, data_format=data_format, input_data_format=input_data_format)

    def thumbnail(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
        corresponding dimension of the specified size.

        Args:
            image (`np.ndarray`):
                The image to be resized.
            size (`dict[str, int]`):
                The size `{"height": h, "width": w}` to resize the image to.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                The resampling filter to use.
            data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
                The data format of the output image. If unset, the same format as the input image is used.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)
        output_height, output_width = size["height"], size["width"]

        # We always resize to the smallest of either the input or output size.
        height = min(input_height, output_height)
        width = min(input_width, output_width)

        # Already small enough on both axes: nothing to do.
        if height == input_height and width == input_width:
            return image

        # Shrink the longer axis first, then scale the other to keep the
        # input aspect ratio.
        if input_height > input_width:
            width = int(input_width * height / input_height)
        elif input_width > input_height:
            height = int(input_height * width / input_width)

        return resize(
            image,
            size=(height, width),
            resample=resample,
            # Two-step reduction trade-off forwarded to PIL; see Pillow's
            # Image.resize `reducing_gap` documentation.
            reducing_gap=2.0,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resizes `image` to `(height, width)` specified by `size` using the PIL library.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        size = get_size_dict(size)
        # The smaller of height/width acts as the target shortest edge; the
        # other edge is derived to preserve aspect ratio.
        shortest_edge = min(size["height"], size["width"])
        output_size = get_resize_output_image_size(
            image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
        )
        resized_image = resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
        return resized_image

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        resample: PILImageResampling | None = None,
        do_thumbnail: bool | None = None,
        do_align_long_axis: bool | None = None,
        do_pad: bool | None = None,
        random_padding: bool = False,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: ChannelDimension | None = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
                size["width"]) with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
                Whether to resize the image using thumbnail method.
            do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
                Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
                amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
                padded to the largest image size in the batch.
            random_padding (`bool`, *optional*, defaults to `False`):
                Whether to use random padding when padding the image. If `True`, each image in the batch will be padded
                with a random amount of padding on each side up to the size of the largest image in the batch.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: defaults to the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Resolve every option against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        if isinstance(size, (tuple, list)):
            # Previous feature extractor had size in (width, height) format
            size = size[::-1]
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
        do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_flat_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # Force 3-channel RGB input before any numeric transforms.
        images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # Pipeline order: align -> resize -> thumbnail -> pad -> rescale -> normalize.
        if do_align_long_axis:
            images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]

        if do_resize:
            images = [
                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_thumbnail:
            images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]

        if do_pad:
            images = [
                self.pad_image(
                    image=image, size=size, random_padding=random_padding, input_data_format=input_data_format
                )
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# Public API of this module.
__all__ = ["DonutImageProcessor"]
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package schema
import (
"errors"
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/internal/legacy/terraform"
"github.com/zclconf/go-cty/cty"
)
// ReservedDataSourceFields are configuration field names that collide with
// Terraform's own meta-arguments and therefore must not be used as schema
// keys in a data source.
var ReservedDataSourceFields = []string{
	"connection",
	"count",
	"depends_on",
	"lifecycle",
	"provider",
	"provisioner",
}

// ReservedResourceFields are configuration field names that collide with
// Terraform's own meta-arguments and therefore must not be used as schema
// keys in a managed resource. Resources additionally reserve "id".
var ReservedResourceFields = []string{
	"connection",
	"count",
	"depends_on",
	"id",
	"lifecycle",
	"provider",
	"provisioner",
}
// Resource represents a thing in Terraform that has a set of configurable
// attributes and a lifecycle (create, read, update, delete).
//
// The Resource schema is an abstraction that allows provider writers to
// worry only about CRUD operations while off-loading validation, diff
// generation, etc. to this higher level library.
//
// In spite of the name, this struct is not used only for terraform resources,
// but also for data sources. In the case of data sources, the Create,
// Update and Delete functions must not be provided.
type Resource struct {
	// Schema is the schema for the configuration of this resource.
	//
	// The keys of this map are the configuration keys, and the values
	// describe the schema of the configuration value.
	//
	// The schema is used to represent both configurable data as well
	// as data that might be computed in the process of creating this
	// resource.
	Schema map[string]*Schema

	// SchemaVersion is the version number for this resource's Schema
	// definition. The current SchemaVersion stored in the state for each
	// resource. Provider authors can increment this version number
	// when Schema semantics change. If the State's SchemaVersion is less than
	// the current SchemaVersion, the InstanceState is yielded to the
	// MigrateState callback, where the provider can make whatever changes it
	// needs to update the state to be compatible to the latest version of the
	// Schema.
	//
	// When unset, SchemaVersion defaults to 0, so provider authors can start
	// their Versioning at any integer >= 1
	SchemaVersion int

	// MigrateState is deprecated and any new changes to a resource's schema
	// should be handled by StateUpgraders. Existing MigrateState implementations
	// should remain for compatibility with existing state. MigrateState will
	// still be called if the stored SchemaVersion is less than the
	// first version of the StateUpgraders.
	//
	// MigrateState is responsible for updating an InstanceState with an old
	// version to the format expected by the current version of the Schema.
	//
	// It is called during Refresh if the State's stored SchemaVersion is less
	// than the current SchemaVersion of the Resource.
	//
	// The function is yielded the state's stored SchemaVersion and a pointer to
	// the InstanceState that needs updating, as well as the configured
	// provider's configured meta interface{}, in case the migration process
	// needs to make any remote API calls.
	MigrateState StateMigrateFunc

	// StateUpgraders contains the functions responsible for upgrading an
	// existing state with an old schema version to a newer schema. It is
	// called specifically by Terraform when the stored schema version is less
	// than the current SchemaVersion of the Resource.
	//
	// StateUpgraders map specific schema versions to a StateUpgrader
	// function. The registered versions are expected to be ordered,
	// consecutive values. The initial value may be greater than 0 to account
	// for legacy schemas that weren't recorded and can be handled by
	// MigrateState.
	StateUpgraders []StateUpgrader

	// The functions below are the CRUD operations for this resource.
	//
	// The only optional operation is Update. If Update is not implemented,
	// then updates will not be supported for this resource.
	//
	// The ResourceData parameter in the functions below are used to
	// query configuration and changes for the resource as well as to set
	// the ID, computed data, etc.
	//
	// The interface{} parameter is the result of the ConfigureFunc in
	// the provider for this resource. If the provider does not define
	// a ConfigureFunc, this will be nil. This parameter should be used
	// to store API clients, configuration structures, etc.
	//
	// If any errors occur during each of the operation, an error should be
	// returned. If a resource was partially updated, be careful to enable
	// partial state mode for ResourceData and use it accordingly.
	//
	// Exists is a function that is called to check if a resource still
	// exists. If this returns false, then this will affect the diff
	// accordingly. If this function isn't set, it will not be called. You
	// can also signal existence in the Read method by calling d.SetId("")
	// if the Resource is no longer present and should be removed from state.
	// The *ResourceData passed to Exists should _not_ be modified.
	Create CreateFunc
	Read   ReadFunc
	Update UpdateFunc
	Delete DeleteFunc
	Exists ExistsFunc

	// CustomizeDiff is a custom function for working with the diff that
	// Terraform has created for this resource - it can be used to customize the
	// diff that has been created, diff values not controlled by configuration,
	// or even veto the diff altogether and abort the plan. It is passed a
	// *ResourceDiff, a structure similar to ResourceData but lacking most write
	// functions like Set, while introducing new functions that work with the
	// diff such as SetNew, SetNewComputed, and ForceNew.
	//
	// The phases Terraform runs this in, and the state available via functions
	// like Get and GetChange, are as follows:
	//
	//  * New resource: One run with no state
	//  * Existing resource: One run with state
	//  * Existing resource, forced new: One run with state (before ForceNew),
	//    then one run without state (as if new resource)
	//  * Tainted resource: No runs (custom diff logic is skipped)
	//  * Destroy: No runs (standard diff logic is skipped on destroy diffs)
	//
	// This function needs to be resilient to support all scenarios.
	//
	// If this function needs to access external API resources, remember to flag
	// the RequiresRefresh attribute mentioned below to ensure that
	// -refresh=false is blocked when running plan or apply, as this means that
	// this resource requires refresh-like behaviour to work effectively.
	//
	// For the most part, only computed fields can be customized by this
	// function.
	//
	// This function is only allowed on regular resources (not data sources).
	CustomizeDiff CustomizeDiffFunc

	// Importer is the ResourceImporter implementation for this resource.
	// If this is nil, then this resource does not support importing. If
	// this is non-nil, then it supports importing and ResourceImporter
	// must be validated. The validity of ResourceImporter is verified
	// by InternalValidate on Resource.
	Importer *ResourceImporter

	// If non-empty, this string is emitted as a warning during Validate.
	DeprecationMessage string

	// Timeouts allow users to specify specific time durations in which an
	// operation should time out, to allow them to extend an action to suit their
	// usage. For example, a user may specify a large Creation timeout for their
	// AWS RDS Instance due to its size, or restoring from a snapshot.
	// Resource implementors must enable Timeout support by adding the allowed
	// actions (Create, Read, Update, Delete, Default) to the Resource struct, and
	// accessing them in the matching methods.
	Timeouts *ResourceTimeout
}
// ShimInstanceStateFromValue converts a cty.Value to a
// terraform.InstanceState.
func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) {
// Get the raw shimmed value. While this is correct, the set hashes don't
// match those from the Schema.
s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion)
// We now rebuild the state through the ResourceData, so that the set indexes
// match what helper/schema expects.
data, err := schemaMap(r.Schema).Data(s, nil)
if err != nil {
return nil, err
}
s = data.State()
if s == nil {
s = &terraform.InstanceState{}
}
return s, nil
}
// CreateFunc creates the resource. See Resource documentation.
type CreateFunc func(*ResourceData, interface{}) error

// ReadFunc reads the resource. See Resource documentation.
type ReadFunc func(*ResourceData, interface{}) error

// UpdateFunc updates the resource. See Resource documentation.
type UpdateFunc func(*ResourceData, interface{}) error

// DeleteFunc deletes the resource. See Resource documentation.
type DeleteFunc func(*ResourceData, interface{}) error

// ExistsFunc reports whether the resource still exists. See Resource
// documentation.
type ExistsFunc func(*ResourceData, interface{}) (bool, error)

// StateMigrateFunc migrates an InstanceState from the given stored schema
// version to the current one. See Resource documentation.
type StateMigrateFunc func(
	int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)

// StateUpgrader upgrades a stored state from one schema version to the next.
type StateUpgrader struct {
	// Version is the version schema that this Upgrader will handle, converting
	// it to Version+1.
	Version int

	// Type describes the schema that this function can upgrade. Type is
	// required to decode the schema if the state was stored in a legacy
	// flatmap format.
	Type cty.Type

	// Upgrade takes the JSON encoded state and the provider meta value, and
	// upgrades the state one single schema version. The provided state is
	// decoded into the default json types using a map[string]interface{}. It
	// is up to the StateUpgradeFunc to ensure that the returned value can be
	// encoded using the new schema.
	Upgrade StateUpgradeFunc
}

// StateUpgradeFunc performs a single-version state upgrade. See StateUpgrader.
type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)

// CustomizeDiffFunc customizes the planned diff. See Resource documentation.
type CustomizeDiffFunc func(*ResourceDiff, interface{}) error
// Apply creates, updates, and/or deletes a resource.
//
// It decodes any timeouts stored on the diff (or prior state) into the
// ResourceData, destroys the existing resource when the diff requires it,
// and then dispatches to the resource's Create or Update function. The
// returned state always carries the current schema version.
func (r *Resource) Apply(
	s *terraform.InstanceState,
	d *terraform.InstanceDiff,
	meta interface{}) (*terraform.InstanceState, error) {
	data, err := schemaMap(r.Schema).Data(s, d)
	if err != nil {
		return s, err
	}
	if s != nil && data != nil {
		data.providerMeta = s.ProviderMeta
	}

	// Instance Diff should have the timeout info, need to copy it over to the
	// ResourceData meta
	rt := ResourceTimeout{}
	if _, ok := d.Meta[TimeoutKey]; ok {
		if err := rt.DiffDecode(d); err != nil {
			log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
		}
	} else if s != nil {
		if _, ok := s.Meta[TimeoutKey]; ok {
			if err := rt.StateDecode(s); err != nil {
				log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
			}
		}
	} else {
		log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
	}
	data.timeouts = &rt

	if s == nil {
		// The Terraform API dictates that this should never happen, but
		// it doesn't hurt to be safe in this case.
		s = new(terraform.InstanceState)
	}

	if d.Destroy || d.RequiresNew() {
		if s.ID != "" {
			// Destroy the resource since it is created
			if err := r.Delete(data, meta); err != nil {
				return r.recordCurrentSchemaVersion(data.State()), err
			}

			// Make sure the ID is gone.
			data.SetId("")
		}

		// If we're only destroying, and not creating, then return
		// now since we're done!
		if !d.RequiresNew() {
			return nil, nil
		}

		// Reset the data to be stateless since we just destroyed
		data, err = schemaMap(r.Schema).Data(nil, d)
		// FIX: check the error before touching data; previously the timeouts
		// were assigned before the nil check, risking a nil dereference.
		if err != nil {
			return nil, err
		}
		// data was reset, need to re-apply the parsed timeouts
		data.timeouts = &rt
	}

	err = nil
	if data.Id() == "" {
		// We're creating, it is a new resource.
		data.MarkNewResource()
		err = r.Create(data, meta)
	} else {
		if r.Update == nil {
			return s, fmt.Errorf("doesn't support update")
		}
		err = r.Update(data, meta)
	}

	return r.recordCurrentSchemaVersion(data.State()), err
}
// Diff returns the planned changes for this resource, given its current
// state and configuration.
func (r *Resource) Diff(
	s *terraform.InstanceState,
	c *terraform.ResourceConfig,
	meta interface{}) (*terraform.InstanceDiff, error) {

	timeouts := &ResourceTimeout{}
	if err := timeouts.ConfigDecode(r, c); err != nil {
		return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
	}

	instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true)
	if err != nil {
		return instanceDiff, err
	}

	if instanceDiff == nil {
		log.Printf("[DEBUG] Instance Diff is nil in Diff()")
		return instanceDiff, err
	}

	// Carry the configured timeouts along on the diff so Apply can see them.
	if encodeErr := timeouts.DiffEncode(instanceDiff); encodeErr != nil {
		log.Printf("[ERR] Error encoding timeout to instance diff: %s", encodeErr)
	}
	return instanceDiff, err
}
// simpleDiff computes a diff without the full legacy RequiresNew handling,
// then backfills the Old value of every attribute diff from the prior state.
func (r *Resource) simpleDiff(
	s *terraform.InstanceState,
	c *terraform.ResourceConfig,
	meta interface{}) (*terraform.InstanceDiff, error) {

	diff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
	if err != nil {
		return diff, err
	}
	if diff == nil {
		diff = terraform.NewInstanceDiff()
	}

	// The full legacy Diff set Old via its RequiresNew logic; replicate
	// that here so each attribute diff records the prior value.
	if s != nil {
		for key, attrDiff := range diff.Attributes {
			if attrDiff != nil {
				attrDiff.Old = s.Attributes[key]
			}
		}
	}
	return diff, nil
}
// Validate checks the given configuration against the resource schema,
// returning any warnings and errors found. A deprecation message on the
// resource is surfaced as an extra warning.
func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
	warnings, validationErrs := schemaMap(r.Schema).Validate(c)
	if r.DeprecationMessage != "" {
		warnings = append(warnings, r.DeprecationMessage)
	}
	return warnings, validationErrs
}
// ReadDataApply loads the data for a data source, given a diff that
// describes the configuration arguments and desired computed attributes.
func (r *Resource) ReadDataApply(
	d *terraform.InstanceDiff,
	meta interface{},
) (*terraform.InstanceState, error) {
	// A data source is rebuilt from scratch on every read, so it always
	// starts from a nil source state.
	data, err := schemaMap(r.Schema).Data(nil, d)
	if err != nil {
		return nil, err
	}

	readErr := r.Read(data, meta)

	state := data.State()
	if state != nil && state.ID == "" {
		// Data sources may set an ID but aren't required to; supply a
		// placeholder so the invariant that every resource has a non-empty
		// ID still holds.
		state.ID = "-"
	}

	return r.recordCurrentSchemaVersion(state), readErr
}
// RefreshWithoutUpgrade reads the instance state, but does not call
// MigrateState or the StateUpgraders, since those are now invoked in a
// separate API call.
// RefreshWithoutUpgrade is part of the new plugin shims.
//
// Returns (nil, nil) when the resource no longer exists.
func (r *Resource) RefreshWithoutUpgrade(
	s *terraform.InstanceState,
	meta interface{}) (*terraform.InstanceState, error) {
	// If the ID is already somehow blank, it doesn't exist
	if s.ID == "" {
		return nil, nil
	}

	rt := ResourceTimeout{}
	if _, ok := s.Meta[TimeoutKey]; ok {
		if err := rt.StateDecode(s); err != nil {
			log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
		}
	}

	if r.Exists != nil {
		// Make a copy of data so that if it is modified it doesn't
		// affect our Read later.
		data, err := schemaMap(r.Schema).Data(s, nil)
		// FIX: check the error before dereferencing data; the timeout
		// assignment previously ran first and could panic on a nil value.
		if err != nil {
			return s, err
		}
		data.timeouts = &rt

		if s != nil {
			data.providerMeta = s.ProviderMeta
		}

		exists, err := r.Exists(data, meta)
		if err != nil {
			return s, err
		}
		if !exists {
			return nil, nil
		}
	}

	data, err := schemaMap(r.Schema).Data(s, nil)
	// FIX: same error-before-use ordering as above.
	if err != nil {
		return s, err
	}
	data.timeouts = &rt

	if s != nil {
		data.providerMeta = s.ProviderMeta
	}

	err = r.Read(data, meta)
	state := data.State()
	if state != nil && state.ID == "" {
		state = nil
	}

	return r.recordCurrentSchemaVersion(state), err
}
// Refresh refreshes the state of the resource.
//
// It optionally checks existence via r.Exists, runs any pending state
// upgrades, and then calls r.Read. Returns (nil, nil) when the resource
// no longer exists.
func (r *Resource) Refresh(
	s *terraform.InstanceState,
	meta interface{}) (*terraform.InstanceState, error) {
	// If the ID is already somehow blank, it doesn't exist
	if s.ID == "" {
		return nil, nil
	}

	rt := ResourceTimeout{}
	if _, ok := s.Meta[TimeoutKey]; ok {
		if err := rt.StateDecode(s); err != nil {
			log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
		}
	}

	if r.Exists != nil {
		// Make a copy of data so that if it is modified it doesn't
		// affect our Read later.
		data, err := schemaMap(r.Schema).Data(s, nil)
		// FIX: check the error before dereferencing data; the timeout
		// assignment previously ran first and could panic on a nil value.
		if err != nil {
			return s, err
		}
		data.timeouts = &rt

		exists, err := r.Exists(data, meta)
		if err != nil {
			return s, err
		}
		if !exists {
			return nil, nil
		}
	}

	// there may be new StateUpgraders that need to be run
	s, err := r.upgradeState(s, meta)
	if err != nil {
		return s, err
	}

	data, err := schemaMap(r.Schema).Data(s, nil)
	// FIX: same error-before-use ordering as above.
	if err != nil {
		return s, err
	}
	data.timeouts = &rt

	err = r.Read(data, meta)
	state := data.State()
	if state != nil && state.ID == "" {
		state = nil
	}

	return r.recordCurrentSchemaVersion(state), err
}
// upgradeState brings a stored InstanceState up to the current schema
// version. It first runs the legacy MigrateState function (if any state
// migration is needed), then applies each StateUpgrader in sequence,
// converting the flatmap state to JSON form and back. The order of
// operations here is significant: MigrateState may change the effective
// schema version before the upgraders run.
func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
	var err error

	needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
	migrate := needsMigration && r.MigrateState != nil

	if migrate {
		s, err = r.MigrateState(stateSchemaVersion, s, meta)
		if err != nil {
			return s, err
		}
	}

	if len(r.StateUpgraders) == 0 {
		return s, nil
	}

	// If we ran MigrateState, then the stateSchemaVersion value is no longer
	// correct. We can expect the first upgrade function to be the correct
	// schema type version.
	if migrate {
		stateSchemaVersion = r.StateUpgraders[0].Version
	}

	schemaType := r.CoreConfigSchema().ImpliedType()
	// find the expected type to convert the state
	for _, upgrader := range r.StateUpgraders {
		if stateSchemaVersion == upgrader.Version {
			schemaType = upgrader.Type
		}
	}

	// StateUpgraders only operate on the new JSON format state, so the state
	// need to be converted.
	stateVal, err := StateValueFromInstanceState(s, schemaType)
	if err != nil {
		return nil, err
	}

	jsonState, err := StateValueToJSONMap(stateVal, schemaType)
	if err != nil {
		return nil, err
	}

	// Apply each upgrader in turn, starting from the state's version;
	// each upgrader advances the version by exactly one.
	for _, upgrader := range r.StateUpgraders {
		if stateSchemaVersion != upgrader.Version {
			continue
		}

		jsonState, err = upgrader.Upgrade(jsonState, meta)
		if err != nil {
			return nil, err
		}
		stateSchemaVersion++
	}

	// now we need to re-flatmap the new state
	stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema())
	if err != nil {
		return nil, err
	}

	return r.ShimInstanceStateFromValue(stateVal)
}
// InternalValidate should be called to validate the structure
// of the resource.
//
// This should be called in a unit test for any resource to verify
// before release that a resource is properly configured for use with
// this library.
//
// Provider.InternalValidate() will automatically call this for all of
// the resources it manages, so you don't need to call this manually if it
// is part of a Provider.
//
// writable distinguishes managed resources (true) from data sources (false).
func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
	if r == nil {
		return errors.New("resource is nil")
	}

	if !writable {
		if r.Create != nil || r.Update != nil || r.Delete != nil {
			return fmt.Errorf("must not implement Create, Update or Delete")
		}

		// CustomizeDiff cannot be defined for read-only resources
		if r.CustomizeDiff != nil {
			return fmt.Errorf("cannot implement CustomizeDiff")
		}
	}

	tsm := topSchemaMap

	if r.isTopLevel() && writable {
		// All non-Computed attributes must be ForceNew if Update is not defined
		if r.Update == nil {
			nonForceNewAttrs := make([]string, 0)
			for k, v := range r.Schema {
				if !v.ForceNew && !v.Computed {
					nonForceNewAttrs = append(nonForceNewAttrs, k)
				}
			}
			if len(nonForceNewAttrs) > 0 {
				return fmt.Errorf(
					"No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
			}
		} else {
			nonUpdateableAttrs := make([]string, 0)
			for k, v := range r.Schema {
				// Parenthesized for clarity (&& binds tighter than ||):
				// an attribute is non-updateable if it's ForceNew, or
				// Computed without being Optional.
				if v.ForceNew || (v.Computed && !v.Optional) {
					nonUpdateableAttrs = append(nonUpdateableAttrs, k)
				}
			}
			updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
			if updateableAttrs == 0 {
				return fmt.Errorf(
					"All fields are ForceNew or Computed w/out Optional, Update is superfluous")
			}
		}

		tsm = schemaMap(r.Schema)

		// Destroy, and Read are required
		if r.Read == nil {
			return fmt.Errorf("Read must be implemented")
		}
		if r.Delete == nil {
			return fmt.Errorf("Delete must be implemented")
		}

		// If we have an importer, we need to verify the importer.
		if r.Importer != nil {
			if err := r.Importer.InternalValidate(); err != nil {
				return err
			}
		}

		for k, f := range tsm {
			if isReservedResourceFieldName(k, f) {
				return fmt.Errorf("%s is a reserved field name", k)
			}
		}
	}

	// StateUpgraders must form a contiguous run of versions ending at
	// SchemaVersion-1.
	lastVersion := -1
	for _, u := range r.StateUpgraders {
		if lastVersion >= 0 && u.Version-lastVersion > 1 {
			return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version)
		}

		if u.Version >= r.SchemaVersion {
			return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion)
		}

		if !u.Type.IsObjectType() {
			return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version)
		}

		if u.Upgrade == nil {
			return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version)
		}

		lastVersion = u.Version
	}

	if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 {
		return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion)
	}

	// Data source
	if r.isTopLevel() && !writable {
		tsm = schemaMap(r.Schema)
		// `for k := range` replaces the unidiomatic `for k, _ := range`.
		for k := range tsm {
			if isReservedDataSourceFieldName(k) {
				return fmt.Errorf("%s is a reserved field name", k)
			}
		}
	}

	return schemaMap(r.Schema).InternalValidate(tsm)
}
// isReservedDataSourceFieldName reports whether name collides with a
// field name reserved for data sources.
func isReservedDataSourceFieldName(name string) bool {
	for i := range ReservedDataSourceFields {
		if ReservedDataSourceFields[i] == name {
			return true
		}
	}
	return false
}
// isReservedResourceFieldName reports whether name collides with a field
// name reserved for managed resources.
func isReservedResourceFieldName(name string, s *Schema) bool {
	// "id" is permitted while it is being phased out (deprecated/removed).
	// See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415
	if name == "id" && (s.Deprecated != "" || s.Removed != "") {
		return false
	}

	for i := range ReservedResourceFields {
		if name == ReservedResourceFields[i] {
			return true
		}
	}
	return false
}
// Data returns a ResourceData struct for this Resource. Each return value
// is a separate copy and can be safely modified differently.
//
// The data returned from this function has no actual effect on the Resource
// itself (including the state given to this function).
//
// This function is useful for unit tests and ResourceImporter functions.
func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
	result, err := schemaMap(r.Schema).Data(s, nil)
	if err != nil {
		// Data currently never returns a non-nil error; panic so any future
		// change that breaks that assumption is surfaced immediately.
		panic(err)
	}

	// Attach the resource's timeouts, defaulting to an empty set.
	if r.Timeouts != nil {
		result.timeouts = r.Timeouts
	} else {
		result.timeouts = &ResourceTimeout{}
	}

	// Default the stored schema version to the latest.
	result.meta = map[string]interface{}{
		"schema_version": strconv.Itoa(r.SchemaVersion),
	}

	return result
}
// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing
//
// TODO: May be able to be removed with the above ResourceData function.
func (r *Resource) TestResourceData() *ResourceData {
	d := new(ResourceData)
	d.schema = r.Schema
	return d
}
// SchemasForFlatmapPath tries its best to find a sequence of schemas that
// the given dot-delimited attribute path traverses through in the schema
// of the receiving Resource. Delegates to the package-level helper.
func (r *Resource) SchemasForFlatmapPath(path string) []*Schema {
	schemas := SchemasForFlatmapPath(path, r.Schema)
	return schemas
}
// isTopLevel reports whether the resource is "top level", i.e. not a
// sub-resource.
func (r *Resource) isTopLevel() bool {
	// TODO: This is a heuristic; replace with a definitive attribute?
	// Sub-resources never define Create or Read.
	return r.Create != nil || r.Read != nil
}
// checkSchemaVersion determines whether the given InstanceState needs to be
// migrated by comparing its stored version number against the current
// SchemaVersion, returning (needsMigration, storedVersion).
func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
	// A missing, nil, or non-string stored version is treated as "0"; a
	// non-string value can only appear if the state was tampered with.
	versionString := "0"
	if raw, ok := is.Meta["schema_version"].(string); ok {
		versionString = raw
	}

	stateSchemaVersion, _ := strconv.Atoi(versionString)

	// MigrateState doesn't have to handle versions covered by a
	// StateUpgrader, so cap the comparison at the first upgrader's version
	// when upgraders are present.
	threshold := r.SchemaVersion
	if len(r.StateUpgraders) > 0 {
		threshold = r.StateUpgraders[0].Version
	}

	return stateSchemaVersion < threshold, stateSchemaVersion
}
// recordCurrentSchemaVersion stamps the current SchemaVersion into the
// state's meta map (when the state is non-nil and versioning is in use).
func (r *Resource) recordCurrentSchemaVersion(
	state *terraform.InstanceState) *terraform.InstanceState {
	if state == nil || r.SchemaVersion <= 0 {
		return state
	}

	if state.Meta == nil {
		state.Meta = map[string]interface{}{}
	}
	state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
	return state
}
// Noop is a convenience implementation of a resource function which takes
// no action and reports success.
func Noop(_ *ResourceData, _ interface{}) error {
	return nil
}
// RemoveFromState is a convenience implementation of a resource function
// which sets the resource ID to empty string (to remove it from state)
// and returns no error.
func RemoveFromState(d *ResourceData, _ interface{}) error {
d.SetId("")
return nil
} | go | github | https://github.com/hashicorp/terraform | internal/legacy/helper/schema/resource.go |
//===----------------- ModulesBuilder.h --------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Experimental support for C++20 Modules.
//
// Currently we simplify the implementations by preventing reusing module files
// across different versions and different source files. But this is clearly a
// waste of time and space in the end of the day.
//
// TODO: Supporting reusing module files across different versions and
// different source files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_MODULES_BUILDER_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_MODULES_BUILDER_H
#include "GlobalCompilationDatabase.h"
#include "ProjectModules.h"
#include "support/Path.h"
#include "support/ThreadsafeFS.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "llvm/ADT/SmallString.h"
#include <memory>
namespace clang {
namespace clangd {
/// Store all the needed module files information to parse a single
/// source file. e.g.,
///
/// ```
/// // a.cppm
/// export module a;
///
/// // b.cppm
/// export module b;
/// import a;
///
/// // c.cppm
/// export module c;
/// import b;
/// ```
///
/// For the source file `c.cppm`, an instance of the class will store
/// the module files for `a.cppm` and `b.cppm`. But the module file for
/// `c.cppm` won't be stored, since it is not needed to parse `c.cppm`.
///
/// Users should only get PrerequisiteModules from
/// `ModulesBuilder::buildPrerequisiteModulesFor(...)`.
///
/// Users can detect whether the PrerequisiteModules is still up to date by
/// calling the `canReuse()` member function.
///
/// The users should call `adjustHeaderSearchOptions(...)` to update the
/// compilation commands to select the built module files first. Before calling
/// `adjustHeaderSearchOptions()`, users should call `canReuse()` first to check
/// if all the stored module files are valid. In case they are not valid,
/// users should call `ModulesBuilder::buildPrerequisiteModulesFor(...)` again
/// to get the new PrerequisiteModules.
class PrerequisiteModules {
public:
  /// Change commands to load the module files recorded in this
  /// PrerequisiteModules first.
  virtual void
  adjustHeaderSearchOptions(HeaderSearchOptions &Options) const = 0;

  /// Whether or not the built module files are up to date.
  /// Note that this can only be used after building the module files.
  virtual bool
  canReuse(const CompilerInvocation &CI,
           llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>) const = 0;

  virtual ~PrerequisiteModules() = default;
};
/// This class handles building module files for a given source file.
///
/// In the future, we want the class to manage the module files across
/// different versions and different source files.
class ModulesBuilder {
public:
  ModulesBuilder(const GlobalCompilationDatabase &CDB);
  ~ModulesBuilder();

  // Non-copyable and non-movable: the builder owns internal state tied to
  // the compilation database.
  ModulesBuilder(const ModulesBuilder &) = delete;
  ModulesBuilder(ModulesBuilder &&) = delete;
  ModulesBuilder &operator=(const ModulesBuilder &) = delete;
  ModulesBuilder &operator=(ModulesBuilder &&) = delete;

  /// Build (or look up) the module files needed to parse \p File.
  std::unique_ptr<PrerequisiteModules>
  buildPrerequisiteModulesFor(PathRef File, const ThreadsafeFS &TFS);

private:
  // PImpl to keep implementation details out of this header.
  class ModulesBuilderImpl;
  std::unique_ptr<ModulesBuilderImpl> Impl;
};
} // namespace clangd
} // namespace clang
#endif | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/ModulesBuilder.h |
package schemaversion
import (
"context"
"regexp"
)
// V20 migrates legacy variable syntax in data links and field options.
// It rewrites old underscore-style variable names found in data-link URLs
// and field-option titles/links to the newer dotted syntax:
//
//   - __series_name  -> __series.name
//   - $__series_name -> ${__series.name}
//   - __value_time   -> __value.time
//   - __field_name   -> __field.name
//   - $__field_name  -> ${__field.name}
//
// For example, a data link URL of
// "http://example.com?series=$__series_name&time=__value_time" becomes
// "http://example.com?series=${__series.name}&time=__value.time", and a
// field option title "Field: __field_name" becomes "Field: __field.name".
//
// Panels without options, or options without dataLinks/fieldOptions, are
// left untouched. The dashboard's schemaVersion is always bumped to 20.
func V20(_ context.Context, dashboard map[string]interface{}) error {
	dashboard["schemaVersion"] = 20

	panels, ok := dashboard["panels"].([]interface{})
	if !ok {
		return nil
	}

	for _, rawPanel := range panels {
		panel, isMap := rawPanel.(map[string]interface{})
		if !isMap {
			continue
		}

		// Only panels with an options map carry data links / field options.
		options, hasOptions := panel["options"].(map[string]interface{})
		if !hasOptions {
			continue
		}
		updateDataLinksVariableSyntax(options)
		updateFieldOptionsVariableSyntax(options)
	}

	return nil
}
// updateDataLinksVariableSyntax rewrites legacy variable names inside the
// URLs of a panel's options.dataLinks entries, in place.
func updateDataLinksVariableSyntax(options map[string]interface{}) {
	links, ok := options["dataLinks"].([]interface{})
	if !ok || !IsArray(links) {
		return
	}

	for _, raw := range links {
		linkMap, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		if url, isString := linkMap["url"].(string); isString {
			linkMap["url"] = updateVariablesSyntax(url)
		}
	}
}
// updateFieldOptionsVariableSyntax rewrites legacy variable names in a
// panel's options.fieldOptions.defaults: both the title and the URLs of
// any default links, in place.
func updateFieldOptionsVariableSyntax(options map[string]interface{}) {
	fieldOptions, ok := options["fieldOptions"].(map[string]interface{})
	if !ok {
		return
	}
	defaults, ok := fieldOptions["defaults"].(map[string]interface{})
	if !ok {
		return
	}

	// Migrate the field option title.
	if title, isString := defaults["title"].(string); isString {
		defaults["title"] = updateVariablesSyntax(title)
	}

	// Migrate any default link URLs.
	links, ok := defaults["links"].([]interface{})
	if !ok || !IsArray(links) {
		return
	}
	for _, raw := range links {
		if linkMap, isMap := raw.(map[string]interface{}); isMap {
			if url, isString := linkMap["url"].(string); isString {
				linkMap["url"] = updateVariablesSyntax(url)
			}
		}
	}
}
// legacyVariableNamesRegex matches legacy variable names:
// __series_name, $__series_name, __value_time, __field_name, $__field_name.
// Compiled once at package init to avoid per-call compilation.
var legacyVariableNamesRegex = regexp.MustCompile(`(__series_name)|(\$__series_name)|(__value_time)|(__field_name)|(\$__field_name)`)

// updateVariablesSyntax rewrites legacy variable names to the new dotted
// syntax, mirroring the frontend updateVariablesSyntax behavior. Any match
// without a known replacement is returned unchanged.
func updateVariablesSyntax(text string) string {
	replacements := map[string]string{
		"__series_name":  "__series.name",
		"$__series_name": "${__series.name}",
		"__value_time":   "__value.time",
		"__field_name":   "__field.name",
		"$__field_name":  "${__field.name}",
	}

	return legacyVariableNamesRegex.ReplaceAllStringFunc(text, func(match string) string {
		if replacement, known := replacements[match]; known {
			return replacement
		}
		return match
	})
}
import sys
import unittest
import json
from rhizi_client import RhiziAPIClient, set_debugging
# Py2/Py3 compatibility: Python 3 has no builtin `unicode`, so alias it to
# `str` there; the tests below call `unicode(...)` uniformly.
python2 = sys.version_info[0] == 2
if not python2:
    unicode = str
class TestRhiziAPIClient(unittest.TestCase):
    """Integration tests for RhiziAPIClient.

    NOTE(review): these tests assume a Rhizi server is reachable at
    http://localhost:8080 — they are integration tests, not unit tests,
    and will fail without a running server.
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): `self` here is actually the class object (this is a
        # classmethod); conventionally the parameter would be named `cls`.
        # constant
        self.rz_doc_name = unicode("Test")
        self.user_email = "tester@test.com"
        self.user_password = "password"

        # init client
        self.client = RhiziAPIClient("http://localhost:8080", debug=True)

        # create a test user
        self.client.user_register(self.user_email, self.user_password, "tester", "Test", "User")

        # TODO : activate email ?

        # user login and store credentials
        self.client.user_login(self.user_email, self.user_password)

        # clean DB and create a new Test rz doc
        self.client.rz_doc_delete(self.rz_doc_name)
        self.client.rz_doc_create(self.rz_doc_name)

    def test_make_url(self):
        """URLs should be conformed, errors should be raised when passing wrong paths """
        # Paths must be relative (no leading slash) and not absolute URLs.
        self.assertRaises(ValueError, lambda : self.client.make_url("/start-with-slash") )
        self.assertRaises(ValueError, lambda : self.client.make_url("http://rhizi.com/api") )

    def test_rz_doc_create_delete_search(self):
        """API should allow creation and deleting of new documents"""
        self.assertRaises(AssertionError, lambda : self.client.rz_doc_create(12) ) # wrong type
        doc_name = unicode("New Test Doc")
        # Delete first so a leftover doc from a previous run doesn't break create.
        r = self.client.rz_doc_delete(doc_name)
        r = self.client.rz_doc_create(doc_name)
        self.assertEqual(r.status_code, 201)

        # search: the created doc should appear in search results
        r = self.client.rz_doc_search(doc_name)
        self.assertEqual(r.status_code, 200)
        self.assertIn(doc_name, r.text)

        # clone: response body is JSON with a null "error" on success
        r_clone = self.client.rz_doc_clone(doc_name)
        self.assertEqual(r_clone.status_code, 200)
        resp = json.loads(r_clone.text)
        # print resp
        self.assertEqual(None, resp["error"])

        r = self.client.rz_doc_delete(doc_name)
        self.assertEqual(r.status_code, 204)

    def test_node_create(self):
        """API should allow node creation"""
        # Argument types are validated client-side with assertions.
        self.assertRaises(AssertionError, lambda : self.client.node_create(12,"haha") )
        self.assertRaises(AssertionError, lambda : self.client.node_create("12",12) )

        id = "ID-89388"
        name = unicode("My Test Node")
        # TODO : non-authorized should raise error
        # self.assertRaises(ValueError, lambda : self.client.node_create("Test", name, id=id, labels=["Type"]) )

        # single-node creation
        r = self.client.node_create_one(self.rz_doc_name, name, id=id, labels=["Type"])
        self.assertEqual(r.status_code, 200)

        # batch creation from a list of node dicts
        nodes = [{"name": unicode("nodeA"),
                  "label": ["skill"],
                  "id":"node_01"},
                 {"name": unicode("nodeB"),
                  "label": ["keyword"],
                  "id":"node_02"}]
        r = self.client.node_create(self.rz_doc_name, nodes)
        self.assertEqual(r.status_code, 200)

    def test_node_attr_update(self):
        # create a node
        id = "ID-1"
        name = unicode("My Changing Node")
        r = self.client.node_create_one(self.rz_doc_name, name, id=id, labels=["Type"])

        # modify name
        # NOTE(review): no assertion on the update response — presumably
        # success is implied by not raising; consider asserting status_code.
        attrs = {"name" : "My Awesome Node",
                 "description" : "Greatest node ever."}
        r = self.client.node_update_attr_single(self.rz_doc_name, id, attrs)

    def test_edge_create(self):
        # seed a small graph of nodes to connect
        nodes = [
            {"name": unicode("John"), "label": ["Person"], "id":"node_03"},
            {"name": unicode("ELM coding"), "label": ["Skill"], "id":"node_04"},
            {"name": unicode("Video Game"), "label": ["Idea"], "id":"node_05"},
            {"name": unicode("Jacky"), "label": ["Person"], "id":"node_06"},
        ]
        r = self.client.node_create(self.rz_doc_name, nodes)

        # create an edge
        self.client.edge_create_one(self.rz_doc_name, nodes[0]["id"], nodes[1]["id"], relationships=["loves"])
        self.assertEqual(r.status_code, 200)

        # multiple edges: (src, dst, relationship) triples expanded into the
        # edge dict format the API expects
        edges_data = [("node_03", "node_04", "is learning"),
                      ("node_04", "node_05", "is used for"),
                      ("node_03", "node_06", "loves"),
                      ("node_06", "node_03", "hates")
                      ]
        edges = [ {"__src_id" : e[0],"__dst_id" : e[1] , "__type" : [e[2]], "options": {"option" : "test"} } for e in edges_data]
        self.client.edge_create(self.rz_doc_name, edges)
        # NOTE(review): this re-checks the earlier node_create response, not
        # the edge_create call — presumably an oversight; confirm intent.
        self.assertEqual(r.status_code, 200)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'stylesheeteditor.ui'
#
# Created: Fri Jul 26 06:50:07 2013
# by: PyQt5 UI code generator 5.0.1-snapshot-2a99e59669ee
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_StyleSheetEditor(object):
    """Auto-generated (pyuic5) UI definition for the Style Sheet Editor dialog.

    NOTE: this file is generated from stylesheeteditor.ui — edit the .ui
    file and regenerate rather than editing this class by hand.
    """

    def setupUi(self, StyleSheetEditor):
        """Build the widget tree and lay it out on StyleSheetEditor."""
        StyleSheetEditor.setObjectName("StyleSheetEditor")
        StyleSheetEditor.resize(445, 289)
        # Top-level grid: row 0 holds the style/style-sheet selectors,
        # row 1 the editor, row 2 the Save/Apply button row.
        self.gridlayout = QtWidgets.QGridLayout(StyleSheetEditor)
        self.gridlayout.setContentsMargins(9, 9, 9, 9)
        self.gridlayout.setSpacing(6)
        self.gridlayout.setObjectName("gridlayout")
        # Expanding spacers keep the combo boxes centered in row 0.
        spacerItem = QtWidgets.QSpacerItem(32, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.gridlayout.addItem(spacerItem, 0, 6, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(32, 20, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum)
        self.gridlayout.addItem(spacerItem1, 0, 0, 1, 1)
        # Style-sheet selector (items are given text in retranslateUi).
        self.styleSheetCombo = QtWidgets.QComboBox(StyleSheetEditor)
        self.styleSheetCombo.setObjectName("styleSheetCombo")
        self.styleSheetCombo.addItem("")
        self.styleSheetCombo.addItem("")
        self.styleSheetCombo.addItem("")
        self.gridlayout.addWidget(self.styleSheetCombo, 0, 5, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(10, 16, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
        self.gridlayout.addItem(spacerItem2, 0, 3, 1, 1)
        # Widget-style selector.
        self.styleCombo = QtWidgets.QComboBox(StyleSheetEditor)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.styleCombo.sizePolicy().hasHeightForWidth())
        self.styleCombo.setSizePolicy(sizePolicy)
        self.styleCombo.setObjectName("styleCombo")
        self.gridlayout.addWidget(self.styleCombo, 0, 2, 1, 1)
        # "Style:" label.
        self.label_7 = QtWidgets.QLabel(StyleSheetEditor)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
        self.label_7.setSizePolicy(sizePolicy)
        self.label_7.setObjectName("label_7")
        self.gridlayout.addWidget(self.label_7, 0, 1, 1, 1)
        # Button row: spacer pushes Save/Apply to the right edge.
        self.hboxlayout = QtWidgets.QHBoxLayout()
        self.hboxlayout.setSpacing(6)
        self.hboxlayout.setContentsMargins(0, 0, 0, 0)
        self.hboxlayout.setObjectName("hboxlayout")
        spacerItem3 = QtWidgets.QSpacerItem(321, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.hboxlayout.addItem(spacerItem3)
        self.saveButton = QtWidgets.QPushButton(StyleSheetEditor)
        self.saveButton.setEnabled(True)
        self.saveButton.setObjectName("saveButton")
        self.hboxlayout.addWidget(self.saveButton)
        # Apply starts disabled until the sheet is edited.
        self.applyButton = QtWidgets.QPushButton(StyleSheetEditor)
        self.applyButton.setEnabled(False)
        self.applyButton.setObjectName("applyButton")
        self.hboxlayout.addWidget(self.applyButton)
        self.gridlayout.addLayout(self.hboxlayout, 2, 0, 1, 7)
        # The style-sheet text editor spans the full width of row 1.
        self.styleTextEdit = QtWidgets.QTextEdit(StyleSheetEditor)
        self.styleTextEdit.setObjectName("styleTextEdit")
        self.gridlayout.addWidget(self.styleTextEdit, 1, 0, 1, 7)
        # "Style Sheet:" label.
        self.label_8 = QtWidgets.QLabel(StyleSheetEditor)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
        self.label_8.setSizePolicy(sizePolicy)
        self.label_8.setObjectName("label_8")
        self.gridlayout.addWidget(self.label_8, 0, 4, 1, 1)

        self.retranslateUi(StyleSheetEditor)
        # Auto-connect signals to slots named on_<objectName>_<signal>.
        QtCore.QMetaObject.connectSlotsByName(StyleSheetEditor)

    def retranslateUi(self, StyleSheetEditor):
        """Set all user-visible strings (kept separate for translation)."""
        _translate = QtCore.QCoreApplication.translate
        StyleSheetEditor.setWindowTitle(_translate("StyleSheetEditor", "Style Editor"))
        self.styleSheetCombo.setItemText(0, _translate("StyleSheetEditor", "Default"))
        self.styleSheetCombo.setItemText(1, _translate("StyleSheetEditor", "Coffee"))
        self.styleSheetCombo.setItemText(2, _translate("StyleSheetEditor", "Pagefold"))
        self.label_7.setText(_translate("StyleSheetEditor", "Style:"))
        self.saveButton.setText(_translate("StyleSheetEditor", "&Save"))
        self.applyButton.setText(_translate("StyleSheetEditor", "&Apply"))
        self.label_8.setText(_translate("StyleSheetEditor", "Style Sheet:"))
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2021 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
#import <Realm/RLMCollection.h>
RLM_HEADER_AUDIT_BEGIN(nullability, sendability)
@class RLMObject, RLMResults<RLMObjectType>, RLMDictionaryChange;
/**
`RLMDictionary` is a container type in Realm representing a dynamic collection of key-value pairs.
Unlike `NSDictionary`, `RLMDictionary`s hold a single key and value type.
This is referred to in these docs as the “type” and “keyType” of the dictionary.
When declaring an `RLMDictionary` property, the object type and keyType must be marked as conforming to a
protocol by the same name as the objects it should contain.
RLM_COLLECTION_TYPE(ObjectType)
...
@property RLMDictionary<NSString *, ObjectType *><RLMString, ObjectType> *objectTypeDictionary;
`RLMDictionary`s can be queried with the same predicates as `RLMObject` and `RLMResult`s.
`RLMDictionary`s cannot be created directly. `RLMDictionary` properties on `RLMObject`s are
lazily created when accessed, or can be obtained by querying a Realm.
`RLMDictionary` only supports `NSString` as a key. Realm disallows the use of `.` or `$` characters within a dictionary key.
### Key-Value Observing
`RLMDictionary` supports dictionary key-value observing on `RLMDictionary` properties on `RLMObject`
subclasses, and the `invalidated` property on `RLMDictionary` instances themselves is
key-value observing compliant when the `RLMDictionary` is attached to a managed
`RLMObject` (`RLMDictionary`s on unmanaged `RLMObject`s will never become invalidated).
*/
@interface RLMDictionary<RLMKeyType, RLMObjectType>: NSObject<RLMCollection>
#pragma mark - Properties
/**
The number of entries in the dictionary.
*/
@property (nonatomic, readonly, assign) NSUInteger count;
/**
The type of the objects in the dictionary.
*/
@property (nonatomic, readonly, assign) RLMPropertyType type;
/**
The type of the key used in this dictionary.
*/
@property (nonatomic, readonly, assign) RLMPropertyType keyType;
/**
Indicates whether the objects in the collection can be `nil`.
*/
@property (nonatomic, readonly, getter = isOptional) BOOL optional;
/**
The class name of the objects contained in the dictionary.
Will be `nil` if `type` is not RLMPropertyTypeObject.
*/
@property (nonatomic, readonly, copy, nullable) NSString *objectClassName;
/**
The Realm which manages the dictionary. Returns `nil` for unmanaged dictionary.
*/
@property (nonatomic, readonly, nullable) RLMRealm *realm;
/**
Indicates if the dictionary can no longer be accessed.
*/
@property (nonatomic, readonly, getter = isInvalidated) BOOL invalidated;
/**
Indicates if the dictionary is frozen.
Frozen dictionaries are immutable and can be accessed from any thread. Frozen dictionaries
are created by calling `-freeze` on a managed live dictionary. Unmanaged dictionaries are
never frozen.
*/
@property (nonatomic, readonly, getter = isFrozen) BOOL frozen;
#pragma mark - Accessing Objects from a Dictionary
/**
Returns the value associated with a given key.
@param key The name of the property.
@discussion If key does not start with “@”, invokes object(forKey:). If key does start
with “@”, strips the “@” and invokes [super valueForKey:] with the rest of the key.
@return A value associated with a given key or `nil`.
*/
- (nullable id)valueForKey:(nonnull RLMKeyType)key;
/**
Returns an array containing the dictionary’s keys.
@note The order of the elements in the array is not defined.
*/
@property(readonly, copy) NSArray<RLMKeyType> *allKeys;
/**
Returns an array containing the dictionary’s values.
@note The order of the elements in the array is not defined.
*/
@property(readonly, copy) NSArray<RLMObjectType> *allValues;
/**
Returns the value associated with a given key.
@note `nil` will be returned if no value is associated with a given key. NSNull will be returned
where null is associated with the key.
@param key The key for which to return the corresponding value.
@return The value associated with key.
*/
- (nullable RLMObjectType)objectForKey:(nonnull RLMKeyType)key;
/**
Returns the value associated with a given key.
@note `nil` will be returned if no value is associated with a given key. NSNull will be returned
where null is associated with the key.
@param key The key for which to return the corresponding value.
@return The value associated with key.
*/
- (nullable RLMObjectType)objectForKeyedSubscript:(RLMKeyType)key;
/**
Applies a given block object to the each key-value pair of the dictionary.
@param block A block object to operate on entries in the dictionary.
@note If the block sets *stop to YES, the enumeration stops.
*/
- (void)enumerateKeysAndObjectsUsingBlock:(void (^)(RLMKeyType key, RLMObjectType obj, BOOL *stop))block;
#pragma mark - Adding, Removing, and Replacing Objects in a Dictionary
/**
Replace the contents of a dictionary with the contents of another dictionary - NSDictionary or RLMDictionary.
This will remove all elements in this dictionary and then apply each element from the given dictionary.
@warning This method may only be called during a write transaction.
@warning If otherDictionary is self this will result in an empty dictionary.
*/
- (void)setDictionary:(id)otherDictionary;
/**
Removes all contents in the dictionary.
@warning This method may only be called during a write transaction.
*/
- (void)removeAllObjects;
/**
Removes from the dictionary entries specified by elements in a given array. If a given key does not
exist, no mutation will happen for that key.
@warning This method may only be called during a write transaction.
*/
- (void)removeObjectsForKeys:(NSArray<RLMKeyType> *)keyArray;
/**
Removes a given key and its associated value from the dictionary. If the key does not exist the dictionary
will not be modified.
@warning This method may only be called during a write transaction.
*/
- (void)removeObjectForKey:(RLMKeyType)key;
/**
Adds a given key-value pair to the dictionary if the key is not present, or updates the value for the given key
if the key is already present.
@warning This method may only be called during a write transaction.
*/
- (void)setObject:(nullable RLMObjectType)obj forKeyedSubscript:(RLMKeyType)key;
/**
Adds a given key-value pair to the dictionary if the key is not present, or updates the value for the given key
if the key is already present.
@warning This method may only be called during a write transaction.
*/
- (void)setObject:(nullable RLMObjectType)anObject forKey:(RLMKeyType)aKey;
/**
Adds to the receiving dictionary the entries from another dictionary.
@note If the receiving dictionary contains the same key(s) as the otherDictionary, then
the receiving dictionary will update each key-value pair for the matching key.
@warning This method may only be called during a write transaction.
@param otherDictionary An enumerable object such as `NSDictionary` or `RLMDictionary` which contains objects of the
same type as the receiving dictionary.
*/
- (void)addEntriesFromDictionary:(id <NSFastEnumeration>)otherDictionary;
#pragma mark - Querying a Dictionary
/**
Returns all the values matching the given predicate in the dictionary.
@note The keys in the dictionary are ignored when querying values, and they will not be returned in the `RLMResults`.
@param predicateFormat A predicate format string, optionally followed by a variable number of arguments.
@return An `RLMResults` of objects that match the given predicate.
*/
- (RLMResults<RLMObjectType> *)objectsWhere:(NSString *)predicateFormat, ...;
/// :nodoc:
- (RLMResults<RLMObjectType> *)objectsWhere:(NSString *)predicateFormat args:(va_list)args;
/**
Returns all the values matching the given predicate in the dictionary.
@note The keys in the dictionary are ignored when querying values, and they will not be returned in the `RLMResults`.
@param predicate The predicate with which to filter the objects.
@return An `RLMResults` of objects that match the given predicate
*/
- (RLMResults<RLMObjectType> *)objectsWithPredicate:(NSPredicate *)predicate;
/**
Returns a sorted RLMResults of all values in the dictionary.
@note The keys in the dictionary are ignored when sorting values, and they will not be returned in the `RLMResults`.
@param keyPath The key path to sort by.
@param ascending The direction to sort in.
@return An `RLMResults` sorted by the specified key path.
*/- (RLMResults<RLMObjectType> *)sortedResultsUsingKeyPath:(NSString *)keyPath ascending:(BOOL)ascending;
/**
Returns a sorted RLMResults of all values in the dictionary.
@note The keys in the dictionary are ignored when sorting values, and they will not be returned in the `RLMResults`.
@param properties An array of `RLMSortDescriptor`s to sort by.
@return An `RLMResults` sorted by the specified properties.
*/
- (RLMResults<RLMObjectType> *)sortedResultsUsingDescriptors:(NSArray<RLMSortDescriptor *> *)properties;
/**
Returns a distinct `RLMResults` from all values in the dictionary.
@note The keys in the dictionary are ignored, and they will not be returned in the `RLMResults`.
@param keyPaths The key paths to distinct on.
@return An `RLMResults` with the distinct values of the keypath(s).
*/
- (RLMResults<RLMObjectType> *)distinctResultsUsingKeyPaths:(NSArray<NSString *> *)keyPaths;
#pragma mark - Aggregating Property Values
/**
Returns the minimum (lowest) value of the given property among all the values in the dictionary.
NSNumber *min = [object.dictionaryProperty minOfProperty:@"age"];
@param property The property whose minimum value is desired. Only properties of
types `int`, `float`, `double`, `NSDate`, `RLMValue` and `RLMDecimal128` are supported.
@return The minimum value of the property, or `nil` if the dictionary is empty.
*/
- (nullable id)minOfProperty:(NSString *)property;
/**
Returns the maximum (highest) value of the given property among all the objects in the dictionary.
NSNumber *max = [object.dictionaryProperty maxOfProperty:@"age"];
@param property The property whose maximum value is desired. Only properties of
types `int`, `float`, `double`, `NSDate`, `RLMValue` and `RLMDecimal128` are supported.
@return The maximum value of the property, or `nil` if the dictionary is empty.
*/
- (nullable id)maxOfProperty:(NSString *)property;
/**
Returns the sum of distinct values of a given property over all the objects in the dictionary.
NSNumber *sum = [object.dictionaryProperty sumOfProperty:@"age"];
@param property The property whose values should be summed. Only properties of
types `int`, `float`, `double`, `RLMValue` and `RLMDecimal128` are supported.
@return The sum of the given property.
*/
- (NSNumber *)sumOfProperty:(NSString *)property;
/**
Returns the average value of a given property over the objects in the dictionary.
NSNumber *average = [object.dictionaryProperty averageOfProperty:@"age"];
@param property The property whose average value is desired. Only properties of
types `int`, `float`, `double`, `NSDate`, `RLMValue` and `RLMDecimal128` are supported.
@return The average value of the given property, or `nil` if the dictionary is empty.
*/
- (nullable NSNumber *)averageOfProperty:(NSString *)property;
#pragma mark - Notifications
/**
Registers a block to be called each time the dictionary changes.
The block will be asynchronously called with the initial dictionary, and then
called again after each write transaction which changes any of the keys or values
within the dictionary.
The `changes` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which keys in the dictionary were added, modified or deleted. If a write transaction
did not modify any keys or values in the dictionary, the block is not called at all.
The error parameter is present only for backwards compatibility and will always
be `nil`.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
Person *person = [[Person allObjectsInRealm:realm] firstObject];
NSLog(@"person.dogs.count: %zu", person.dogs.count); // => 0
self.token = [person.dogs addNotificationBlock(RLMDictionary<NSString *, Dog *><RLMString, Dog> *dogs,
RLMDictionaryChange *changes,
NSError *error) {
// Only fired once for the example
NSLog(@"dogs.count: %zu", dogs.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
person.dogs[@"frenchBulldog"] = dog;
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning This method may only be called on a non-frozen managed dictionary.
@param block The block to be called each time the dictionary changes.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMDictionary<RLMKeyType, RLMObjectType> *_Nullable dictionary,
RLMDictionaryChange *_Nullable changes,
NSError *_Nullable error))block
__attribute__((warn_unused_result));
/**
Registers a block to be called each time the dictionary changes.
The block will be asynchronously called with the initial dictionary, and then
called again after each write transaction which changes any of the
key-value pairs in the dictionary.
The `changes` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which keys in the dictionary were added or modified. If a write transaction
did not modify any objects in the dictionary, the block is not called at all.
The error parameter is present only for backwards compatibility and will always
be `nil`.
Notifications are delivered on the given queue. If the queue is blocked and
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification.
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called when the containing Realm is read-only or frozen.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMDictionary<RLMKeyType, RLMObjectType> *_Nullable dictionary,
RLMDictionaryChange *_Nullable changes,
NSError *_Nullable error))block
queue:(nullable dispatch_queue_t)queue
__attribute__((warn_unused_result));
/**
Registers a block to be called each time the dictionary changes.
The block will be asynchronously called with the initial dictionary, and then
called again after each write transaction which changes any of the
key-value pairs in the dictionary.
The `changes` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which keys in the dictionary were added or modified. If a write transaction
did not modify any objects in the dictionary, the block is not called at all.
The error parameter is present only for backwards compatibility and will always
be `nil`.
Notifications are delivered on the given queue. If the queue is blocked and
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification.
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called when the containing Realm is read-only or frozen.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMDictionary<RLMKeyType, RLMObjectType> *_Nullable dictionary,
RLMDictionaryChange *_Nullable changes,
NSError *_Nullable error))block
keyPaths:(nullable NSArray<NSString *> *)keyPaths
queue:(nullable dispatch_queue_t)queue
__attribute__((warn_unused_result));
/**
Registers a block to be called each time the dictionary changes.
The block will be asynchronously called with the initial dictionary, and then
called again after each write transaction which changes any of the
key-value pairs in the dictionary.
The `changes` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which keys in the dictionary were added or modified. If a write transaction
did not modify any objects in the dictionary, the block is not called at all.
The error parameter is present only for backwards compatibility and will always
be `nil`.
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called when the containing Realm is read-only or frozen.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMDictionary<RLMKeyType, RLMObjectType> *_Nullable dictionary,
RLMDictionaryChange *_Nullable changes,
NSError *_Nullable error))block
keyPaths:(nullable NSArray<NSString *> *)keyPaths
__attribute__((warn_unused_result));
#pragma mark - Freeze
/**
Returns a frozen (immutable) snapshot of a dictionary.
The frozen copy is an immutable dictionary which contains the same data as this
dictionary currently contains, but will not update when writes are made to the
containing Realm. Unlike live dictionaries, frozen dictionaries can be accessed from any
thread.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning This method may only be called on a managed dictionary.
@warning Holding onto a frozen dictionary for an extended period while performing
write transaction on the Realm may result in the Realm file growing
to large sizes. See `RLMRealmConfiguration.maximumNumberOfActiveVersions`
for more information.
*/
- (instancetype)freeze;
/**
Returns a live version of this frozen collection.
This method resolves a reference to a live copy of the same frozen collection.
If called on a live collection, will return itself.
*/
- (instancetype)thaw;
#pragma mark - Unavailable Methods
/**
`-[RLMDictionary init]` is not available because `RLMDictionary`s cannot be created directly.
`RLMDictionary` properties on `RLMObject`s are lazily created when accessed.
*/
- (instancetype)init __attribute__((unavailable("RLMDictionary cannot be created directly")));
/**
`+[RLMDictionary new]` is not available because `RLMDictionary`s cannot be created directly.
`RLMDictionary` properties on `RLMObject`s are lazily created when accessed.
*/
+ (instancetype)new __attribute__((unavailable("RLMDictionary cannot be created directly")));
@end
/**
A `RLMDictionaryChange` object encapsulates information about changes to dictionaries
that are reported by Realm notifications.
`RLMDictionaryChange` is passed to the notification blocks registered with
`-addNotificationBlock` on `RLMDictionary`, and reports what keys in the
dictionary changed since the last time the notification block was called.
*/
@interface RLMDictionaryChange : NSObject
/// The keys in the new version of the dictionary which were newly inserted.
@property (nonatomic, readonly) NSArray<id> *insertions;
/// The keys in the new version of the dictionary which were modified.
@property (nonatomic, readonly) NSArray<id> *modifications;
/// The keys which were deleted from the old version.
@property (nonatomic, readonly) NSArray<id> *deletions;
@end
RLM_HEADER_AUDIT_END(nullability, sendability) | c | github | https://github.com/realm/realm-swift | Realm/RLMDictionary.h |
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
# Python 2/3 compatibility flags and shims used throughout this module.
PY3 = sys.version_info > (3,)
PY2 = not PY3

if PY3:
    from urllib.parse import urlparse, urlunparse
if PY2:
    from urlparse import urlparse, urlunparse

if PY3:
    string_types = str,
else:
    # 'unicode' only exists on Python 2; eval() keeps this line from being
    # rejected when the module is byte-compiled under Python 3.
    string_types = str, eval('unicode')

# Portable "iterate over a mapping's items" helper.
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """
class _SetuptoolsVersionMixin(object):
    # Compatibility shim mixed into packaging's version classes so that
    # objects returned by parse_version() keep behaving like the old
    # (pre-setuptools-8.0) tuple-based parsed versions: they compare
    # against plain tuples and support indexing/iteration over the
    # legacy parts.

    def __hash__(self):
        # Hash like the underlying packaging version class.
        return super(_SetuptoolsVersionMixin, self).__hash__()

    def __lt__(self, other):
        if isinstance(other, tuple):
            # Legacy comparison against an old-style parsed-version tuple.
            return tuple(self) < other
        else:
            return super(_SetuptoolsVersionMixin, self).__lt__(other)

    def __le__(self, other):
        if isinstance(other, tuple):
            return tuple(self) <= other
        else:
            return super(_SetuptoolsVersionMixin, self).__le__(other)

    def __eq__(self, other):
        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return super(_SetuptoolsVersionMixin, self).__eq__(other)

    def __ge__(self, other):
        if isinstance(other, tuple):
            return tuple(self) >= other
        else:
            return super(_SetuptoolsVersionMixin, self).__ge__(other)

    def __gt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) > other
        else:
            return super(_SetuptoolsVersionMixin, self).__gt__(other)

    def __ne__(self, other):
        if isinstance(other, tuple):
            return tuple(self) != other
        else:
            return super(_SetuptoolsVersionMixin, self).__ne__(other)

    def __getitem__(self, key):
        # Index into the legacy parsed-version tuple.
        return tuple(self)[key]

    def __iter__(self):
        # Yield the legacy parsed-version parts, one normalized token at
        # a time, warning that this behavior is deprecated.
        component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
        replace = {
            'pre': 'c',
            'preview': 'c',
            '-': 'final-',
            'rc': 'c',
            'dev': '@',
        }.get

        def _parse_version_parts(s):
            # Split into numeric/alphabetic parts, normalizing pre-release
            # spellings so plain string comparison sorts correctly.
            for part in component_re.split(s):
                part = replace(part, part)
                if not part or part == '.':
                    continue
                if part[:1] in '0123456789':
                    # pad for numeric comparison
                    yield part.zfill(8)
                else:
                    yield '*'+part
            # ensure that alpha/beta/candidate are before final
            yield '*final'

        def old_parse_version(s):
            # Reproduce the pre-8.0 parse_version() tuple result.
            parts = []
            for part in _parse_version_parts(s.lower()):
                if part.startswith('*'):
                    # remove '-' before a prerelease tag
                    if part < '*final':
                        while parts and parts[-1] == '*final-':
                            parts.pop()
                    # remove trailing zeros from each series of numeric parts
                    while parts and parts[-1] == '00000000':
                        parts.pop()
                parts.append(part)
            return tuple(parts)

        # Warn for use of this function
        warnings.warn(
            "You have iterated over the result of "
            "pkg_resources.parse_version. This is a legacy behavior which is "
            "inconsistent with the new version class introduced in setuptools "
            "8.0. In most cases, conversion to a tuple is unnecessary. For "
            "comparison of versions, sort the Version instances directly. If "
            "you have another use case requiring the tuple, please file a "
            "bug with the setuptools project describing that need.",
            RuntimeWarning,
            stacklevel=1,
        )

        for part in old_parse_version(str(self)):
            yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
    # PEP 440-compliant version with legacy tuple-comparison support mixed in.
    pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
                              packaging.version.LegacyVersion):
    # Non-PEP 440 version string, ordered by packaging's legacy scheme.
    pass
def parse_version(v):
    """Parse a version string into a comparable version object.

    Returns a PEP 440 ``SetuptoolsVersion`` when the string complies,
    falling back to ``SetuptoolsLegacyVersion`` for any other string.
    """
    try:
        return SetuptoolsVersion(v)
    except packaging.version.InvalidVersion:
        return SetuptoolsLegacyVersion(v)
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
    """Snapshot every registered module-level state variable.

    Each ``_state_vars`` entry maps a global name to a type tag; the
    matching ``_sget_<tag>`` helper captures that global's current value.
    """
    module_globals = globals()
    return {
        name: module_globals['_sget_' + kind](module_globals[name])
        for name, kind in _state_vars.items()
    }
def __setstate__(state):
    """Restore module-level state previously captured by __getstate__()."""
    module_globals = globals()
    for name, value in state.items():
        setter = module_globals['_sset_' + _state_vars[name]]
        setter(name, module_globals[name], value)
    return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages
    that explicitly require a newer version of Mac OS X, we must also know
    the current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    match = macosVersionString.match(plat)
    if match is None or sys.platform != "darwin":
        return plat
    try:
        # Substitute the *running* OS version for the build-time minimum.
        running = '.'.join(_macosx_vers()[:2])
        plat = 'macosx-%s-%s' % (running, match.group(3))
    except ValueError:
        # not Mac OS X
        pass
    return plat
# Public API of this module; names not listed here are internal.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',

    # Warnings
    'PEP440Warning',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # Render as ClassName(args...) so subclasses report their own name.
        return '{}{}'.format(self.__class__.__name__, repr(self.args))
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        """The installed Distribution (first positional argument)."""
        return self.args[0]

    @property
    def req(self):
        """The conflicting Requirement (second positional argument)."""
        return self.args[1]

    def report(self):
        """Render a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # The set of requirement strings that pulled in the installed dist.
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        """The unsatisfied Requirement (first positional argument)."""
        return self.args[0]

    @property
    def requirers(self):
        """Names of the requirers, or a false value when unknown."""
        return self.args[1]

    @property
    def requirers_str(self):
        """Comma-separated requirer names, with a generic fallback."""
        return ', '.join(self.requirers) if self.requirers else 'the application'

    def report(self):
        """Render a human-readable description of the failure."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry mapping PEP 302 loader types to IResourceProvider factories;
# populated via register_loader_type().
_provider_factories = {}

# 'major.minor' of the running interpreter, e.g. '2.7' or '3.5'.
PY_MAJOR = sys.version[:3]

# Distribution "precedence" constants: higher values are preferred when
# multiple distributions for a project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Resolve the requirement against the global working set, activating
        # it via require() if it is not already active.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet -- import it so its __loader__ can be inspected.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # Dispatch to the provider factory registered for this loader's type.
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    # Older Pythons on Mac OS X need the macosx-<major>.<minor>-<arch> form
    # built by hand.
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        arch = _macosx_arch(machine)
        return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), arch)
    except ValueError:
        # non-Mac darwin system: fall back to the generic platform string
        return plat
# Patterns for "macosx-<major>.<minor>-<arch>" and the legacy
# "darwin-<x>.<y>.<z>-<arch>" platform strings; used by compatible_platforms().
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True

    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False

    # Mac OS X special cases
    provMac = macosVersionString.match(provided)
    if not provMac:
        # Backwards compatibility for packages built before setuptools 0.6,
        # which used a "darwin-..." designation instead of "macosx-...".
        provDarwin = darwinVersionString.match(provided)
        if provDarwin:
            dversion = int(provDarwin.group(1))
            macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
            if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                return True
        # egg isn't macosx or legacy darwin
        return False

    # same major version and machine type required...
    if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
        return False
    # ...and the provided OS minor version must not exceed the required one.
    return int(provMac.group(2)) <= int(reqMac.group(2))
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Grab the *caller's* global namespace and reset it to just __name__,
    # so the script executes as if it were the caller's module body.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Normalize progressively: string -> Requirement -> Distribution.
    if isinstance(dist, string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if isinstance(dist, Distribution):
        return dist
    raise TypeError("Expected string, Requirement, or Distribution", dist)
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface documenting metadata access.

    These are interface stubs: the methods deliberately take no ``self``
    and have no bodies; concrete providers implement them.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Interface stubs only (no ``self``, no bodies); see NullProvider and its
    # subclasses for concrete implementations.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: ordered path items; entry_keys: entry -> [project keys];
        # by_key: project key -> active Distribution; callbacks: subscribers.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Reset the caller's globals (keeping only __name__) before running
        # the script in them; see the module-level run_script().
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                # one dist per project key; first occurrence in entry order wins
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # index the dist's key under both the given entry and its own location
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
            processed[req] = True
        # return list of distros to activate
        return to_activate
    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Notify all subscribers about a newly-activated distribution.
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Pickle copies of the mutable state so later mutation of this
        # working set cannot affect the pickled snapshot.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        # Restore from the 4-tuple produced by __getstate__, again copying
        # so the unpickled instance owns its state.
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    # NOTE: the `platform` and `python` defaults below are evaluated once, at
    # class-definition time, not per call.
    def __init__(self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # project key -> list of Distributions, newest first (see add()).
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep each project's list sorted newest-to-oldest, as
                # promised by __getitem__
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        # Returns None implicitly when no installer is given.
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result imposes no platform/python filtering of its own.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility
# Old name for Environment, kept for code written against early setuptools.
AvailableDistributions = Environment
# Raised by ResourceManager.extraction_error() when file extraction fails.
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""

    # Base directory for extraction; None means "use get_default_cache()".
    extraction_path = None

    def __init__(self):
        # Paths handed out by get_cache_path(), tracked for possible cleanup.
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        # Attach context so callers can inspect what failed and where.
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; any real failure is reported via ExtractionError.
            self.extraction_error()

        self._warn_unsafe_extraction_path(extract_path)

        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
def get_default_cache():
    """Determine the default cache location

    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".

    :raises RuntimeError: on Windows when none of the candidate environment
        variables are set.
    """
    egg_cache = os.environ.get('PYTHON_EGG_CACHE')
    if egg_cache is not None:
        return egg_cache

    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')

    # XXX this may be locale-specific!
    app_data = 'Application Data'
    # Candidate (env-var tuple, subdir) pairs, best first; every variable in
    # the tuple must be set for a candidate to be usable.
    app_homes = [
        # best option, should be locale-safe
        (('APPDATA',), None),
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE', 'HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        # 95/98/ME
        (('WINDIR',), app_data),
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break
        else:
            # all variables in this candidate were set
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    # typo fixed: "enviroment" -> "environment"
    raise RuntimeError(
        "Please set the PYTHON_EGG_CACHE environment variable"
    )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # Collapse every run of characters outside [A-Za-z0-9.] into one dash.
    invalid_run = re.compile('[^A-Za-z0-9.]+')
    return invalid_run.sub('-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Not a valid PEP 440 version: sanitize it instead.
        sanitized = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', sanitized)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    sanitized = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented
on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
    """
    Evaluate a PEP 426 environment marker using markerlib.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    """
    from pip._vendor import _markerlib
    # markerlib implements Metadata 1.2 (PEP 345) environment markers.
    # Translate the variables to Metadata 2.0 (PEP 426).
    env = _markerlib.default_environment()
    # Snapshot the keys first: renaming mutates the dict, and iterating a
    # live dict view while it is resized raises RuntimeError on Python 3.
    for key in list(env.keys()):
        new_key = key.replace('.', '_')
        env[new_key] = env.pop(key)
    try:
        result = _markerlib.interpret(text, env)
    except NameError as e:
        raise SyntaxError(e.args[0])
    return result
# When the stdlib 'parser' module could not be imported at module load
# time (e.g. Jython), use the markerlib-based evaluator instead.
if 'parser' not in globals():
    # Fall back to less-complete _markerlib implementation if 'parser' module
    # is not available.
    evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
    """Dispatch a parse-tree node to the evaluator registered for its symbol."""
    # Collapse single-child wrapper nodes down to the meaningful node.
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    try:
        handler = cls.get_op(nodelist[0])
    except KeyError:
        raise SyntaxError("Comparison or logical expression expected")
    return handler(nodelist)
@classmethod
def evaluate(cls, nodelist):
    """Resolve a terminal node (known name or plain string) to its value."""
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    value = nodelist[1]
    if kind == token.NAME:
        try:
            getter = cls.values[value]
        except KeyError:
            raise SyntaxError("Unknown name %r" % value)
        return getter()
    if kind == token.STRING:
        if not cls._safe_string(value):
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        # Strip the surrounding quote characters.
        return value[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
# Module-level convenience aliases for the class-level implementations.
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # Filled in by subclasses that know about eggs (see EggProvider).
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        # Derive the loader and base path from the module object itself.
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        # Map a resource name to a filesystem-style path under module_path.
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        # Wrap the raw bytes in a file-like object.
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    # get_metadata is defined per major version: Python 2 returns the raw
    # bytes/str; Python 3 decodes the metadata file as UTF-8 text.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")

    def get_metadata_lines(self, name):
        # Yield the non-blank, non-comment lines of a metadata file.
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        # Execute a script shipped in the distribution's scripts/ metadata.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all newline conventions to '\n' before compiling.
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # Script exists only inside the archive: seed linecache so
            # tracebacks can still show its source lines.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename,'exec')
            exec(script_code, namespace, namespace)

    # The following are hooks for registered loader types; NullProvider
    # itself has no way to implement them.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto base using os separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Any importer without a more specific provider falls back to NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        """Locate the enclosing .egg directory (if any) above module_path."""
        # Metadata may be nested inside a "basket" of multiple eggs, so
        # walk upward from module_path instead of trusting .archive.
        current, previous = self.module_path, None
        while current != previous:
            if current.lower().endswith('.egg'):
                self.egg_name = os.path.basename(current)
                self.egg_info = os.path.join(current, 'EGG-INFO')
                self.egg_root = current
                break
            previous = current
            current, _ = os.path.split(current)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def get_resource_stream(self, manager, resource_name):
        """Open the named resource for binary reading."""
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        # Read the whole file as bytes; the context manager closes it.
        with open(path, 'rb') as stream:
            return stream.read()

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)
# A None loader means a plain filesystem sys.path entry.
register_loader_type(type(None), DefaultProvider)

if importlib_machinery is not None:
    # Python 3's standard source-file loader is also filesystem-backed.
    register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    # Proper methods instead of lambdas assigned to names (PEP 8 E731);
    # behavior is unchanged: nothing exists, nothing is listed, reads
    # return an empty string.
    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        # Intentionally skip NullProvider.__init__: there is no module.
        pass
# Shared singleton used when a distribution has no metadata provider.
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            return dict(
                (name.replace('/', os.sep), zfile.getinfo(name))
                for name in zfile.namelist()
            )

    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """

    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        # Rebuild only when unseen or when the archive has changed on disk.
        cached = self.get(path)
        if cached is None or cached.mtime != mtime:
            cached = self.manifest_mod(self.build(path), mtime)
            self[path] = cached
        return cached.manifest
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """

    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        # Modern ZipFile is already a context manager; just return one.
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    # Process-wide manifest cache shared by all ZipProvider instances.
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre+zip_path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        # ZipInfo manifest for the archive, memoized per archive mtime.
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        # Extract the resource (and any eager companions) to the cache
        # directory and return the real filesystem path.
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        # Derive (mtime timestamp, uncompressed size) from a ZipInfo entry.
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        # Directory entries are extracted recursively, child by child.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # Write to a temp name first, then atomically rename into place.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name=='nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size!=size or stat.st_mtime!=timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Names listed in native_libs.txt / eager_resources.txt must be
        # extracted together; cache the combined list on first use.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build a map of each directory subpath to its child names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
# Modules imported from zip archives / eggs use ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name=='PKG-INFO'

    def get_metadata(self, name):
        if name=='PKG-INFO':
            # 'U' (universal-newline) mode is a no-op on Python 3 and was
            # removed in Python 3.11; plain 'r' text mode behaves the same.
            # The context manager also guarantees the handle is closed.
            with open(self.path, 'r') as f:
                metadata = f.read()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # Record where the package lives and where its metadata lives.
        self.egg_info = egg_info
        self.module_path = path
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        # A non-empty prefix means the egg sits inside the archive.
        self.module_path = (
            os.path.join(importer.archive, importer.prefix)
            if importer.prefix
            else importer.archive
        )
        self._setup_prefix()
# Registry mapping importer types to distribution-finder callables
# (populated via register_finder below).
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # Later registrations for the same importer type replace earlier ones.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    imp = get_importer(path_item)
    # Pick the most specific registered finder for this importer type.
    locate = _find_adapter(_distribution_finders, imp)
    return locate(imp, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if not subitem.endswith('.egg'):
            continue
        subpath = os.path.join(path_item, subitem)
        nested_importer = zipimport.zipimporter(subpath)
        for dist in find_eggs_in_zip(nested_importer, subpath):
            yield dist
# Zip archives are scanned for (possibly nested) eggs.
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()
# Default: unknown importer types yield no distributions.
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    # Only readable directories can be scanned.
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # A single metadata file (e.g. PKG-INFO style).
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # Recurse via the registered finders for the egg itself.
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # An .egg-link points at the real location; only its
                    # first non-blank line is used (note the break below).
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        break
# Filesystem path entries (legacy and importlib-based) use find_on_path.
register_finder(pkgutil.ImpImporter, find_on_path)

if importlib_machinery is not None:
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Registries for namespace-package support: handlers per importer type,
# and the set of declared namespace packages with their children.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # Later registrations for the same importer type replace earlier ones.
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # First sighting: create a bare package module with an empty
        # __path__ and attach it to its parent package.
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    # Ask the importer-specific handler whether path_item contributes a
    # subpath for this namespace package.
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        # Importing may replace module.__path__; re-merge any entries the
        # loader dropped so previously-found subpaths are preserved.
        loader.load_module(packageName)
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # The import lock serializes mutation of the namespace registries.
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return

        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare (and if needed import) the parent, then
            # search the parent's __path__ rather than sys.path.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        # Re-handle every declared namespace child of `parent`; recurse so
        # grandchildren pick up the new path item too.
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    already_present = any(
        _normalize_cached(entry) == normalized
        for entry in module.__path__
    )
    # Only return the path if it's not already there
    if not already_present:
        return subpath
# Filesystem and zip importers share the same subdirectory-based handler.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_machinery is not None:
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler that declines to contribute any subpath."""
    return None
# Default: unknown importer types contribute nothing to namespaces.
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks first, then apply platform case-folding.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    """Memoizing wrapper around normalize_path."""
    # The mutable default is deliberate: a process-wide memo table.
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, string_types):
        for line in strs.splitlines():
            line = line.strip()
            # skip blank lines/comments
            if line and not line.startswith('#'):
                yield line
    else:
        # A nested iterable of strings: flatten recursively.
        for item in strs:
            for line in yield_lines(item):
                yield line
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize extras by parsing them via a dummy requirement.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        # Round-trippable "name = module:attrs [extras]" representation.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # Walk the dotted attribute chain from the imported module.
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        # Resolving extras requires knowing the owning distribution.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        # Add every resolved distribution to the working set.
        list(map(working_set.add, items))

    # Grammar of an entry-point specification line.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        # Parse "[a,b]" via a dummy requirement; reject version specifiers.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            # INI-style text: section name -> lines in that section.
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
    """Warn if a top-level module of this dist was already imported elsewhere."""
    if self.key == 'setuptools':
        # ignore the inevitable setuptools self-conflicts :(
        return

    # Namespace packages are shared between dists and never conflict.
    nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
    loc = normalize_path(self.location)
    for modname in self._get_metadata('top_level.txt'):
        if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages):
            continue
        if modname in ('pkg_resources', 'setuptools', 'site'):
            continue
        fn = getattr(sys.modules[modname], '__file__', None)
        if fn and (normalize_path(fn).startswith(loc) or
                   fn.startswith(self.location)):
            # The already-imported module came from this very dist: fine.
            continue
        issue_warning(
            "Module %s was already imported from %s, but %s is being added"
            " to sys.path" % (modname, fn, self.location),
        )
def has_version(self):
    """Return True if this distribution's version metadata is readable."""
    try:
        self.version
    except ValueError:
        # Version metadata missing or unparseable (e.g. an unbuilt egg).
        issue_warning("Unbuilt egg for " + repr(self))
        return False
    return True
def clone(self,**kw):
    """Copy this distribution, substituting in any changed keyword args"""
    names = 'project_name version py_version platform location precedence'
    for attr in names.split():
        # Fall back to this instance's value for anything not overridden.
        kw.setdefault(attr, getattr(self, attr, None))
    kw.setdefault('metadata', self._provider)
    return self.__class__(**kw)
@property
def extras(self):
    # The dependency map keys are extra names, with None holding the
    # unconditional dependencies; filter None (and '') out.
    return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    # .dist-info distributions keep their metadata in a file named METADATA.
    PKG_INFO = 'METADATA'
    # Matches a bare version inside parens/commas so "==" can be prefixed,
    # e.g. "Foobar (1)" -> "Foobar (==1)".
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # Parse the email-header-style METADATA file once and memoize.
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        # Lazily computed mapping of extra name (or None) -> requirements.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')

        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parentheses.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from pip._vendor._markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}

        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Attach the compiled environment-marker predicate to the req.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)

        def reqs_for_extra(extra):
            # Requirements whose marker holds when `extra` is selected.
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)

        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Store only the requirements beyond the unconditional ones.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)

        return dm
# Map metadata directory suffix -> Distribution class used to represent it.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    frame_level = 1
    module_globals = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(frame_level).f_globals is module_globals:
            frame_level += 1
    except ValueError:
        # Walked off the top of the stack; use whatever level we reached.
        pass
    warnings.warn(stacklevel=frame_level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""

    def __str__(self):
        # Render the constructor arguments as one space-separated message.
        return " ".join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        # Scan a comma-separated list of ITEM tokens starting at line[p:],
        # pulling in continuation lines, until TERMINATOR matches.
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    msg = "\\ must not appear on the last nonblank line"
                    raise RequirementParseError(msg)

            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise RequirementParseError(msg, line, "at", line[p:])

            items.append(match.group(*groups))
            p = match.end()

            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise RequirementParseError(msg, line, "at", line[p:])

        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items

    for line in lines:
        match = DISTRO(line)
        if not match:
            raise RequirementParseError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []

        match = OBRACKET(line, p)
        if match:
            p = match.end()
            # Bracketed extras list, e.g. "name[extra1,extra2]".
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )

        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
                                   "version spec")
        # Each spec is an (operator, version) pair.
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)
class Requirement:
    """A parsed project requirement: name, version specifiers, and extras."""

    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        # Keep the original spelling; the safe/canonical name drives matching.
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        self.specifier = packaging.specifiers.SpecifierSet(
            ",".join(["".join([x, y]) for x, y in specs])
        )
        self.specs = specs
        self.extras = tuple(map(safe_extra, extras))
        # Equality and hashing ignore name spelling and spec/extra ordering.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
        )
        self.__hash = hash(self.hashCmp)

    def __str__(self):
        extras = ','.join(self.extras)
        if extras:
            extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, self.specifier)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        # A Distribution matches only if its key agrees; then test its version.
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False

            item = item.version

        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self): return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        # `s` must contain exactly one requirement specification.
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the MRO of ob's class, most-derived first; the first registered
    # type wins. Returns None implicitly when nothing matches.
    candidate_types = _get_mro(getattr(ob, '__class__', type(ob)))
    for candidate in candidate_types:
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        # Create all missing ancestors in one call.
        os.makedirs(parent)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # Recursively create missing ancestors first, then this directory.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    current_section = None
    current_lines = []
    for line in yield_lines(s):
        if not line.startswith("["):
            # Ordinary content line for the current section.
            current_lines.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # New header: flush the previous segment (unless it is the empty
        # implicit leading segment), then start fresh.
        if current_section or current_lines:
            yield current_section, current_lines
        current_section = line[1:-1].strip()
        current_lines = []
    # wrap up last segment
    yield current_section, current_lines
def _mkstemp(*args,**kw):
    """tempfile.mkstemp variant that bypasses the sandboxed os.open."""
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args,**kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Call ``f(*args, **kwargs)`` for its side effects, then return ``f``.

    Used as a decorator to run module-initialization functions at import
    time while keeping a reference to them.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export every public ResourceManager method at module level.
    for name in dir(manager):
        if not name.startswith('_'):
            g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)

    # Module-level aliases for the master working set's public API.
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path, and ensure that
    # all distributions added to the working set in the future (e.g. by
    # calling ``require()``) will get activated as well.
    add_activation_listener(lambda dist: dist.activate())
    working_set.entries=[]
    # match order
    list(map(working_set.add_entry, sys.path))

    # Publish every local name defined above into the module namespace.
    globals().update(locals())
"""This script removes all custom parameters that has not been used
in dimensions as labels and also resets the value for the other
parameters to zero or null."""
#pylint: disable=import-error,invalid-name,broad-except
from pyrevit import revit, DB
from pyrevit import script
from pyrevit import forms
# Module-level pyRevit helpers: output window and script logger.
output = script.get_output()
logger = script.get_logger()

# make sure user has saved open models in case the tool crashes
if not forms.alert('Make sure your models are saved and synced. '
                   'Hit OK to continue...', cancel=True):
    script.exit()

# ensure active document is a family document
forms.check_familydoc(revit.doc, exitscript=True)
def reset_param(family_param, reset_message):
    """Clear the formula and zero/blank the value of ``family_param``.

    Appends a description of each action (or failure) to ``reset_message``
    and returns the accumulated message for logging.
    """
    try:
        # reset formula
        if family_param.CanAssignFormula:
            revit.doc.FamilyManager.SetFormula(family_param, None)
            reset_message += '\n\tFormula set to none'
        # reset value
        if family_param.StorageType == DB.StorageType.Integer \
                or family_param.StorageType == DB.StorageType.Double:
            revit.doc.FamilyManager.Set(family_param, 0)
            reset_message += '\n\tValue set to 0'
        elif family_param.StorageType == DB.StorageType.String:
            revit.doc.FamilyManager.Set(family_param, '')
            reset_message += '\n\tValue set to empty string'
    except Exception as e:
        # Best-effort: record the failure in the message instead of raising.
        reset_message += '\n\tFailed reset. | %s' % str(e)
    return reset_message
if __name__ == '__main__':
    # get parameters to purge
    family_params = forms.select_family_parameters(revit.doc)
    if family_params:
        # now purge
        max_progress = len(family_params)
        with revit.Transaction('Remove all family parameters'):
            for idx, param in enumerate(family_params):
                logger.debug('%s, %s', param.Id, param.Definition)
                output.update_progress(idx + 1, max_progress)
                # if builtin, reset values and skip delete
                # (builtin parameters have negative element ids)
                if param.Id.IntegerValue < 0:
                    message = \
                        'Can not delete builtin "%s"' % param.Definition.Name
                    logger.warning(reset_param(param, message))
                    continue
                # otherwise delete
                try:
                    print('Removing "{}" ({})'.format(
                        param.Definition.Name, param.Id))
                    revit.doc.FamilyManager.RemoveParameter(param)
                except Exception:
                    # if delete error, reset values and skip delete
                    message = 'Can not delete "%s"' % param.Definition.Name
                    logger.error(reset_param(param, message))
# validateNoCapitalizedCalls
## File
`src/Validation/ValidateNoCapitalizedCalls.ts`
## Purpose
This validation pass ensures that capitalized functions are not called directly in a component. In React, capitalized functions are conventionally reserved for components, which should be invoked via JSX syntax rather than direct function calls.
Direct calls to capitalized functions can cause issues because:
1. Components may contain hooks, and calling them directly violates the Rules of Hooks
2. The React runtime expects components to be rendered via JSX for proper reconciliation
3. Direct calls bypass React's rendering lifecycle and state management
This validation is opt-in and controlled by the `validateNoCapitalizedCalls` configuration option.
## Input Invariants
- The function has been lowered to HIR
- Global bindings have been resolved
- The `validateNoCapitalizedCalls` configuration option is enabled (via pragma or config)
## Validation Rules
### Rule 1: No Direct Calls to Capitalized Globals
Capitalized global functions (not in the allowlist) cannot be called directly.
**Error:**
```
Error: Capitalized functions are reserved for components, which must be invoked with JSX. If this is a component, render it with JSX. Otherwise, ensure that it has no hook calls and rename it to begin with a lowercase letter. Alternatively, if you know for a fact that this function is not a component, you can allowlist it via the compiler config
[FunctionName] may be a component.
```
### Rule 2: No Direct Method Calls to Capitalized Properties
Capitalized methods on objects cannot be called directly.
**Error:**
```
Error: Capitalized functions are reserved for components, which must be invoked with JSX. If this is a component, render it with JSX. Otherwise, ensure that it has no hook calls and rename it to begin with a lowercase letter. Alternatively, if you know for a fact that this function is not a component, you can allowlist it via the compiler config
[MethodName] may be a component.
```
## Algorithm
### Phase 1: Build Allowlist
```typescript
const ALLOW_LIST = new Set([
...DEFAULT_GLOBALS.keys(), // Built-in globals (Array, Object, etc.)
...(envConfig.validateNoCapitalizedCalls ?? []), // User-configured allowlist
]);
const hookPattern = envConfig.hookPattern != null
? new RegExp(envConfig.hookPattern)
: null;
const isAllowed = (name: string): boolean => {
return ALLOW_LIST.has(name) ||
(hookPattern != null && hookPattern.test(name));
};
```
### Phase 2: Track Capitalized Globals and Properties
```typescript
const capitalLoadGlobals = new Map<IdentifierId, string>();
const capitalizedProperties = new Map<IdentifierId, string>();
```
### Phase 3: Scan Instructions
```typescript
for (const instr of block.instructions) {
switch (value.kind) {
case 'LoadGlobal':
// Track capitalized globals (excluding CONSTANTS)
if (
value.binding.name !== '' &&
/^[A-Z]/.test(value.binding.name) &&
!(value.binding.name.toUpperCase() === value.binding.name) &&
!isAllowed(value.binding.name)
) {
capitalLoadGlobals.set(lvalue.identifier.id, value.binding.name);
}
break;
case 'CallExpression':
// Check if calling a tracked capitalized global
const calleeName = capitalLoadGlobals.get(value.callee.identifier.id);
if (calleeName != null) {
CompilerError.throwInvalidReact({
reason: 'Capitalized functions are reserved for components...',
description: `${calleeName} may be a component`,
...
});
}
break;
case 'PropertyLoad':
// Track capitalized properties
if (typeof value.property === 'string' && /^[A-Z]/.test(value.property)) {
capitalizedProperties.set(lvalue.identifier.id, value.property);
}
break;
case 'MethodCall':
// Check if calling a tracked capitalized property
const propertyName = capitalizedProperties.get(value.property.identifier.id);
if (propertyName != null) {
errors.push({
reason: 'Capitalized functions are reserved for components...',
description: `${propertyName} may be a component`,
...
});
}
break;
}
}
```
## Edge Cases
### ALL_CAPS Constants
Functions with names that are entirely uppercase (like `CONSTANTS`) are not flagged:
```javascript
const x = MY_CONSTANT(); // Not an error - all caps indicates a constant, not a component
const y = MyComponent(); // Error - PascalCase indicates a component
```
### Built-in Globals
The default globals from `DEFAULT_GLOBALS` are automatically allowlisted:
```javascript
const arr = Array(5); // OK - Array is a built-in
const obj = Object.create(null); // OK - Object is a built-in
```
### User-Configured Allowlist
Users can allowlist specific functions via configuration:
```typescript
validateNoCapitalizedCalls: ['MyUtility', 'SomeFactory']
```
### Hook Patterns
Functions matching the configured hook pattern are allowed even if capitalized:
```typescript
// With hookPattern: 'React\\$use.*'
const x = React$useState(); // Allowed if it matches the hook pattern
```
### Method Calls vs Function Calls
Both direct function calls and method calls on objects are checked:
```javascript
MyComponent(); // Error - direct call
someObject.MyComponent(); // Error - method call
```
### Chained Property Access
Only the immediate property being called is checked:
```javascript
a.b.MyComponent(); // Only checks if MyComponent is capitalized
```
## TODOs
None in the source file.
## Example
### Fixture: `error.capitalized-function-call.js`
**Input:**
```javascript
// @validateNoCapitalizedCalls
function Component() {
const x = SomeFunc();
return x;
}
```
**Error:**
```
Error: Capitalized functions are reserved for components, which must be invoked with JSX. If this is a component, render it with JSX. Otherwise, ensure that it has no hook calls and rename it to begin with a lowercase letter. Alternatively, if you know for a fact that this function is not a component, you can allowlist it via the compiler config
SomeFunc may be a component.
error.capitalized-function-call.ts:3:12
1 | // @validateNoCapitalizedCalls
2 | function Component() {
> 3 | const x = SomeFunc();
| ^^^^^^^^^^ Capitalized functions are reserved for components...
4 |
5 | return x;
6 | }
```
### Fixture: `error.capitalized-method-call.js`
**Input:**
```javascript
// @validateNoCapitalizedCalls
function Component() {
const x = someGlobal.SomeFunc();
return x;
}
```
**Error:**
```
Error: Capitalized functions are reserved for components, which must be invoked with JSX. If this is a component, render it with JSX. Otherwise, ensure that it has no hook calls and rename it to begin with a lowercase letter. Alternatively, if you know for a fact that this function is not a component, you can allowlist it via the compiler config
SomeFunc may be a component.
error.capitalized-method-call.ts:3:12
1 | // @validateNoCapitalizedCalls
2 | function Component() {
> 3 | const x = someGlobal.SomeFunc();
| ^^^^^^^^^^^^^^^^^^^^^ Capitalized functions are reserved for components...
4 |
5 | return x;
6 | }
```
### Fixture: `capitalized-function-allowlist.js` (No Error)
**Input:**
```javascript
// @validateNoCapitalizedCalls:["SomeFunc"]
function Component() {
const x = SomeFunc();
return x;
}
```
**Output:**
Compiles successfully because `SomeFunc` is in the allowlist. | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/docs/passes/42-validateNoCapitalizedCalls.md |
'''
eveningcals: takes evening calibration sequence:
# 1 long darks
# 1 QTH
# ThAr and UNe at both dither A and dither B
# 1 long darks
# 10/1/2011
History
02-21-2013 EM: proceed if gang connector is in podium;
02-21-2013 EM: UT time changed to TAI
02-21-2013 EM: check time when to run 22-24 h, if other time - ask conformation
08/29/2013 EM: changed mcp.gang descriptions for updated keyword
02-17-2014 EM: fixed bug: checkFail was False; changed it to True so the
script halts if a command faults
10-02-2015 Changed enum value for gang position (podium 12) from int to string,
based on recent opscore changes
'''
import RO.Wdg
import TUI.Models
import time, datetime
import RO.Astro.Tm
import subprocess
import tkMessageBox as box
import subprocess
class ScriptClass(object):
    """TUI script that takes the APOGEE evening calibration sequence.

    Sequence: 60-read dark, quartz flat, ThAr and UNe arcs at dithers A
    and B, and a closing 60-read dark. Refuses to run unless the gang
    connector is in the podium (dense) position, and warns when started
    outside 22:00-24:00 TAI.
    """

    def __init__(self, sr):
        # if True, run in debug-only mode (which doesn't DO anything)
        # if False, real time run
        sr.debug = False
        self.sr = sr
        sr.master.winfo_toplevel().wm_resizable(True, True)
        self.logWdg = RO.Wdg.LogWdg(master=sr.master, width=35, height=20,)
        self.logWdg.grid(row=0, column=0, sticky="news")
        sr.master.rowconfigure(0, weight=1)
        sr.master.columnconfigure(0, weight=1)
        self.redWarn = RO.Constants.sevError
        self.name = "APOGEE: Evening Cals"
        self.ver = "11Oct01"
        self.logWdg.text.tag_config("a", foreground="magenta")
        self.logWdg.addMsg('%s, v-%s ' % (self.name, self.ver))
        self.logWdg.addMsg(" %s " % (" 60-reads dark"))
        self.logWdg.addMsg(" %s " % (" QuartzFlat"))
        self.logWdg.addMsg(" %s " % (" ThAr and UNe at both dither A and dither B"))
        self.logWdg.addMsg(" %s " % (" 60-reads dark"))
        self.logWdg.addMsg("-"*20)

    def getTAITimeStr(self,):
        """Return (TAI time string, TAI date string, TAI time tuple)."""
        currPythonSeconds = RO.Astro.Tm.getCurrPySec()
        self.currTAITuple = time.gmtime(
            currPythonSeconds - RO.Astro.Tm.getUTCMinusTAI())
        self.taiTimeStr = time.strftime("%H:%M:%S", self.currTAITuple)
        self.taiDateStr = time.strftime("%Y-%m-%d", self.currTAITuple)
        return self.taiTimeStr, self.taiDateStr, self.currTAITuple

    def checkGangPodium(self, sr):
        """Return True if the APOGEE gang connector is at podium dense (12)."""
        self.mcpModel = TUI.Models.getModel("mcp")
        ngang = sr.getKeyVar(self.mcpModel.apogeeGang, ind=0, defVal=0)
        hlp = self.mcpModel.apogeeGangLabelDict.get(ngang, "?")
        self.logWdg.addMsg("mcp.gang=%s (%s)" % (ngang, hlp))
        # Position is a string enum since the 2015 opscore change (see
        # module history); compare against '12', not the integer 12.
        if ngang != '12':
            self.logWdg.addMsg(" Error: mcp.gang must be = 12 (podium dense)",
                               severity=RO.Constants.sevError)
            subprocess.Popen(['say', 'gang error'])
            return False
        else:
            return True

    def checkTime(self, h1, h2, mes1):
        """Check that the current time is within [h1:00, h2:00].

        If not, ask the user for confirmation; raises sr.ScriptError when
        the user cancels. Returns True when it is OK to proceed.
        """
        sr = self.sr
        tai, date, currTAITuple = self.getTAITimeStr()
        mes2 = "TAI = %s (expect %2i:00-%2i:00)" % (tai, h1, h2)
        timeNow = datetime.datetime.utcnow()

        def todayAt(timeNow, hr, minute=0, second=0, micros=0):
            # Helper: today's date at hr:minute:second; 24:00 is clamped to
            # 23:59:59 since datetime cannot represent hour 24.
            if hr == 24:
                hr = 23
                minute = 59
                second = 59
            return timeNow.replace(hour=hr, minute=minute, second=second,
                                   microsecond=micros)

        q1 = todayAt(timeNow, h1) <= timeNow <= todayAt(timeNow, h2)
        if q1:
            self.logWdg.addMsg("%s - ok " % (mes2))
            ask = True
        else:
            self.logWdg.addMsg("%s" % (mes2))
            mes4 = " Time WARNING: start anyway? "
            self.logWdg.addMsg("%s" % (mes4), severity=self.redWarn)
            subprocess.Popen(['say', " time warning"])
            df = 'no'
            ss = "%s\n\n %s\n\n %s" % (mes1, mes2, mes4)
            ask = box.askyesno(mes1, ss, default=df, icon="warning")
            if ask == False:
                self.logWdg.addMsg(" -- canceled")
                subprocess.Popen(['say', "canceled"])
                self.logWdg.addMsg(" ")
                raise sr.ScriptError("canceled")
            else:
                self.logWdg.addMsg(" -- started")
                subprocess.Popen(['say', "started"])
        return ask

    def run(self, sr):
        """Generator entry point: run the full evening calibration sequence."""
        tm = self.getTAITimeStr()[0]
        self.logWdg.addMsg("-- %s -- %s " % (tm, self.name), tags=["a"])

        # Proceed only if the gang connector is in the podium (dense).
        if not self.checkGangPodium(sr):
            raise sr.ScriptError("")

        # Evening cals are expected between 22:00 and 24:00.
        h1 = 22
        h2 = 24
        mes1 = "EVENING cals"
        if not self.checkTime(h1, h2, mes1):
            return

        subprocess.Popen(['say', "apogee Cals started"])
        for actorCmd in [
            "apogeecal allOff",
            "apogee shutter close",
            "apogee expose nreads=60 ; object=Dark",
            "apogee shutter open",
            "apogeecal shutterOpen",
            "apogeecal SourceOn source=Quartz",
            "apogee expose nreads=10 ; object=QuartzFlat",
            "apogeecal SourceOff source=Quartz",
            "apogee dither namedpos=A",
            "apogeecal SourceOn source=ThAr",
            "apogee expose nreads=12 ; object=ArcLamp",
            "apogeecal SourceOff source=ThAr",
            "apogeecal SourceOn source=UNe",
            "apogee expose nreads=40 ; object=ArcLamp",
            "apogeecal SourceOff source=UNe",
            "apogee dither namedpos=B",
            "apogeecal SourceOn source=ThAr",
            "apogee expose nreads=12 ; object=ArcLamp",
            "apogeecal SourceOff source=ThAr",
            "apogeecal SourceOn source=UNe",
            "apogee expose nreads=40 ; object=ArcLamp",
            "apogeecal SourceOff source=UNe",
            "apogee dither namedpos=A",
            "apogeecal shutterClose",
            "apogeecal allOff",
            "apogee shutter close",
            "apogee expose nreads=60 ; object=Dark",
        ]:
            actor, cmd = actorCmd.split(None, 1)
            self.logWdg.addMsg("%s .... " % (actorCmd,))
            yield sr.waitCmd(actor=actor, cmdStr=cmd, checkFail=True,)
            cmdVar = sr.value
            if cmdVar.didFail:
                # Bug fix: the original format string had no %s placeholder,
                # so this line raised TypeError instead of logging the
                # failed command.
                self.logWdg.addMsg("%s ** FAILED **" % (actorCmd),
                                   severity=RO.Constants.sevError)
                raise sr.ScriptError("")

        tm = self.getTAITimeStr()[0]
        self.logWdg.addMsg("-- %s-- done --" % (tm), tags=["a"])
        subprocess.Popen(['say', "apogee Cals finished"])
        self.logWdg.addMsg("")
# def getTAITimeStr(self,):
# currPythonSeconds = RO.Astro.Tm.getCurrPySec()
# self.currTAITuple= time.gmtime(currPythonSeconds - RO.Astro.Tm.getUTCMinusTAI())
# self.taiTimeStr = time.strftime("%H:%M:%S", self.currTAITuple)
# self.taiDateStr = time.strftime("%Y-%m-%d", self.currTAITuple)
# return self.taiTimeStr, self.taiDateStr,self.currTAITuple
# call: tai, date, currTAITuple= self.getTAITimeStr() | unknown | codeparrot/codeparrot-clean | ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/input/gpio-matrix-keypad.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: GPIO matrix keypad
maintainers:
- Marek Vasut <marek.vasut@gmail.com>
description:
GPIO driven matrix keypad is used to interface a SoC with a matrix keypad.
The matrix keypad supports multiple row and column lines, a key can be
placed at each intersection of a unique row and a unique column. The matrix
keypad can sense a key-press and key-release by means of GPIO lines and
report the event using GPIO interrupts to the cpu.
allOf:
- $ref: /schemas/input/matrix-keymap.yaml#
properties:
compatible:
const: gpio-matrix-keypad
row-gpios:
description:
List of GPIOs used as row lines. The gpio specifier for this property
depends on the gpio controller to which these row lines are connected.
col-gpios:
description:
List of GPIOs used as column lines. The gpio specifier for this property
depends on the gpio controller to which these column lines are connected.
linux,keymap: true
linux,no-autorepeat:
type: boolean
description: Do not enable autorepeat feature.
gpio-activelow:
type: boolean
description:
Force GPIO polarity to active low.
In the absence of this property GPIOs are treated as active high.
debounce-delay-ms:
description: Debounce interval in milliseconds.
default: 0
col-scan-delay-us:
description:
Delay, measured in microseconds, that is needed
before we can scan keypad after activating column gpio.
default: 0
all-cols-on-delay-us:
description:
Delay, measured in microseconds, that is needed
after activating all column gpios.
default: 0
drive-inactive-cols:
type: boolean
description:
Drive inactive columns during scan,
default is to turn inactive columns into inputs.
wakeup-source: true
required:
- compatible
- row-gpios
- col-gpios
- linux,keymap
additionalProperties: false
examples:
- |
matrix-keypad {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
col-scan-delay-us = <2>;
row-gpios = <&gpio2 25 0
&gpio2 26 0
&gpio2 27 0>;
col-gpios = <&gpio2 21 0
&gpio2 22 0>;
linux,keymap = <0x0000008B
0x0100009E
0x02000069
0x0001006A
0x0101001C
0x0201006C>;
wakeup-source;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/input/gpio-matrix-keypad.yaml |
# -*- coding: utf-8 -*-
#
# Copyright (©) 2014 Gustavo Noronha Silva
# Copyright (©) 2016 Marcelo Jorge Vieira
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This hack makes django less memory hungry (it caches queries when running
# with debug enabled.
import codecs
import sys
from datetime import date
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Sum
from montanha.models import (
Institution, Expense, ExpenseNature, Legislator, Supplier, PerNature,
PerNatureByYear, PerNatureByMonth, PerLegislator, BiggestSupplierForYear
)
from montanha.util import (
filter_for_institution, get_date_ranges_from_data, ensure_years_in_range
)
settings.DEBUG = False
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
sys.stderr = codecs.getwriter("utf-8")(sys.stderr)
class Command(BaseCommand):
help = "Collects data for a number of sources"
def add_arguments(self, parser):
    # Positional: zero or more institution siglums to consolidate.
    parser.add_argument('house', type=str, nargs='*', default='')
    # --agnostic: also run the institution-independent consolidation
    # step (handled by self.agnostic(); defined elsewhere in this class).
    parser.add_argument(
        '--agnostic',
        action='store_true',
        dest='agnostic',
        default=False,
    )
def handle(self, *args, **options):
    # Consolidate each requested institution, matched by siglum
    # case-insensitively; unknown siglums are reported and skipped.
    for house in options.get('house'):
        try:
            institution = Institution.objects.get(siglum__iexact=house)
            print u'Consolidating data for %s' % (institution.name)
            self.per_nature(institution)
            self.per_legislator(institution)
        except Institution.DoesNotExist:
            print u'Institution %s does not exist' % house

    # Institution-independent consolidation, opt-in via --agnostic.
    if options.get('agnostic'):
        self.agnostic()
def per_nature(self, institution):
    """Rebuild PerNature, PerNatureByYear and PerNatureByMonth rows for
    `institution` from the raw Expense table."""
    # Start from a clean slate: drop previously consolidated rows.
    PerNature.objects.filter(institution=institution).delete()
    PerNatureByYear.objects.filter(institution=institution).delete()
    PerNatureByMonth.objects.filter(institution=institution).delete()

    data = Expense.objects.all()
    data = filter_for_institution(data, institution)

    date_ranges = get_date_ranges_from_data(institution, data)

    # Total expensed per nature, largest first.
    data = data \
        .values('nature__id') \
        .annotate(expensed=Sum('expensed')) \
        .order_by('-expensed')

    years = [d.year for d in Expense.objects.dates('date', 'year')]
    years = ensure_years_in_range(date_ranges, years)

    # Accumulate rows and bulk-insert at the end to limit query count.
    per_natures_to_create = list()
    per_natures_by_year_to_create = list()
    per_natures_by_month_to_create = list()
    for item in data:
        # Totals
        nature = ExpenseNature.objects.get(id=item['nature__id'])
        p = PerNature(
            institution=institution,
            date_start=date_ranges['cdf'],
            date_end=date_ranges['cdt'],
            nature=nature,
            expensed=item['expensed']
        )
        per_natures_to_create.append(p)

        # Totals for Legislature
        per_natures_to_create += self._per_nature_total_for_legislature(
            institution, nature
        )

        # By Year
        year_to_create, month_to_create = self._per_nature_by_year(
            years, institution, nature
        )
        per_natures_by_year_to_create += year_to_create
        per_natures_by_month_to_create += month_to_create

    PerNature.objects.bulk_create(per_natures_to_create)
    PerNatureByMonth.objects.bulk_create(per_natures_by_month_to_create)
    PerNatureByYear.objects.bulk_create(per_natures_by_year_to_create)
def _per_nature_total_for_legislature(self, institution, nature):
    """Build (unsaved) PerNature rows totalling `nature` expenses for each
    legislature of `institution`."""
    per_natures_to_create = list()
    for legislature in institution.legislature_set.all():
        print u'[%s] Consolidating nature %s totals for legislature %d-%d…' % (
            institution.siglum,
            nature.name,
            legislature.date_start.year,
            legislature.date_end.year
        )

        legislature_data = Expense.objects \
            .filter(nature=nature) \
            .filter(mandate__legislature=legislature)

        legislature_ranges = get_date_ranges_from_data(institution, legislature_data)

        legislature_data = legislature_data \
            .values('nature__id') \
            .annotate(expensed=Sum('expensed')) \
            .order_by('-expensed')

        # No expenses for this legislature: record an explicit zero total.
        if legislature_data:
            legislature_data = legislature_data[0]
        else:
            legislature_data = dict(expensed='0.')

        p = PerNature(
            institution=institution,
            legislature=legislature,
            date_start=legislature_ranges['cdf'],
            date_end=legislature_ranges['cdt'],
            nature=nature,
            expensed=legislature_data['expensed']
        )
        per_natures_to_create.append(p)
    return per_natures_to_create
def _per_nature_by_year(self, years, institution, nature):
per_natures_by_year_to_create = list()
per_natures_by_month_to_create = list()
for year in years:
print u'[%s] Consolidating nature %s totals for year %d…' % (
institution.siglum, nature.name, year
)
year_data = Expense.objects \
.filter(nature=nature) \
.filter(date__year=year)
year_data = filter_for_institution(year_data, institution)
# By Month
per_natures_by_month_to_create += self._per_nature_by_month(
year_data, year, institution, nature
)
year_data = year_data \
.values('nature__id') \
.annotate(expensed=Sum("expensed"))
if year_data:
year_data = year_data[0]
else:
year_data = dict(expensed='0.')
p = PerNatureByYear(
institution=institution,
year=year,
nature=nature,
expensed=float(year_data['expensed'])
)
per_natures_by_year_to_create.append(p)
return per_natures_by_year_to_create, per_natures_by_month_to_create
def _per_nature_by_month(self, year_data, year, institution, nature):
per_natures_by_month_to_create = list()
last_date = year_data and year_data.order_by('-date')[0].date or date.today()
for month in range(1, 13):
print u'[%s] Consolidating nature %s totals for %d-%d…' % (
institution.siglum, nature.name, year, month
)
month_date = date(year, month, 1)
if month_date >= last_date:
break
mdata = year_data.filter(date__month=month) \
.values('nature__id') \
.annotate(expensed=Sum('expensed')) \
.order_by('-expensed')
if mdata:
mdata = mdata[0]
else:
mdata = dict(expensed='0.')
p = PerNatureByMonth(
institution=institution,
date=month_date,
nature=nature,
expensed=float(mdata['expensed'])
)
per_natures_by_month_to_create.append(p)
return per_natures_by_month_to_create
def per_legislator(self, institution):
PerLegislator.objects.filter(institution=institution).delete()
data = Expense.objects.all()
data = filter_for_institution(data, institution)
date_ranges = get_date_ranges_from_data(institution, data)
data = data \
.values('mandate__legislator__id') \
.annotate(expensed=Sum('expensed'))
per_legislators_to_create = list()
for item in data:
legislator = Legislator.objects.get(id=int(item['mandate__legislator__id']))
# Totals for Legislature
for legislature in institution.legislature_set.all():
print u'[%s] Consolidating legislator %s totals for legislature %d-%d…' % (
institution.siglum,
legislator.name,
legislature.date_start.year,
legislature.date_end.year
)
legislature_data = Expense.objects \
.filter(mandate__legislature=legislature) \
.filter(mandate__legislator=legislator) \
.values('mandate__legislator__id') \
.annotate(expensed=Sum('expensed')) \
.order_by('-expensed')
if legislature_data:
legislature_data = legislature_data[0]
else:
legislature_data = dict(expensed='0.')
p = PerLegislator(
institution=institution,
legislature=legislature,
date_start=date_ranges['cdf'],
date_end=date_ranges['cdt'],
legislator=legislator,
expensed=legislature_data['expensed']
)
per_legislators_to_create.append(p)
print u'[%s] Consolidating totals for legislator %s…' % (
institution.siglum, legislator.name
)
p = PerLegislator(
institution=institution,
date_start=date_ranges['cdf'],
date_end=date_ranges['cdt'],
legislator=legislator,
expensed=item['expensed']
)
per_legislators_to_create.append(p)
PerLegislator.objects.bulk_create(per_legislators_to_create)
def agnostic(self):
# Institution-agnostic consolidations - biggest suppliers
print u'Consolidating institution-agnostic totals…'
BiggestSupplierForYear.objects.all().delete()
years = [d.year for d in Expense.objects.dates('date', 'year')]
for year in years:
print u'Consolidating supplier totals for year %d…' % year
data = Expense.objects \
.filter(date__year=year) \
.values('supplier__id') \
.annotate(expensed=Sum('expensed')) \
.order_by('-expensed')
biggest_suppliers_for_year_to_add = list()
for item in data:
supplier = Supplier.objects.get(id=item['supplier__id'])
b = BiggestSupplierForYear(
supplier=supplier,
year=year,
expensed=item['expensed']
)
biggest_suppliers_for_year_to_add.append(b)
BiggestSupplierForYear.objects.bulk_create(biggest_suppliers_for_year_to_add) | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package logical
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestContextDisableReplicationStatusEndpointsValue(t *testing.T) {
testcases := []struct {
name string
ctx context.Context
expectedValue bool
expectedOk bool
}{
{
name: "without-value",
ctx: context.Background(),
expectedValue: false,
expectedOk: false,
},
{
name: "with-nil",
ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, nil),
expectedValue: false,
expectedOk: false,
},
{
name: "with-incompatible-value",
ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, "true"),
expectedValue: false,
expectedOk: false,
},
{
name: "with-bool-true",
ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, true),
expectedValue: true,
expectedOk: true,
},
{
name: "with-bool-false",
ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, false),
expectedValue: false,
expectedOk: true,
},
}
for _, testcase := range testcases {
value, ok := ContextDisableReplicationStatusEndpointsValue(testcase.ctx)
assert.Equal(t, testcase.expectedValue, value, testcase.name)
assert.Equal(t, testcase.expectedOk, ok, testcase.name)
}
}
func TestCreateContextDisableReplicationStatusEndpoints(t *testing.T) {
ctx := CreateContextDisableReplicationStatusEndpoints(context.Background(), true)
value := ctx.Value(ctxKeyDisableReplicationStatusEndpoints{})
assert.NotNil(t, ctx)
assert.NotNil(t, value)
assert.IsType(t, bool(false), value)
assert.Equal(t, true, value.(bool))
ctx = CreateContextDisableReplicationStatusEndpoints(context.Background(), false)
value = ctx.Value(ctxKeyDisableReplicationStatusEndpoints{})
assert.NotNil(t, ctx)
assert.NotNil(t, value)
assert.IsType(t, bool(false), value)
assert.Equal(t, false, value.(bool))
}
func TestContextOriginalRequestPathValue(t *testing.T) {
testcases := []struct {
name string
ctx context.Context
expectedValue string
expectedOk bool
}{
{
name: "without-value",
ctx: context.Background(),
expectedValue: "",
expectedOk: false,
},
{
name: "with-nil",
ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, nil),
expectedValue: "",
expectedOk: false,
},
{
name: "with-incompatible-value",
ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, 6666),
expectedValue: "",
expectedOk: false,
},
{
name: "with-string-value",
ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, "test"),
expectedValue: "test",
expectedOk: true,
},
{
name: "with-empty-string",
ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, ""),
expectedValue: "",
expectedOk: true,
},
}
for _, testcase := range testcases {
value, ok := ContextOriginalRequestPathValue(testcase.ctx)
assert.Equal(t, testcase.expectedValue, value, testcase.name)
assert.Equal(t, testcase.expectedOk, ok, testcase.name)
}
}
func TestCreateContextOriginalRequestPath(t *testing.T) {
ctx := CreateContextOriginalRequestPath(context.Background(), "test")
value := ctx.Value(ctxKeyOriginalRequestPath{})
assert.NotNil(t, ctx)
assert.NotNil(t, value)
assert.IsType(t, string(""), value)
assert.Equal(t, "test", value.(string))
ctx = CreateContextOriginalRequestPath(context.Background(), "")
value = ctx.Value(ctxKeyOriginalRequestPath{})
assert.NotNil(t, ctx)
assert.NotNil(t, value)
assert.IsType(t, string(""), value)
assert.Equal(t, "", value.(string))
} | go | github | https://github.com/hashicorp/vault | sdk/logical/request_test.go |
import { ok, test } from '../../test';
export default test({
test({ assert, target }) {
const div = target.querySelector('div');
ok(div);
assert.equal(div.style.backgroundImage, 'url(https://example.com/foo.jpg)');
assert.equal(div.style.color, 'red');
}
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/runtime-legacy/samples/attribute-url/_config.js |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import os.path
from glob import glob
import unittest
import logging
from mock import patch
import luigi
from luigi.contrib.sge import SGEJobTask, _parse_qstat_state
DEFAULT_HOME = '/home'
logger = logging.getLogger('luigi-interface')
QSTAT_OUTPUT = """job-ID prior name user state submit/start at queue slots ja-task-ID
-----------------------------------------------------------------------------------------------------------------
1 0.55500 job1 root r 07/09/2015 16:56:45 all.q@node001 1
2 0.55500 job2 root qw 07/09/2015 16:56:42 1
3 0.00000 job3 root t 07/09/2015 16:56:45 1
"""
def on_sge_master():
try:
subprocess.check_output('qstat', shell=True)
return True
except subprocess.CalledProcessError:
return False
class TestSGEWrappers(unittest.TestCase):
def test_track_job(self):
'''`track_job` returns the state using qstat'''
self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 1), 'r')
self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 2), 'qw')
self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 3), 't')
self.assertEqual(_parse_qstat_state('', 1), 'u')
self.assertEqual(_parse_qstat_state('', 4), 'u')
class TestJobTask(SGEJobTask):
'''Simple SGE job: write a test file to NSF shared drive and waits a minute'''
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test\n')
def output(self):
return luigi.LocalTarget(os.path.join(DEFAULT_HOME, 'testfile_' + str(self.i)))
class TestSGEJob(unittest.TestCase):
'''Test from SGE master node'''
def test_run_job(self):
if on_sge_master():
outfile = os.path.join(DEFAULT_HOME, 'testfile_1')
tasks = [TestJobTask(i=str(i), n_cpu=1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
self.assertTrue(os.path.exists(outfile))
@patch('subprocess.check_output')
def test_run_job_with_dump(self, mock_check_output):
mock_check_output.side_effect = [
'Your job 12345 ("test_job") has been submitted',
''
]
task = TestJobTask(i=1, n_cpu=1, shared_tmp_dir='/tmp')
luigi.build([task], local_scheduler=True)
self.assertEqual(mock_check_output.call_count, 2)
def tearDown(self):
for fpath in glob(os.path.join(DEFAULT_HOME, 'test_file_*')):
try:
os.remove(fpath)
except OSError:
pass | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"math"
"testing"
"go.uber.org/zap/zaptest"
)
func TestFilePipeline(t *testing.T) {
tdir := t.TempDir()
fp := newFilePipeline(zaptest.NewLogger(t), tdir, SegmentSizeBytes)
defer fp.Close()
f, ferr := fp.Open()
if ferr != nil {
t.Fatal(ferr)
}
f.Close()
}
func TestFilePipelineFailPreallocate(t *testing.T) {
tdir := t.TempDir()
fp := newFilePipeline(zaptest.NewLogger(t), tdir, math.MaxInt64)
defer fp.Close()
f, ferr := fp.Open()
if f != nil || ferr == nil { // no space left on device
t.Fatal("expected error on invalid pre-allocate size, but no error")
}
} | go | github | https://github.com/etcd-io/etcd | server/storage/wal/file_pipeline_test.go |
name: CI (push)
on:
push:
branches:
- main
- '[0-9]+.[0-9]+.x'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
defaults:
run:
shell: bash
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Check code lint
run: pnpm tslint
- name: Check for circular dependencies
run: pnpm ts-circular-deps:check
- name: Validate pull approve configuration
run: pnpm ng-dev pullapprove verify
- name: Validate angular robot configuration
run: pnpm ng-dev ngbot verify
- name: Confirm code builds with typescript as expected
run: pnpm check-tooling-setup
devtools:
runs-on: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
disable-package-manager-cache: true
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Run unit tests
run: pnpm devtools:test
- name: Test build
run: pnpm devtools:build:chrome
- name: Cypress run
uses: cypress-io/github-action@84d178e4bbce871e23f2ffa3085898cde0e4f0ec # v7.1.2
with:
command: pnpm devtools:test:e2e
start: pnpm bazel run //devtools/src:devserver
wait-on: 'http://localhost:4200'
wait-on-timeout: 300
install: false
test:
runs-on: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel Remote Caching
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Run CI tests for framework
run: pnpm test:ci
integration-tests:
runs-on: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel Remote Caching
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Run integration CI tests for framework
run: pnpm integration-tests:ci
adev:
runs-on:
labels: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Run tests
run: pnpm bazel test //adev/...
- name: Build adev in fast mode to ensure it continues to work
run: pnpm bazel build //adev:build
vscode-ng-language-service:
runs-on:
labels: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Run tests
run: pnpm bazel test //vscode-ng-language-service/...
publish-snapshots:
runs-on:
labels: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Install node modules
run: pnpm install --frozen-lockfile
- run: echo "https://${{secrets.SNAPSHOT_BUILDS_GITHUB_TOKEN}}:@github.com" > ${HOME}/.git_credentials
- run: pnpm build
- run: ./scripts/ci/publish-snapshot-build-artifacts.sh
zone-js:
runs-on:
labels: ubuntu-latest
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
with:
google_credential: ${{ secrets.RBE_TRUSTED_BUILDS_USER }}
- name: Install node modules
run: pnpm install --frozen-lockfile
- run: |
pnpm bazel build \
//packages/zone.js/bundles:zone.umd.js \
//packages/zone.js:npm_package \
//packages/zone.js/test/closure:closure
- run: |
rm -Rf packages/zone.js/build
rm -Rf packages/zone.js/test/extra/*.umd.js
mkdir -p packages/zone.js/build/
mkdir -p packages/zone.js/build/test/
cp dist/bin/packages/zone.js/bundles/zone.umd.js packages/zone.js/build/zone.umd.js
cp dist/bin/packages/zone.js/npm_package/bundles/zone-mix.umd.js ./packages/zone.js/test/extra/
cp dist/bin/packages/zone.js/npm_package/bundles/zone-patch-electron.umd.js ./packages/zone.js/test/extra/
cp dist/bin/packages/zone.js/test/closure/zone.closure.js ./packages/zone.js/build/test/zone.closure.mjs
# Install
- run: pnpm -C packages/zone.js install --frozen-lockfile
# Run zone.js tools tests
- run: pnpm -C packages/zone.js promisefinallytest
- run: pnpm -C packages/zone.js jest:test
- run: pnpm -C packages/zone.js jest:nodetest
- run: pnpm -C packages/zone.js vitest:test
- run: pnpm -C packages/zone.js electrontest
- run: pnpm -C packages/zone.js/test/typings test
# saucelabs:
# runs-on: ubuntu-latest
# env:
# SAUCE_TUNNEL_IDENTIFIER: angular-framework-${{ github.run_number }}
# steps:
# - name: Initialize environment
# uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@b5a3609f89c06eb4037dce22a93641213a5d1508
# - name: Install node modules
# run: pnpm install --frozen-lockfile
# - uses: ./.github/actions/saucelabs-legacy
adev-deploy:
needs: [adev]
if: needs.adev.result == 'success'
runs-on: ubuntu-latest-8core
steps:
- name: Initialize environment
uses: angular/dev-infra/github-actions/npm/checkout-and-setup-node@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel
uses: angular/dev-infra/github-actions/bazel/setup@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Setup Bazel RBE
uses: angular/dev-infra/github-actions/bazel/configure-remote@8757d815eb39854621b7a42ed742fd34e1c448d9
- name: Install node modules
run: pnpm install --frozen-lockfile
- name: Build adev
# `snapshot-build` config is used to stamp the exact version with sha in the footer.
run: pnpm bazel build //adev:build.production --config=snapshot-build
- name: Deploy to firebase
uses: ./.github/actions/deploy-docs-site
with:
serviceKey: ${{ secrets.ANGULAR_DEV_SITE_DEPLOY }}
githubReleaseTrainReadToken: ${{ secrets.DOCS_DEPLOY_GITHUB_RELEASE_TRAIN_TOKEN }}
configPath: 'adev/firebase.json'
distDir: 'dist/bin/adev/dist'
- name: Update Algolia synonym record
run: node adev/scripts/synonyms/update-synonyms.mts
env:
ALGOLIA_KEY: ${{ secrets.ALGOLIA_SYNONYM_MANAGER }} | unknown | github | https://github.com/angular/angular | .github/workflows/ci.yml |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.support;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiConsumer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.PropertyValue;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanDefinitionHolder;
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.config.ConstructorArgumentValues.ValueHolder;
import org.springframework.beans.factory.config.TypedStringValue;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.beans.factory.support.AbstractBeanFactory;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.BeanDefinitionRegistryPostProcessor;
import org.springframework.beans.factory.support.BeanDefinitionValueResolver;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.factory.support.MergedBeanDefinitionPostProcessor;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.core.OrderComparator;
import org.springframework.core.Ordered;
import org.springframework.core.PriorityOrdered;
import org.springframework.core.metrics.ApplicationStartup;
import org.springframework.core.metrics.StartupStep;
/**
* Delegate for AbstractApplicationContext's post-processor handling.
*
* @author Juergen Hoeller
* @author Sam Brannen
* @author Stephane Nicoll
* @since 4.0
*/
final class PostProcessorRegistrationDelegate {
// Private constructor: this delegate is a static utility class and must never
// be instantiated.
private PostProcessorRegistrationDelegate() {
}
/**
 * Instantiate and invoke all registered {@link BeanFactoryPostProcessor} beans,
 * including {@link BeanDefinitionRegistryPostProcessor} beans when the given
 * factory is a {@link BeanDefinitionRegistry}, honoring the
 * {@link PriorityOrdered} and {@link Ordered} contracts within each phase.
 * <p>Registry post-processors run first (in PriorityOrdered, Ordered, then
 * unordered passes, repeating until no new ones are discovered, since they may
 * register further post-processor bean definitions), followed by the plain
 * {@code postProcessBeanFactory} callbacks in the same phased order.
 * @param beanFactory the bean factory whose bean definitions are to be processed
 * @param beanFactoryPostProcessors post-processors registered programmatically
 * on the application context, invoked ahead of bean-defined post-processors
 * within their respective phase
 */
public static void invokeBeanFactoryPostProcessors(
ConfigurableListableBeanFactory beanFactory, List<BeanFactoryPostProcessor> beanFactoryPostProcessors) {
// WARNING: Although it may appear that the body of this method can be easily
// refactored to avoid the use of multiple loops and multiple lists, the use
// of multiple lists and multiple passes over the names of processors is
// intentional. We must ensure that we honor the contracts for PriorityOrdered
// and Ordered processors. Specifically, we must NOT cause processors to be
// instantiated (via getBean() invocations) or registered in the ApplicationContext
// in the wrong order.
//
// Before submitting a pull request (PR) to change this method, please review the
// list of all declined PRs involving changes to PostProcessorRegistrationDelegate
// to ensure that your proposal does not result in a breaking change:
// https://github.com/spring-projects/spring-framework/issues?q=PostProcessorRegistrationDelegate+is%3Aclosed+label%3A%22status%3A+declined%22
// Invoke BeanDefinitionRegistryPostProcessors first, if any.
// Bean names already handled in the registry phase below; consulted later so
// the plain BeanFactoryPostProcessor phase does not invoke them a second time.
Set<String> processedBeans = new HashSet<>();
if (beanFactory instanceof BeanDefinitionRegistry registry) {
List<BeanFactoryPostProcessor> regularPostProcessors = new ArrayList<>();
List<BeanDefinitionRegistryPostProcessor> registryProcessors = new ArrayList<>();
// Programmatically registered processors: invoke registry callbacks
// immediately, and remember both kinds for the postProcessBeanFactory phase.
for (BeanFactoryPostProcessor postProcessor : beanFactoryPostProcessors) {
if (postProcessor instanceof BeanDefinitionRegistryPostProcessor registryProcessor) {
registryProcessor.postProcessBeanDefinitionRegistry(registry);
registryProcessors.add(registryProcessor);
}
else {
regularPostProcessors.add(postProcessor);
}
}
// Do not initialize FactoryBeans here: We need to leave all regular beans
// uninitialized to let the bean factory post-processors apply to them!
// Separate between BeanDefinitionRegistryPostProcessors that implement
// PriorityOrdered, Ordered, and the rest.
List<BeanDefinitionRegistryPostProcessor> currentRegistryProcessors = new ArrayList<>();
// First, invoke the BeanDefinitionRegistryPostProcessors that implement PriorityOrdered.
String[] postProcessorNames =
beanFactory.getBeanNamesForType(BeanDefinitionRegistryPostProcessor.class, true, false);
for (String ppName : postProcessorNames) {
if (beanFactory.isTypeMatch(ppName, PriorityOrdered.class)) {
currentRegistryProcessors.add(beanFactory.getBean(ppName, BeanDefinitionRegistryPostProcessor.class))
;
processedBeans.add(ppName);
}
}
sortPostProcessors(currentRegistryProcessors, beanFactory);
registryProcessors.addAll(currentRegistryProcessors);
invokeBeanDefinitionRegistryPostProcessors(currentRegistryProcessors, registry, beanFactory.getApplicationStartup());
currentRegistryProcessors.clear();
// Next, invoke the BeanDefinitionRegistryPostProcessors that implement Ordered.
// Names are re-fetched because the previous pass may have registered new ones.
postProcessorNames = beanFactory.getBeanNamesForType(BeanDefinitionRegistryPostProcessor.class, true, false);
for (String ppName : postProcessorNames) {
if (!processedBeans.contains(ppName) && beanFactory.isTypeMatch(ppName, Ordered.class)) {
currentRegistryProcessors.add(beanFactory.getBean(ppName, BeanDefinitionRegistryPostProcessor.class));
processedBeans.add(ppName);
}
}
sortPostProcessors(currentRegistryProcessors, beanFactory);
registryProcessors.addAll(currentRegistryProcessors);
invokeBeanDefinitionRegistryPostProcessors(currentRegistryProcessors, registry, beanFactory.getApplicationStartup());
currentRegistryProcessors.clear();
// Finally, invoke all other BeanDefinitionRegistryPostProcessors until no further ones appear.
boolean reiterate = true;
while (reiterate) {
reiterate = false;
postProcessorNames = beanFactory.getBeanNamesForType(BeanDefinitionRegistryPostProcessor.class, true, false);
for (String ppName : postProcessorNames) {
if (!processedBeans.contains(ppName)) {
currentRegistryProcessors.add(beanFactory.getBean(ppName, BeanDefinitionRegistryPostProcessor.class));
processedBeans.add(ppName);
// A new processor appeared; loop again in case it registers more.
reiterate = true;
}
}
sortPostProcessors(currentRegistryProcessors, beanFactory);
registryProcessors.addAll(currentRegistryProcessors);
invokeBeanDefinitionRegistryPostProcessors(currentRegistryProcessors, registry, beanFactory.getApplicationStartup());
currentRegistryProcessors.clear();
}
// Now, invoke the postProcessBeanFactory callback of all processors handled so far.
invokeBeanFactoryPostProcessors(registryProcessors, beanFactory);
invokeBeanFactoryPostProcessors(regularPostProcessors, beanFactory);
}
else {
// Invoke factory processors registered with the context instance.
invokeBeanFactoryPostProcessors(beanFactoryPostProcessors, beanFactory);
}
// Do not initialize FactoryBeans here: We need to leave all regular beans
// uninitialized to let the bean factory post-processors apply to them!
String[] postProcessorNames =
beanFactory.getBeanNamesForType(BeanFactoryPostProcessor.class, true, false);
// Separate between BeanFactoryPostProcessors that implement PriorityOrdered,
// Ordered, and the rest.
List<BeanFactoryPostProcessor> priorityOrderedPostProcessors = new ArrayList<>();
List<String> orderedPostProcessorNames = new ArrayList<>();
List<String> nonOrderedPostProcessorNames = new ArrayList<>();
for (String ppName : postProcessorNames) {
if (processedBeans.contains(ppName)) {
// skip - already processed in first phase above
}
else if (beanFactory.isTypeMatch(ppName, PriorityOrdered.class)) {
priorityOrderedPostProcessors.add(beanFactory.getBean(ppName, BeanFactoryPostProcessor.class));
}
else if (beanFactory.isTypeMatch(ppName, Ordered.class)) {
orderedPostProcessorNames.add(ppName);
}
else {
nonOrderedPostProcessorNames.add(ppName);
}
}
// First, invoke the BeanFactoryPostProcessors that implement PriorityOrdered.
sortPostProcessors(priorityOrderedPostProcessors, beanFactory);
invokeBeanFactoryPostProcessors(priorityOrderedPostProcessors, beanFactory);
// Next, invoke the BeanFactoryPostProcessors that implement Ordered.
// Instantiated lazily (by name) only now, after the PriorityOrdered phase ran.
List<BeanFactoryPostProcessor> orderedPostProcessors = new ArrayList<>(orderedPostProcessorNames.size());
for (String postProcessorName : orderedPostProcessorNames) {
orderedPostProcessors.add(beanFactory.getBean(postProcessorName, BeanFactoryPostProcessor.class));
}
sortPostProcessors(orderedPostProcessors, beanFactory);
invokeBeanFactoryPostProcessors(orderedPostProcessors, beanFactory);
// Finally, invoke all other BeanFactoryPostProcessors.
List<BeanFactoryPostProcessor> nonOrderedPostProcessors = new ArrayList<>(nonOrderedPostProcessorNames.size());
for (String postProcessorName : nonOrderedPostProcessorNames) {
nonOrderedPostProcessors.add(beanFactory.getBean(postProcessorName, BeanFactoryPostProcessor.class));
}
invokeBeanFactoryPostProcessors(nonOrderedPostProcessors, beanFactory);
// Clear cached merged bean definitions since the post-processors might have
// modified the original metadata, for example, replacing placeholders in values...
beanFactory.clearMetadataCache();
}
/**
 * Instantiate and register all {@link BeanPostProcessor} beans found in the given
 * bean factory, honoring {@link PriorityOrdered} and {@link Ordered} semantics.
 * <p>Registration happens in four passes: PriorityOrdered first, then Ordered,
 * then unordered, and finally all MergedBeanDefinitionPostProcessors are
 * re-registered so they sit at the end of the chain.
 * @param beanFactory the bean factory to register the post-processors with
 * @param applicationContext the context used for the trailing
 * {@code ApplicationListenerDetector} re-registration
 */
public static void registerBeanPostProcessors(
		ConfigurableListableBeanFactory beanFactory, AbstractApplicationContext applicationContext) {

	// WARNING: Although it may appear that the body of this method can be easily
	// refactored to avoid the use of multiple loops and multiple lists, the use
	// of multiple lists and multiple passes over the names of processors is
	// intentional. We must ensure that we honor the contracts for PriorityOrdered
	// and Ordered processors. Specifically, we must NOT cause processors to be
	// instantiated (via getBean() invocations) or registered in the ApplicationContext
	// in the wrong order.
	//
	// Before submitting a pull request (PR) to change this method, please review the
	// list of all declined PRs involving changes to PostProcessorRegistrationDelegate
	// to ensure that your proposal does not result in a breaking change:
	// https://github.com/spring-projects/spring-framework/issues?q=PostProcessorRegistrationDelegate+is%3Aclosed+label%3A%22status%3A+declined%22

	String[] postProcessorNames = beanFactory.getBeanNamesForType(BeanPostProcessor.class, true, false);

	// Register BeanPostProcessorChecker that logs a warn message when
	// a bean is created during BeanPostProcessor instantiation, i.e. when
	// a bean is not eligible for getting processed by all BeanPostProcessors.
	// NOTE(review): the "+ 1" presumably accounts for one additional processor
	// registered outside this count (e.g. the detector below) — confirm.
	int beanProcessorTargetCount = beanFactory.getBeanPostProcessorCount() + 1 + postProcessorNames.length;
	beanFactory.addBeanPostProcessor(
			new BeanPostProcessorChecker(beanFactory, postProcessorNames, beanProcessorTargetCount));

	// Separate between BeanPostProcessors that implement PriorityOrdered,
	// Ordered, and the rest.
	List<BeanPostProcessor> priorityOrderedPostProcessors = new ArrayList<>();
	List<BeanPostProcessor> internalPostProcessors = new ArrayList<>();
	List<String> orderedPostProcessorNames = new ArrayList<>();
	List<String> nonOrderedPostProcessorNames = new ArrayList<>();
	for (String ppName : postProcessorNames) {
		if (beanFactory.isTypeMatch(ppName, PriorityOrdered.class)) {
			// PriorityOrdered processors are instantiated eagerly in this pass.
			BeanPostProcessor pp = beanFactory.getBean(ppName, BeanPostProcessor.class);
			priorityOrderedPostProcessors.add(pp);
			if (pp instanceof MergedBeanDefinitionPostProcessor) {
				internalPostProcessors.add(pp);
			}
		}
		else if (beanFactory.isTypeMatch(ppName, Ordered.class)) {
			orderedPostProcessorNames.add(ppName);
		}
		else {
			nonOrderedPostProcessorNames.add(ppName);
		}
	}

	// First, register the BeanPostProcessors that implement PriorityOrdered.
	sortPostProcessors(priorityOrderedPostProcessors, beanFactory);
	registerBeanPostProcessors(beanFactory, priorityOrderedPostProcessors);

	// Next, register the BeanPostProcessors that implement Ordered.
	List<BeanPostProcessor> orderedPostProcessors = new ArrayList<>(orderedPostProcessorNames.size());
	for (String ppName : orderedPostProcessorNames) {
		BeanPostProcessor pp = beanFactory.getBean(ppName, BeanPostProcessor.class);
		orderedPostProcessors.add(pp);
		if (pp instanceof MergedBeanDefinitionPostProcessor) {
			internalPostProcessors.add(pp);
		}
	}
	sortPostProcessors(orderedPostProcessors, beanFactory);
	registerBeanPostProcessors(beanFactory, orderedPostProcessors);

	// Now, register all regular BeanPostProcessors.
	List<BeanPostProcessor> nonOrderedPostProcessors = new ArrayList<>(nonOrderedPostProcessorNames.size());
	for (String ppName : nonOrderedPostProcessorNames) {
		BeanPostProcessor pp = beanFactory.getBean(ppName, BeanPostProcessor.class);
		nonOrderedPostProcessors.add(pp);
		if (pp instanceof MergedBeanDefinitionPostProcessor) {
			internalPostProcessors.add(pp);
		}
	}
	registerBeanPostProcessors(beanFactory, nonOrderedPostProcessors);

	// Finally, re-register all internal BeanPostProcessors.
	sortPostProcessors(internalPostProcessors, beanFactory);
	registerBeanPostProcessors(beanFactory, internalPostProcessors);

	// Re-register post-processor for detecting inner beans as ApplicationListeners,
	// moving it to the end of the processor chain (for picking up proxies etc).
	beanFactory.addBeanPostProcessor(new ApplicationListenerDetector(applicationContext));
}
/**
 * Load and sort the post-processors of the specified type.
 * @param beanFactory the bean factory to use
 * @param beanPostProcessorType the post-processor type
 * @param <T> the post-processor type
 * @return a list of sorted post-processors for the specified type
 */
static <T extends BeanPostProcessor> List<T> loadBeanPostProcessors(
		ConfigurableListableBeanFactory beanFactory, Class<T> beanPostProcessorType) {

	String[] candidateNames = beanFactory.getBeanNamesForType(beanPostProcessorType, true, false);
	List<T> result = new ArrayList<>(candidateNames.length);
	for (String candidateName : candidateNames) {
		result.add(beanFactory.getBean(candidateName, beanPostProcessorType));
	}
	sortPostProcessors(result, beanFactory);
	return result;
}
/**
 * Selectively invoke {@link MergedBeanDefinitionPostProcessor} instances
 * registered in the specified bean factory, resolving bean definitions and
 * any attributes if necessary as well as any inner bean definitions that
 * they may contain.
 * @param beanFactory the bean factory to use
 */
static void invokeMergedBeanDefinitionPostProcessors(DefaultListableBeanFactory beanFactory) {
	// All the work is delegated to a dedicated invoker instance.
	new MergedBeanDefinitionPostProcessorInvoker(beanFactory).invokeMergedBeanDefinitionPostProcessors();
}
/**
 * Sort the given post-processors, preferring the bean factory's dependency
 * comparator when one is configured, otherwise falling back to
 * {@link OrderComparator#INSTANCE}.
 */
private static void sortPostProcessors(List<?> postProcessors, ConfigurableListableBeanFactory beanFactory) {
	// Sorting is only meaningful with at least two elements.
	if (postProcessors.size() <= 1) {
		return;
	}
	Comparator<Object> comparator =
			(beanFactory instanceof DefaultListableBeanFactory dlbf ? dlbf.getDependencyComparator() : null);
	postProcessors.sort(comparator != null ? comparator : OrderComparator.INSTANCE);
}
/**
 * Invoke the given BeanDefinitionRegistryPostProcessor beans.
 */
private static void invokeBeanDefinitionRegistryPostProcessors(
		Collection<? extends BeanDefinitionRegistryPostProcessor> postProcessors, BeanDefinitionRegistry registry, ApplicationStartup applicationStartup) {

	for (BeanDefinitionRegistryPostProcessor processor : postProcessors) {
		// Record each invocation as a startup step for diagnostics.
		StartupStep step = applicationStartup.start("spring.context.beandef-registry.post-process")
				.tag("postProcessor", processor::toString);
		processor.postProcessBeanDefinitionRegistry(registry);
		step.end();
	}
}
/**
 * Invoke the given BeanFactoryPostProcessor beans.
 */
private static void invokeBeanFactoryPostProcessors(
		Collection<? extends BeanFactoryPostProcessor> postProcessors, ConfigurableListableBeanFactory beanFactory) {

	for (BeanFactoryPostProcessor processor : postProcessors) {
		// Record each invocation as a startup step for diagnostics.
		StartupStep step = beanFactory.getApplicationStartup().start("spring.context.bean-factory.post-process")
				.tag("postProcessor", processor::toString);
		processor.postProcessBeanFactory(beanFactory);
		step.end();
	}
}
/**
 * Register the given BeanPostProcessor beans.
 */
private static void registerBeanPostProcessors(
		ConfigurableListableBeanFactory beanFactory, List<? extends BeanPostProcessor> postProcessors) {

	if (!(beanFactory instanceof AbstractBeanFactory abstractBeanFactory)) {
		// Generic fallback: register one at a time through the public API.
		for (BeanPostProcessor postProcessor : postProcessors) {
			beanFactory.addBeanPostProcessor(postProcessor);
		}
	}
	else {
		// Bulk addition is more efficient against our CopyOnWriteArrayList there
		abstractBeanFactory.addBeanPostProcessors(postProcessors);
	}
}
/**
 * BeanPostProcessor that logs a warn message when a bean is created during
 * BeanPostProcessor instantiation, i.e. when a bean is not eligible for
 * getting processed by all BeanPostProcessors.
 */
private static final class BeanPostProcessorChecker implements BeanPostProcessor {

	private static final Log logger = LogFactory.getLog(BeanPostProcessorChecker.class);

	// The factory whose processor count is monitored.
	private final ConfigurableListableBeanFactory beanFactory;

	// Names of all BeanPostProcessor beans discovered at registration time.
	private final String[] postProcessorNames;

	// Expected total number of registered post-processors once registration completes.
	private final int beanPostProcessorTargetCount;

	public BeanPostProcessorChecker(ConfigurableListableBeanFactory beanFactory,
			String[] postProcessorNames, int beanPostProcessorTargetCount) {
		this.beanFactory = beanFactory;
		this.postProcessorNames = postProcessorNames;
		this.beanPostProcessorTargetCount = beanPostProcessorTargetCount;
	}

	@Override
	public Object postProcessBeforeInitialization(Object bean, String beanName) {
		// Pass-through: this checker never modifies beans.
		return bean;
	}

	@Override
	public Object postProcessAfterInitialization(Object bean, String beanName) {
		// A bean created while fewer processors are registered than the target
		// count missed some post-processing; warn unless the bean is itself a
		// post-processor or an infrastructure bean.
		if (!(bean instanceof BeanPostProcessor) && !isInfrastructureBean(beanName) &&
				this.beanFactory.getBeanPostProcessorCount() < this.beanPostProcessorTargetCount) {
			if (logger.isWarnEnabled()) {
				// Collect the post-processors currently being instantiated to
				// name the likely culprit in the warning.
				Set<String> bppsInCreation = new LinkedHashSet<>(2);
				for (String bppName : this.postProcessorNames) {
					if (this.beanFactory.isCurrentlyInCreation(bppName)) {
						bppsInCreation.add(bppName);
					}
				}
				if (bppsInCreation.size() == 1) {
					String bppName = bppsInCreation.iterator().next();
					// Special case: the bean being created is the factory bean of
					// the in-creation post-processor (non-static @Bean method).
					if (this.beanFactory.containsBeanDefinition(bppName) &&
							beanName.equals(this.beanFactory.getBeanDefinition(bppName).getFactoryBeanName())) {
						logger.warn("Bean '" + beanName + "' of type [" + bean.getClass().getName() +
								"] is not eligible for getting processed by all BeanPostProcessors " +
								"(for example: not eligible for auto-proxying). The currently created " +
								"BeanPostProcessor " + bppsInCreation + " is declared through a non-static " +
								"factory method on that class; consider declaring it as static instead.");
						return bean;
					}
				}
				logger.warn("Bean '" + beanName + "' of type [" + bean.getClass().getName() +
						"] is not eligible for getting processed by all BeanPostProcessors " +
						"(for example: not eligible for auto-proxying). Is this bean getting eagerly " +
						"injected/applied to a currently created BeanPostProcessor " + bppsInCreation + "? " +
						"Check the corresponding BeanPostProcessor declaration and its dependencies/advisors. " +
						"If this bean does not have to be post-processed, declare it with ROLE_INFRASTRUCTURE.");
			}
		}
		return bean;
	}

	// Infrastructure-role beans are intentionally exempt from the warning.
	private boolean isInfrastructureBean(@Nullable String beanName) {
		if (beanName != null && this.beanFactory.containsBeanDefinition(beanName)) {
			BeanDefinition bd = this.beanFactory.getBeanDefinition(beanName);
			return (bd.getRole() == BeanDefinition.ROLE_INFRASTRUCTURE);
		}
		return false;
	}
}
/**
 * Helper that applies all MergedBeanDefinitionPostProcessors to every merged
 * root bean definition in the factory, recursing into inner bean definitions
 * found in property values and constructor arguments.
 */
private static final class MergedBeanDefinitionPostProcessorInvoker {

	private final DefaultListableBeanFactory beanFactory;

	private MergedBeanDefinitionPostProcessorInvoker(DefaultListableBeanFactory beanFactory) {
		this.beanFactory = beanFactory;
	}

	private void invokeMergedBeanDefinitionPostProcessors() {
		List<MergedBeanDefinitionPostProcessor> postProcessors = PostProcessorRegistrationDelegate.loadBeanPostProcessors(
				this.beanFactory, MergedBeanDefinitionPostProcessor.class);
		for (String beanName : this.beanFactory.getBeanDefinitionNames()) {
			RootBeanDefinition bd = (RootBeanDefinition) this.beanFactory.getMergedBeanDefinition(beanName);
			Class<?> beanType = resolveBeanType(bd);
			postProcessRootBeanDefinition(postProcessors, beanName, beanType, bd);
			// Flag the definition so it is not post-processed a second time.
			bd.markAsPostProcessed();
		}
		registerBeanPostProcessors(this.beanFactory, postProcessors);
	}

	// Apply all post-processors to one root definition, then walk its property
	// values and constructor arguments for nested definitions.
	private void postProcessRootBeanDefinition(List<MergedBeanDefinitionPostProcessor> postProcessors,
			String beanName, Class<?> beanType, RootBeanDefinition bd) {

		BeanDefinitionValueResolver valueResolver = new BeanDefinitionValueResolver(this.beanFactory, beanName, bd);
		postProcessors.forEach(postProcessor -> postProcessor.postProcessMergedBeanDefinition(bd, beanType, beanName));
		for (PropertyValue propertyValue : bd.getPropertyValues().getPropertyValueList()) {
			postProcessValue(postProcessors, valueResolver, propertyValue.getValue());
		}
		for (ValueHolder valueHolder : bd.getConstructorArgumentValues().getIndexedArgumentValues().values()) {
			postProcessValue(postProcessors, valueResolver, valueHolder.getValue());
		}
		for (ValueHolder valueHolder : bd.getConstructorArgumentValues().getGenericArgumentValues()) {
			postProcessValue(postProcessors, valueResolver, valueHolder.getValue());
		}
	}

	// Recurse into a single property/constructor value: inner bean definitions
	// (wrapped in a holder or raw) are resolved and post-processed in turn;
	// typed string values get their target type resolved eagerly.
	private void postProcessValue(List<MergedBeanDefinitionPostProcessor> postProcessors,
			BeanDefinitionValueResolver valueResolver, @Nullable Object value) {

		if (value instanceof BeanDefinitionHolder bdh &&
				bdh.getBeanDefinition() instanceof AbstractBeanDefinition innerBd) {
			Class<?> innerBeanType = resolveBeanType(innerBd);
			resolveInnerBeanDefinition(valueResolver, innerBd, (innerBeanName, innerBeanDefinition)
					-> postProcessRootBeanDefinition(postProcessors, innerBeanName, innerBeanType, innerBeanDefinition));
		}
		else if (value instanceof AbstractBeanDefinition innerBd) {
			Class<?> innerBeanType = resolveBeanType(innerBd);
			resolveInnerBeanDefinition(valueResolver, innerBd, (innerBeanName, innerBeanDefinition)
					-> postProcessRootBeanDefinition(postProcessors, innerBeanName, innerBeanType, innerBeanDefinition));
		}
		else if (value instanceof TypedStringValue typedStringValue) {
			resolveTypeStringValue(typedStringValue);
		}
	}

	private void resolveInnerBeanDefinition(BeanDefinitionValueResolver valueResolver, BeanDefinition innerBeanDefinition,
			BiConsumer<String, RootBeanDefinition> resolver) {

		valueResolver.resolveInnerBean(null, innerBeanDefinition, (name, rbd) -> {
			resolver.accept(name, rbd);
			return Void.class;
		});
	}

	private void resolveTypeStringValue(TypedStringValue typedStringValue) {
		try {
			typedStringValue.resolveTargetType(this.beanFactory.getBeanClassLoader());
		}
		catch (ClassNotFoundException ignored) {
			// Target type stays unresolved; deliberate best-effort.
		}
	}

	// Resolve the definition's bean class if possible; unresolvable classes are
	// tolerated and the (possibly generic) resolvable type is returned instead.
	private Class<?> resolveBeanType(AbstractBeanDefinition bd) {
		if (!bd.hasBeanClass()) {
			try {
				bd.resolveBeanClass(this.beanFactory.getBeanClassLoader());
			}
			catch (ClassNotFoundException ex) {
				// ignore
			}
		}
		return bd.getResolvableType().toClass();
	}
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/context/support/PostProcessorRegistrationDelegate.java |
// NOTE(review): these cases exercise webpack's static parsing of require.ensure
// callbacks; the exact syntactic shapes (.bind(...), named function, IIFE) are
// the thing under test — do not "simplify" them.
it("should handle bound function expressions", function(done) {
	require.ensure(
		[],
		function(require) {
			// `this` must survive the .bind() below.
			expect(this).toEqual({ test: true });
			require("./empty?test");
			expect(process.nextTick).toBeTypeOf("function"); // check if injection still works
			// Nested ensure with another bound callback.
			require.ensure(
				[],
				function(require) {
					expect(this).toEqual({ test: true });
					done();
				}.bind(this)
			);
		}.bind({ test: true })
	);
});
it("should handle require.ensure without function expression", function(done) {
	// Callback passed as a named function reference rather than inline.
	function f() {
		done();
	}
	require.ensure([], f);
});
it("should parse expression in require.ensure, which isn't a function expression", function(done) {
	require.ensure(
		[],
		// Callback produced by an immediately-invoked function expression.
		(function() {
			expect(require("./empty?require.ensure:test")).toEqual({});
			return function f() {
				done();
			};
		})()
	);
});
it("should accept an already included module", function(done) {
	// Never executes, but makes webpack include the module in this chunk.
	if (Math.random() < 0) require("./require.include");
	var value = null;
	require.ensure([], function(require) {
		value = require("./require.include");
	});
	// The module is already in the chunk, so the callback runs before the
	// next tick and `value` is populated by the time setImmediate fires.
	setImmediate(function() {
		expect(value).toBe("require.include");
		expect(value).toBe("require.include");
		done();
	});
}); | javascript | github | https://github.com/webpack/webpack | test/cases/chunks/parsing/index.js
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.typeInfoProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.typeInfoProvider.AbstractDoubleColonReceiverTypeTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/typeInfoProvider/doubleColonReceiverType")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisScriptSourceModuleDoubleColonReceiverTypeTestGenerated extends AbstractDoubleColonReceiverTypeTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // FIR frontend / script-source module / normal session / IDE mode.
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.ScriptSource,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    @Test
    public void testAllFilesPresentInDoubleColonReceiverType() {
        // Guard: every .kts file under the test-data directory must have a
        // corresponding generated test method (regenerate if this fails).
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/typeInfoProvider/doubleColonReceiverType"), Pattern.compile("^(.+)\\.kts$"), null, true);
    }
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/components/typeInfoProvider/FirIdeNormalAnalysisScriptSourceModuleDoubleColonReceiverTypeTestGenerated.java
# -*- coding: utf-8 -*-
import os
import pytest
from cookiecutter import config
from cookiecutter.exceptions import (
ConfigDoesNotExistException, InvalidConfiguration
)
def test_merge_configs():
    """User-supplied settings should override defaults, nested dicts merging
    key by key rather than being replaced wholesale."""
    base = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {},
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
        },
    }
    overrides = {
        'default_context': {
            'full_name': 'Raphael Pierzina',
            'github_username': 'hackebrot',
        },
        'abbreviations': {
            'gl': 'https://gitlab.com/hackebrot/{0}.git',
            'pytest-plugin': 'https://github.com/pytest-dev/pytest-plugin.git',
        },
    }
    expected = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {
            'full_name': 'Raphael Pierzina',
            'github_username': 'hackebrot',
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/hackebrot/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
            'pytest-plugin': 'https://github.com/pytest-dev/pytest-plugin.git',
        },
    }
    assert config.merge_configs(base, overrides) == expected
def test_get_config():
    """Reading a valid config file yields the expected settings dict."""
    expected = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {
            'full_name': 'Firstname Lastname',
            'email': 'firstname.lastname@gmail.com',
            'github_username': 'example'
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
            'helloworld': 'https://github.com/hackebrot/helloworld'
        },
    }
    result = config.get_config('tests/test-config/valid-config.yaml')
    assert result == expected
def test_get_config_does_not_exist():
    """Requesting a missing config file raises ConfigDoesNotExistException."""
    missing_path = 'tests/test-config/this-does-not-exist.yaml'
    with pytest.raises(ConfigDoesNotExistException):
        config.get_config(missing_path)
def test_invalid_config():
    """A config file containing invalid YAML raises InvalidConfiguration,
    and the message names the offending file."""
    with pytest.raises(InvalidConfiguration) as excinfo:
        config.get_config('tests/test-config/invalid-config.yaml')
    # Only check the stable prefix; the parser error text follows 'Error: '.
    assert (
        'Unable to parse YAML file '
        'tests/test-config/invalid-config.yaml. '
        'Error: '
    ) in str(excinfo.value)
def test_get_config_with_defaults():
    """A partial config file keeps the built-in defaults for unset keys
    (here: cookiecutters_dir and replay_dir)."""
    expected = {
        'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
        'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
        'default_context': {
            'full_name': 'Firstname Lastname',
            'email': 'firstname.lastname@gmail.com',
            'github_username': 'example'
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
        },
    }
    result = config.get_config('tests/test-config/valid-partial-config.yaml')
    assert result == expected
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
#include <linux/blk-mq.h>
#include "blk-stat.h"
/* Defined elsewhere in the block layer / elevator code. */
struct blk_mq_tag_set;
struct elevator_tags;

/*
 * Container for one request queue's per-CPU software contexts, plus the
 * kobject used to expose them.
 */
struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		/* one pending-request list per hctx type */
		struct list_head rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	/* CPU this software queue belongs to */
	unsigned int cpu;
	unsigned short index_hw[HCTX_MAX_TYPES];
	/* hardware queue for each hctx type; indexed by blk_mq_get_hctx_type() */
	struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

	struct request_queue *queue;
	struct blk_mq_ctxs *ctxs;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;
enum {
	/* sentinel: request currently holds no (driver or sched) tag */
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH	(8)

/* Flags controlling how a request is inserted. */
typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)
/* bio submission entry point and polled completion (implemented in blk-mq.c). */
void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
		struct elevator_tags *tags,
		unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
		bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
		unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags,
		unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	/* Look up the hctx number via the tag set's per-type CPU map. */
	return queue_hctx((q), (q->tag_set->map[type].mq_map[cpu]));
}

/*
 * blk_mq_get_hctx_type() - derive the hctx type from operation + flags:
 * POLL for polled I/O, READ for reads, DEFAULT otherwise.
 */
static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensure that if REQ_POLLED, poll must be enabled.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	/* The ctx caches its mapped hctx per type; no table walk needed. */
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * Default to double of smaller one between hw queue_depth and
 * 128, since we don't split into sync/async like the old code
 * did. Additionally, this is a per-hw queue depth.
 */
static inline unsigned int blk_mq_default_nr_requests(
		struct blk_mq_tag_set *set)
{
	return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
}
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/* plug / teardown helpers (implemented in blk-mq.c) */
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

/* Return the software queue context of @q for the given @cpu. */
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
/* Parameter bundle for request/tag allocation. */
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct rq_list *cached_rqs;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

/* Tag map lifecycle and tag get/put (implemented in blk-mq-tag.c). */
struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, unsigned int flags, int node);
void blk_mq_free_tags(struct blk_mq_tag_set *set, struct blk_mq_tags *tags);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
		unsigned int nr);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

/*
 * Pick a wait queue for the bitmap: the first one when no hctx is given,
 * otherwise one selected via the hctx's wait_index.
 */
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

/* Busy/idle accounting only applies when tags are shared across queues. */
static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

/* Reserved tags occupy the low range [0, nr_reserved_tags). */
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

/* Scheduler tags when the request went through an I/O scheduler, else driver tags. */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	/* Fast path: hardware queue is not stopped most of the time. */
	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return false;

	/*
	 * This barrier is used to order adding of dispatch list before and
	 * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
	 * in blk_mq_start_stopped_hw_queue() so that dispatch code could
	 * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
	 * empty to avoid missing dispatching requests.
	 */
	smp_mb();

	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

/* Mapped means the hctx has software queues and a tag map attached. */
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]);

/*
 * Dispatch-budget hooks: forwarded to the driver's mq_ops when implemented,
 * otherwise no-ops (get_budget defaults to token 0, get_rq_budget_token to -1).
 */
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	/* Negative tokens mean "no budget held"; nothing to record. */
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
/*
 * Active-request accounting. With shared tags the counter lives on the
 * request queue (shared across hctxs); otherwise it is per-hctx. The
 * non-underscore wrappers only account when tags are queue-shared.
 */
static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
/* Release the driver tag back to the tag map and clear it on the request. */
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	/* Nothing to put without a driver tag; internal_tag check skips
	 * requests whose driver tag doubles as the only tag. */
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

/* Ensure the request holds a driver tag, allocating one if needed. */
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}

/* Reset the CPU -> hctx map so every CPU points at hardware queue 0. */
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 *
 * Returns true when this hctx may take another tag from @bt, i.e. when
 * tags are not shared, fairness cannot apply, or the hctx is still below
 * its fair share (depth / active users, at least 4).
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	/* Skip fairness until the relevant "active" flag is actually set. */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
/*
 * run the code block in @dispatch_ops with rcu/srcu read lock held.
 * Blocking tag sets (BLK_MQ_F_BLOCKING) use SRCU so @dispatch_ops may sleep;
 * otherwise plain RCU is used and sleeping is forbidden.
 */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)
/*
 * Convenience wrapper: run @dispatch_ops with sleep checking enabled.
 *
 * Fix: the original definition ended with a stray line-continuation
 * backslash, which splices the NEXT source line into the macro body unless
 * a blank line happens to follow. The trailing backslash is removed so the
 * definition is properly terminated.
 */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)	\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)
/* Polling requires both the queue feature flag and at least one poll queue. */
static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}
#endif | c | github | https://github.com/torvalds/linux | block/blk-mq.h |
import re
from django import forms
from django.db.models import Q
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.util import generate_code
from corehq.apps.custom_data_fields import CustomDataEditor
from corehq.apps.custom_data_fields.edit_entity import get_prefixed
from corehq.apps.es import UserES
from corehq.apps.users.forms import MultipleSelectionForm
from corehq.apps.locations.permissions import LOCATION_ACCESS_DENIED
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import raw_username, user_display_string
from .models import SQLLocation, LocationType
from .permissions import user_can_access_location_id
from .signals import location_edited
class ParentLocWidget(forms.Widget):
    """Widget that renders the parent-location picker partial template."""

    def render(self, name, value, attrs=None):
        # Delegate all markup to the dedicated template partial.
        template = get_template('locations/manage/partials/parent_loc_widget.html')
        context = Context({'name': name, 'value': value})
        return template.render(context)
class LocTypeWidget(forms.Widget):
    """Widget that renders the location-type selector partial template."""

    def render(self, name, value, attrs=None):
        # Delegate all markup to the dedicated template partial.
        template = get_template('locations/manage/partials/loc_type_widget.html')
        context = Context({'name': name, 'value': value})
        return template.render(context)
class LocationForm(forms.Form):
    """Create/edit form for a single SQLLocation.

    Wraps a CustomDataEditor for the location's custom fields; validation
    covers parent access/re-parenting rules, sibling name uniqueness,
    site-code uniqueness, location-type constraints and coordinates.
    """
    parent_id = forms.CharField(
        label=ugettext_lazy('Parent'),
        required=False,
        widget=ParentLocWidget(),
    )
    name = forms.CharField(
        label=ugettext_lazy('Name'),
        max_length=100,
    )
    location_type = forms.CharField(
        label=ugettext_lazy('Organization Level'),
        required=False,
        widget=LocTypeWidget(),
    )
    coordinates = forms.CharField(
        label=ugettext_lazy('Coordinates'),
        max_length=30,
        required=False,
        help_text=ugettext_lazy("enter as 'lat lon' or 'lat, lon' "
                                "(e.g., '42.3652 -71.1029')"),
    )
    site_code = forms.CharField(
        label='Site Code',
        required=False,
        help_text=ugettext_lazy("A unique system code for this location. "
                                "Leave this blank to have it auto generated"),
    )
    external_id = forms.CharField(
        label='External ID',
        required=False,
        help_text=ugettext_lazy("A number referencing this location on an external system")
    )
    # external_id is shown read-only; it is managed by the external system.
    external_id.widget.attrs['readonly'] = True

    strict = True  # optimization hack: strict or loose validation

    def __init__(self, location, bound_data=None, is_new=False, user=None,
                 *args, **kwargs):
        self.location = location
        self.domain = location.domain
        self.user = user
        self.is_new_location = is_new
        kwargs['initial'] = {
            'parent_id': location.parent_location_id,
            'name': location.name,
            'site_code': location.site_code,
            'external_id': location.external_id,
        }
        if not self.is_new_location:
            kwargs['initial']['location_type'] = self.location.location_type.name
        kwargs['initial']['parent_id'] = self.location.parent_location_id
        lat, lon = (getattr(self.location, k, None)
                    for k in ('latitude', 'longitude'))
        kwargs['initial']['coordinates'] = ('%s, %s' % (lat, lon)
                                            if lat is not None else '')
        self.custom_data = self.get_custom_data(bound_data, is_new)
        self.custom_data.form.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
        self.custom_data.form.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
        super(LocationForm, self).__init__(bound_data, *args, **kwargs)
        self.fields['parent_id'].widget.domain = self.domain
        self.fields['parent_id'].widget.user = user
        if not self.location.external_id:
            self.fields['external_id'].widget = forms.HiddenInput()
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
        self.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(*self.get_fields(is_new))
        )

    def get_fields(self, is_new):
        # New locations show a reduced field set; the type selector only
        # appears when more than one child type is allowed under the parent.
        if is_new:
            return filter(None, [
                _("Location Information"),
                'name',
                'location_type' if len(self._get_allowed_types(self.domain, self.location.parent)) > 1 else None,
            ])
        else:
            return [
                _("Location Information"),
                'name',
                'parent_id',
                'location_type',
                'coordinates',
                'site_code',
                'external_id',
            ]

    def get_custom_data(self, bound_data, is_new):
        from .views import LocationFieldsView

        existing = self.location.metadata
        # Don't show validation error preemptively on new user creation
        if is_new and bound_data is None:
            existing = None
        return CustomDataEditor(
            field_view=LocationFieldsView,
            domain=self.domain,
            # For new locations, only display required fields
            required_only=is_new,
            existing_custom_data=existing,
            post_dict=bound_data,
        )

    def is_valid(self):
        # The form is valid only if both the base form and the custom-data
        # sub-form validate.
        return all([
            super(LocationForm, self).is_valid(),
            self.custom_data.is_valid(),
        ])

    @property
    def errors(self):
        # Merge custom-data errors into the base form's error dict.
        errors = super(LocationForm, self).errors
        errors.update(self.custom_data.errors)
        return errors

    def clean_parent_id(self):
        if self.is_new_location:
            parent = self.location.parent
            parent_id = self.location.parent_location_id
        else:
            parent_id = self.cleaned_data['parent_id'] or None
            parent = SQLLocation.objects.get(location_id=parent_id) if parent_id else None
        if self.user and not user_can_access_location_id(self.domain, self.user, parent_id):
            raise forms.ValidationError(LOCATION_ACCESS_DENIED)
        self.cleaned_data['parent'] = parent
        if self.location.location_id is not None and self.location.parent_location_id != parent_id:
            # location is being re-parented
            if parent and self.location.location_id in parent.path:
                raise forms.ValidationError(_("Location's parent is itself or a descendant"))
            if self.location.get_descendants().exists():
                raise forms.ValidationError(_(
                    'only locations that have no child locations can be '
                    'moved to a different parent'
                ))
            # Remembered so save() can signal the re-parenting.
            self.cleaned_data['orig_parent_id'] = self.location.parent_location_id
        return parent_id

    def clean_name(self):
        def has_siblings_with_name(location, name, parent_location_id):
            # Same-named siblings under the same parent (or at top level).
            qs = SQLLocation.objects.filter(domain=location.domain,
                                            name=name)
            if parent_location_id:
                qs = qs.filter(parent__location_id=parent_location_id)
            else:  # Top level
                qs = qs.filter(parent=None)
            return (qs.exclude(location_id=self.location.location_id)
                    .exists())

        name = self.cleaned_data['name']
        parent_location_id = self.cleaned_data.get('parent_id', None)
        if self.strict:
            if has_siblings_with_name(self.location, name, parent_location_id):
                raise forms.ValidationError(_(
                    'name conflicts with another location with this parent'
                ))
        return name

    def clean_site_code(self):
        # Site codes are case-insensitively unique per domain.
        site_code = self.cleaned_data['site_code']
        if site_code:
            site_code = site_code.lower()
        if (SQLLocation.objects.filter(domain=self.domain,
                                       site_code__iexact=site_code)
                               .exclude(location_id=self.location.location_id)
                               .exists()):
            raise forms.ValidationError(_(
                'another location already uses this site code'
            ))
        return site_code

    def clean(self):
        # Auto-generate a site code from the name when none was supplied.
        if 'name' in self.cleaned_data and not self.cleaned_data.get('site_code', None):
            all_codes = [
                code.lower() for code in
                (SQLLocation.objects.exclude(location_id=self.location.location_id)
                                    .filter(domain=self.domain)
                                    .values_list('site_code', flat=True))
            ]
            self.cleaned_data['site_code'] = generate_code(self.cleaned_data['name'], all_codes)

    @staticmethod
    def _get_allowed_types(domain, parent):
        # LocationTypes permitted as children of `parent` (or top-level types).
        parent_type = parent.location_type if parent else None
        return list(LocationType.objects
                    .filter(domain=domain,
                            parent_type=parent_type)
                    .all())

    def clean_location_type(self):
        loc_type = self.cleaned_data['location_type']
        allowed_types = self._get_allowed_types(self.domain, self.cleaned_data.get('parent'))
        if not allowed_types:
            raise forms.ValidationError(_('The selected parent location cannot have child locations!'))
        if not loc_type:
            # If exactly one type is possible it can be inferred.
            if len(allowed_types) == 1:
                loc_type_obj = allowed_types[0]
            else:
                raise forms.ValidationError(_('You must select a location type'))
        else:
            try:
                # The value may be either the type's code or its name.
                loc_type_obj = (LocationType.objects
                                .filter(domain=self.domain)
                                .get(Q(code=loc_type) | Q(name=loc_type)))
            except LocationType.DoesNotExist:
                raise forms.ValidationError(_("LocationType '{}' not found").format(loc_type))
            else:
                if loc_type_obj not in allowed_types:
                    raise forms.ValidationError(_('Location type not valid for the selected parent.'))
        self.cleaned_data['location_type_object'] = loc_type_obj
        return loc_type_obj.name

    def clean_coordinates(self):
        # Returns [lat, lon] as floats, or None when the field is blank.
        coords = self.cleaned_data['coordinates'].strip()
        if not coords:
            return None
        pieces = re.split('[ ,]+', coords)
        if len(pieces) != 2:
            raise forms.ValidationError(_('could not understand coordinates'))
        try:
            lat = float(pieces[0])
            lon = float(pieces[1])
        except ValueError:
            raise forms.ValidationError(_('could not understand coordinates'))
        return [lat, lon]

    def save(self, instance=None, commit=True):
        """Apply cleaned data to the location, optionally persist it, and
        fire the location_edited signal for existing locations."""
        if self.errors:
            raise ValueError('form does not validate')
        location = instance or self.location
        is_new = location.location_id is None
        location.name = self.cleaned_data['name']
        location.site_code = self.cleaned_data['site_code']
        location.location_type = self.cleaned_data['location_type_object']
        location.metadata = self.custom_data.get_data_to_save()
        location.parent = self.cleaned_data['parent']
        coords = self.cleaned_data['coordinates']
        if coords:
            location.latitude = coords[0]
            location.longitude = coords[1]
        # Merge any prefixed raw POST custom fields into the metadata.
        location.metadata.update(get_prefixed(self.data))
        if commit:
            location.save()
        if not is_new:
            orig_parent_id = self.cleaned_data.get('orig_parent_id')
            reparented = orig_parent_id is not None
            location_edited.send(sender='loc_mgmt', sql_loc=location,
                                 moved=reparented, previous_parent=orig_parent_id)
        return location
class UsersAtLocationForm(MultipleSelectionForm):
    """Multi-select form for assigning/unassigning mobile workers to a location."""

    def __init__(self, domain_object, location, *args, **kwargs):
        self.domain_object = domain_object
        self.location = location
        super(UsersAtLocationForm, self).__init__(
            initial={'selected_ids': self.users_at_location},
            *args, **kwargs
        )
        self.fields['selected_ids'].choices = self.get_all_users()
        self.fields['selected_ids'].label = ugettext_lazy("Workers at Location")

    def get_all_users(self):
        # (id, display-name) choices for every mobile user in the domain.
        user_query = (UserES()
                      .domain(self.domain_object.name)
                      .mobile_users()
                      .fields(['_id', 'username', 'first_name', 'last_name']))
        return [
            (u['_id'], user_display_string(u['username'],
                                           u.get('first_name', ''),
                                           u.get('last_name', '')))
            for u in user_query.run().hits
        ]

    @property
    @memoized
    def users_at_location(self):
        # IDs of mobile users currently assigned to this location.
        return (UserES()
                .domain(self.domain_object.name)
                .mobile_users()
                .location(self.location.location_id)
                .get_ids())

    def unassign_users(self, users):
        for doc in iter_docs(CommCareUser.get_db(), users):
            # This could probably be sped up by bulk saving, but there's a lot
            # of stuff going on - seems tricky.
            CommCareUser.wrap(doc).unset_location_by_id(self.location.location_id, fall_back_to_next=True)

    def assign_users(self, users):
        for doc in iter_docs(CommCareUser.get_db(), users):
            CommCareUser.wrap(doc).add_to_assigned_locations(self.location)

    def save(self):
        # Diff the submitted selection against the current assignment and
        # apply only the changes.
        selected_users = set(self.cleaned_data['selected_ids'])
        previous_users = set(self.users_at_location)
        to_remove = previous_users - selected_users
        to_add = selected_users - previous_users
        self.unassign_users(to_remove)
        self.assign_users(to_add)
import { test } from '../../test';

// Hydration test config: the server renders 'Server' inside an {@html} block
// and the client must keep the server value, reporting a mismatch warning.
export default test({
	test(assert, target) {
		// This test case guards against a potential future bug where we could optimize html tags away for static content:
		// Even if the {@html } block seems static, it should be preserved as such, because it could be dynamic originally
		// (like {@html browser ? 'foo' : 'bar'} which is then different between client and server.
		// Also see https://github.com/sveltejs/svelte/issues/8683 where this happened for Svelte 4.
		assert.htmlEqual(target.innerHTML, 'Server');
	},
	errors: [
		'The value of an `{@html ...}` block changed between server and client renders. The client value will be ignored in favour of the server value'
	]
});
from __future__ import print_function
import os
import sys
# ANSI color escape sequences.
GREEN = '\033[32m'
YELLOW = '\033[33m'
RED = '\033[31m'
EXIT = '\033[0m'

# Message levels and the color each one maps to.
INFO = 'info'
WARNING = 'warning'
ERROR = 'error'

LEVEL_COLOR_MAP = {
    INFO: GREEN,
    WARNING: YELLOW,
    ERROR: RED
}


class Sender(object):
    """Formats and writes leveled, optionally colorized messages to stderr.

    Informational messages are green, warnings yellow and errors red.
    Color escapes are only added when the $TERM environment variable is set.
    """

    def __init__(self, prefix=None):
        # Fix: the default prefix of None used to raise
        # "TypeError: cannot concatenate 'str' and 'NoneType'" in format();
        # treat a missing prefix as an empty namespace instead.
        self._prefix = prefix if prefix is not None else ''

    def format(self, msg, level):
        """Return msg tagged with its level and this sender's prefix.

        Args:
            msg: Message text.
            level: One of INFO, WARNING, ERROR.

        Returns:
            The decorated message, colorized when $TERM is set.
        """
        msg = level.upper() + ': ' + self._prefix + ' ' + msg
        if os.getenv('TERM'):
            msg = LEVEL_COLOR_MAP[level] + msg + EXIT
        return msg

    def write(self, msg, level):
        """Print the formatted message to stderr.

        Args:
            msg: Message text.
            level: One of INFO, WARNING, ERROR.
        """
        print(self.format(msg, level), file=sys.stderr)

    def info(self, msg, **kwargs):
        self.write(msg, INFO, **kwargs)

    def warning(self, msg, **kwargs):
        self.write(msg, WARNING, **kwargs)

    def error(self, msg, **kwargs):
        self.write(msg, ERROR, **kwargs)

    def format_error(self, msg, **kwargs):
        return self.format(msg, ERROR, **kwargs)
from django.core import management
from django.test import TestCase
from models import Article
class SampleTestCase(TestCase):
    """Checks that fixtures declared on a TestCase load into package-defined models."""
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Test cases can load fixture objects into models defined in packages"
        # fixture1 + fixture2 together yield four Article rows.
        self.assertEqual(Article.objects.count(), 4)
        self.assertQuerysetEqual(
            Article.objects.all(),[
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
                "Python program becomes self aware"
            ],
            lambda a: a.headline
        )
class FixtureTestCase(TestCase):
    """Exercises the loaddata management command against package-defined models."""

    def test_initial_data(self):
        "Fixtures can load initial data into models defined in packages"
        #Syncdb introduces 1 initial data object from initial_data.json
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Python program becomes self aware"
            ],
            lambda a: a.headline
        )

    def test_loaddata(self):
        "Fixtures can load data into models defined in packages"
        # Load fixture 1. Single JSON file, with two objects
        management.call_command("loaddata", "fixture1.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Time to reform copyright",
                "Poker has no place on ESPN",
                "Python program becomes self aware",
            ],
            lambda a: a.headline,
        )

        # Load fixture 2. JSON file imported by default. Overwrites some
        # existing objects
        management.call_command("loaddata", "fixture2.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
                "Python program becomes self aware",
            ],
            lambda a: a.headline,
        )

        # Load a fixture that doesn't exist
        # (expected to be a no-op: existing data must be untouched).
        management.call_command("loaddata", "unknown.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
                "Python program becomes self aware",
            ],
            lambda a: a.headline,
        )
"""Unit tests of MQConnectionManager in the DIRAC.Resources.MessageQueue.MConnectionManager
Also, test of internal functions for mq connection storage.
"""
## ignore use of __functions, _functions
#pylint: disable=no-member, protected-access
import unittest
import mock
from DIRAC import S_OK
from DIRAC.Resources.MessageQueue.MQConnectionManager import MQConnectionManager
class TestMQConnectionManager( unittest.TestCase ):
    """Base fixture: builds an MQConnectionManager over a canned two-connection storage."""

    def setUp( self ):
        self.maxDiff = None  # To show full difference between structures in case of error
        # Connection 'mardirac3.in2p3.fr': two queues and one topic.
        dest = {}
        dest.update({'/queue/test1': ['producer4', 'consumer1', 'consumer2', 'consumer4']})
        dest.update({'/queue/test2': ['producer2', 'consumer1', 'consumer2']})
        dest.update({'/topic/test1': ['producer1']})
        # Connection 'testdir.blabla.ch': a single queue.
        dest4 = {'/queue/test3': ['producer1', 'consumer2','consumer3','consumer4']}
        conn1 = {'MQConnector':'TestConnector1', 'destinations':dest}
        conn2 = {'MQConnector':'TestConnector2', 'destinations':dest4}
        storage = {'mardirac3.in2p3.fr':conn1, 'testdir.blabla.ch':conn2}
        self.mgr = MQConnectionManager(connectionStorage = storage)

    def tearDown( self ):
        pass
class TestMQConnectionStorageFunctions_connectionExists( TestMQConnectionManager ):
    # __connectionExists() should be true only for connections in the storage.
    def test_success( self ):
        self.assertTrue(self.mgr._MQConnectionManager__connectionExists( 'mardirac3.in2p3.fr'))
    def test_failure( self ):
        self.assertFalse(self.mgr._MQConnectionManager__connectionExists( 'nonexisting'))

class TestMQConnectionStorageFunctions_destinationExists( TestMQConnectionManager ):
    # __destinationExists() requires both the connection and the destination.
    def test_success( self ):
        self.assertTrue(self.mgr._MQConnectionManager__destinationExists('mardirac3.in2p3.fr', '/queue/test1'))
    def test_failure( self ):
        self.assertFalse(self.mgr._MQConnectionManager__destinationExists( 'nonexisting', '/queue/test1'))
    def test_failure2( self ):
        self.assertFalse(self.mgr._MQConnectionManager__destinationExists('mardirac3.in2p3.fr', '/queue/nonexisting'))

class TestMQConnectionStorageFunctions_messengerExists( TestMQConnectionManager ):
    # __messengerExists() requires connection, destination and messenger id.
    def test_success( self ):
        self.assertTrue(self.mgr._MQConnectionManager__messengerExists('mardirac3.in2p3.fr', '/queue/test1','consumer2' ))
        self.assertTrue(self.mgr._MQConnectionManager__messengerExists('mardirac3.in2p3.fr', '/queue/test1','producer4' ))
    def test_failure( self ):
        self.assertFalse(self.mgr._MQConnectionManager__messengerExists('noexisting', '/queue/test1','producer4' ))
    def test_failure2( self ):
        self.assertFalse(self.mgr._MQConnectionManager__messengerExists( 'mardirac3.in2p3.fr', '/queue/nonexisting','producer4'))
    def test_failure3( self ):
        self.assertFalse(self.mgr._MQConnectionManager__messengerExists( 'mardirac3.in2p3.fr', '/queue/test1','producer10'))
class TestMQConnectionStorageFunctions_getConnection( TestMQConnectionManager ):
    # __getConnection() returns the stored dict, or {} for unknown connections.
    def test_success( self ):
        expectedConn = {'MQConnector':'TestConnector2', 'destinations':{'/queue/test3': ['producer1', 'consumer2','consumer3','consumer4']}}
        self.assertEqual(self.mgr._MQConnectionManager__getConnection('testdir.blabla.ch'),expectedConn)
    def test_failure( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getConnection('nonexisiting'), {})

class TestMQConnectionStorageFunctions_getAllConnections( TestMQConnectionManager ):
    # __getAllConnections() lists every stored connection name.
    def test_success( self ):
        expectedOutput = ['testdir.blabla.ch','mardirac3.in2p3.fr']
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllConnections()),sorted(expectedOutput))

class TestMQConnectionStorageFunctions_getConnector( TestMQConnectionManager ):
    # __getConnector() returns the connector object, or None for unknown names.
    def test_success( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getConnector('testdir.blabla.ch'),'TestConnector2')
    def test_failure( self ):
        self.assertIsNone(self.mgr._MQConnectionManager__getConnector('nonexisiting'))

class TestMQConnectionStorageFunctions_setConnector( TestMQConnectionManager ):
    # __setConnector() replaces the connector only for an existing connection.
    def test_success( self ):
        self.assertTrue(self.mgr._MQConnectionManager__setConnector('testdir.blabla.ch', 'TestConnector5'))
        self.assertEqual(self.mgr._MQConnectionManager__getConnector('testdir.blabla.ch'),'TestConnector5')
    def test_failure( self ):
        self.assertFalse(self.mgr._MQConnectionManager__setConnector('nonexisiting', 'TestConnector3'))
class TestMQConnectionStorageFunctions_getDestinations( TestMQConnectionManager ):
    # __getDestinations() returns the destination map, or {} for unknown connections.
    def test_success( self ):
        expectedDests ={'/queue/test1': ['producer4', 'consumer1', 'consumer2', 'consumer4'],
                        '/queue/test2': ['producer2', 'consumer1', 'consumer2'],
                        '/topic/test1': ['producer1']}
        self.assertEqual(self.mgr._MQConnectionManager__getDestinations('mardirac3.in2p3.fr'),expectedDests)
    def test_failure( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getDestinations('nonexisiting'), {})

class TestMQConnectionStorageFunctions_getMessengersId( TestMQConnectionManager ):
    # __getMessengersId() lists messengers on a destination, or [] if unknown.
    def test_success( self ):
        expectedMess =['producer4', 'consumer1', 'consumer2', 'consumer4']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', '/queue/test1'),expectedMess)
    def test_success2( self ):
        expectedMess2 =['producer2', 'consumer1', 'consumer2']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', '/queue/test2'),expectedMess2)
    def test_failure( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('nonexisiting', '/queue/test2'), [])
    def test_failure2( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', 'nonexisiting'), [])

class TestMQConnectionStorageFunctions_getMessengersIdWithType( TestMQConnectionManager ):
    # __getMessengersIdWithType() filters messengers by 'producer'/'consumer'.
    def test_success( self ):
        expectedMess =['producer4']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test1', 'producer'),expectedMess)
    def test_success2( self ):
        expectedMess2 =['producer2']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test2', 'producer'),expectedMess2)
    def test_success3( self ):
        expectedMess =[ 'consumer1', 'consumer2', 'consumer4']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test1', 'consumer'),expectedMess)
    def test_success4( self ):
        expectedMess2 =['consumer1', 'consumer2']
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test2', 'consumer'),expectedMess2)
    def test_failure( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('nonexisiting', '/queue/test2', 'producer'), [])
    def test_failure2( self ):
        self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', 'nonexisiting', 'producer'), [])
class TestMQConnectionStorageFunctions_getAllMessengersInfo( TestMQConnectionManager ):
    # __getAllMessengersInfo() yields 'connection/destination/messenger' strings.
    def test_success( self ):
        expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))

class TestMQConnectionStorageFunctions_getAllMessengersId( TestMQConnectionManager ):
    # __getAllMessengersId() returns bare messenger ids (duplicates preserved).
    def test_success( self ):
        expectedOutput= ['producer4', 'consumer1', 'consumer2', 'consumer4', 'producer2', 'consumer1', 'consumer2', 'producer1', 'producer1', 'consumer2', 'consumer3', 'consumer4']
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersId()),sorted(expectedOutput))

class TestMQConnectionStorageFunctions_getAllMessengersIdWithType( TestMQConnectionManager ):
    # __getAllMessengersIdWithType() filters the global id list by type.
    def test_success( self ):
        expectedOutput= ['consumer1', 'consumer2', 'consumer4', 'consumer1', 'consumer2','consumer2', 'consumer3', 'consumer4']
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType('consumer')),sorted(expectedOutput))
        expectedOutput= ['producer4', 'producer2', 'producer1', 'producer1']
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType('producer')),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_addMessenger( TestMQConnectionManager ):
    # __addMessenger() creates missing connections/destinations on demand,
    # but refuses to add an id that already exists on the destination.
    def test_success( self ):
        expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer1', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer1'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_success2( self ):
        # new queue
        expectedOutput= ['mardirac3.in2p3.fr/queue/test5/producer8', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test5', 'producer8'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_success3( self ):
        # new connection
        expectedOutput= ['mytest.is.the.best/queue/test10/producer24', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer24'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_success4( self ):
        # two times
        expectedOutput= ['mytest.is.the.best/queue/test10/producer2', 'mytest.is.the.best/queue/test10/producer24', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer24'))
        self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer2'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_failure( self ):
        # messenger already exists
        expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertFalse(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer4'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_removeMessenger( TestMQConnectionManager ):
    # __removeMessenger() prunes empty destinations/connections after removal
    # and returns False for unknown messenger/destination/connection.
    def test_success( self ):
        expectedOutput= [ 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer4'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_success2( self ):
        #remove whole destination /topic/test1 cause only one element
        expectedOutput= [ 'mardirac3.in2p3.fr/queue/test1/producer4','mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2','testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('mardirac3.in2p3.fr', '/topic/test1', 'producer1'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_success3( self ):
        expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1']
        #remove whole connection
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'producer1'))
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer2'))
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer3'))
        self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer4'))
        self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
    def test_failure( self ):
        #remove nonexisting messenger
        self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'producer10'))
    def test_failure2( self ):
        #remove nonexisting destination
        self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/nonexisting', 'producer1'))
    def test_failure3( self ):
        #remove nonexisting connection
        self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('nonexisting', '/queue/test103', 'producer1'))
class TestMQConnectionManager_addNewmessenger( TestMQConnectionManager ):
    # addNewMessenger() allocates the next free id for the requested type
    # on the destination described by the mqURI, creating storage entries
    # for unknown connections as needed.
    def test_success( self ):
        result = self.mgr.addNewMessenger(mqURI = "mardirac3.in2p3.fr::Queue::test1", messengerType = "producer" )
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'producer5')
        expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
    def test_success2( self ):
        result = self.mgr.addNewMessenger(mqURI = "mardirac3.in2p3.fr::Topic::test1", messengerType = "consumer" )
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'consumer5')
    def test_success3( self ):
        result = self.mgr.addNewMessenger(mqURI = "testdir.blabla.ch::Queue::test3", messengerType = "consumer" )
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'consumer5')
    def test_success4( self ):
        #connection does not exist
        result = self.mgr.addNewMessenger(mqURI = "noexisting.blabla.ch::Queue::test3", messengerType = "consumer" )
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'consumer5')
        expectedOutput= ['noexisting.blabla.ch/queue/test3/consumer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
class TestMQConnectionManager_startConnection( TestMQConnectionManager ):
    """Tests for MQConnectionManager.startConnection()."""

    def test_success(self):
        # The connection already exists, so no connector needs to be created.
        result = self.mgr.startConnection(mqURI="mardirac3.in2p3.fr::Queue::test1", params={}, messengerType="producer")
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'producer5')
        expected = [
            'mardirac3.in2p3.fr/queue/test1/producer5',
            'mardirac3.in2p3.fr/queue/test1/producer4',
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'mardirac3.in2p3.fr/topic/test1/producer1',
            'testdir.blabla.ch/queue/test3/producer1',
            'testdir.blabla.ch/queue/test3/consumer2',
            'testdir.blabla.ch/queue/test3/consumer3',
            'testdir.blabla.ch/queue/test3/consumer4',
        ]
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))

    @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.createConnectorAndConnect')
    def test_success2(self, mock_createConnectorAndConnect):
        # The connection does not exist: a connector is created and stored.
        mock_createConnectorAndConnect.return_value = S_OK('MyConnector')
        result = self.mgr.startConnection(mqURI="noexisting.blabla.ch::Queue::test3", params={}, messengerType="consumer")
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'consumer5')
        expected = [
            'noexisting.blabla.ch/queue/test3/consumer5',
            'mardirac3.in2p3.fr/queue/test1/producer4',
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'mardirac3.in2p3.fr/topic/test1/producer1',
            'testdir.blabla.ch/queue/test3/producer1',
            'testdir.blabla.ch/queue/test3/consumer2',
            'testdir.blabla.ch/queue/test3/consumer3',
            'testdir.blabla.ch/queue/test3/consumer4',
        ]
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))
        result = self.mgr.getConnector('noexisting.blabla.ch')
        self.assertEqual(result['Value'], 'MyConnector')
class TestMQConnectionManager_stopConnection( TestMQConnectionManager ):
    """Tests for MQConnectionManager.stopConnection()."""

    def test_success(self):
        result = self.mgr.stopConnection(mqURI="mardirac3.in2p3.fr::Queue::test1", messengerId="producer4")
        self.assertTrue(result['OK'])
        expected = [
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'mardirac3.in2p3.fr/topic/test1/producer1',
            'testdir.blabla.ch/queue/test3/producer1',
            'testdir.blabla.ch/queue/test3/consumer2',
            'testdir.blabla.ch/queue/test3/consumer3',
            'testdir.blabla.ch/queue/test3/consumer4',
        ]
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))

    def test_success2(self):
        result = self.mgr.stopConnection(mqURI="mardirac3.in2p3.fr::Topic::test1", messengerId="producer1")
        self.assertTrue(result['OK'])
        expected = [
            'mardirac3.in2p3.fr/queue/test1/producer4',
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'testdir.blabla.ch/queue/test3/producer1',
            'testdir.blabla.ch/queue/test3/consumer2',
            'testdir.blabla.ch/queue/test3/consumer3',
            'testdir.blabla.ch/queue/test3/consumer4',
        ]
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))

    @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.unsubscribe')
    @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect')
    def test_success3(self, mock_disconnect, mock_unsubscribe):
        # Stopping every messenger of the connection removes its entries entirely.
        mock_disconnect.return_value = S_OK()
        mock_unsubscribe.return_value = S_OK()
        for messenger in ('consumer3', 'producer1', 'consumer2', 'consumer4'):
            result = self.mgr.stopConnection(mqURI="testdir.blabla.ch::Queue::test3", messengerId=messenger)
            self.assertTrue(result['OK'])
        expected = [
            'mardirac3.in2p3.fr/queue/test1/producer4',
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'mardirac3.in2p3.fr/topic/test1/producer1',
        ]
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))
class TestMQConnectionManager_removeAllConnections( TestMQConnectionManager ):
    """Tests for MQConnectionManager.removeAllConnections()."""

    @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect')
    def test_success(self, mock_disconnect):
        mock_disconnect.return_value = S_OK()
        result = self.mgr.removeAllConnections()
        self.assertTrue(result['OK'])
        # No messenger may be left behind after a full teardown.
        expected = []
        result = self.mgr.getAllMessengers()
        self.assertEqual(sorted(result['Value']), sorted(expected))
class TestMQConnectionManager_getAllMessengers( TestMQConnectionManager ):
    """Tests for MQConnectionManager.getAllMessengers()."""

    def test_success(self):
        # Fixed: the original fetched the messenger list a second time before
        # comparing; one call suffices and asserts on a single snapshot.
        result = self.mgr.getAllMessengers()
        self.assertTrue(result['OK'])
        expected = [
            'mardirac3.in2p3.fr/queue/test1/producer4',
            'mardirac3.in2p3.fr/queue/test1/consumer1',
            'mardirac3.in2p3.fr/queue/test1/consumer2',
            'mardirac3.in2p3.fr/queue/test1/consumer4',
            'mardirac3.in2p3.fr/queue/test2/producer2',
            'mardirac3.in2p3.fr/queue/test2/consumer1',
            'mardirac3.in2p3.fr/queue/test2/consumer2',
            'mardirac3.in2p3.fr/topic/test1/producer1',
            'testdir.blabla.ch/queue/test3/producer1',
            'testdir.blabla.ch/queue/test3/consumer2',
            'testdir.blabla.ch/queue/test3/consumer3',
            'testdir.blabla.ch/queue/test3/consumer4',
        ]
        self.assertEqual(sorted(result['Value']), sorted(expected))
class TestMQConnectionManager_getConnector( TestMQConnectionManager ):
    """Tests for MQConnectionManager.getConnector()."""

    def test_success(self):
        result = self.mgr.getConnector('mardirac3.in2p3.fr')
        self.assertTrue(result['OK'])

    def test_failure(self):
        # Unknown connection names yield an error result, not an exception.
        result = self.mgr.getConnector('nonexistent.in2p3.fr')
        self.assertEqual(result['Message'], 'Failed to get the MQConnector!')
if __name__ == '__main__':
    # Improvement: build the suite by iterating over the test-case classes
    # instead of repeating one addTest() line per class.
    TEST_CASES = (
        TestMQConnectionManager_addNewmessenger,
        TestMQConnectionManager_startConnection,
        TestMQConnectionManager_stopConnection,
        TestMQConnectionManager_removeAllConnections,
        TestMQConnectionManager_getAllMessengers,
        TestMQConnectionManager_getConnector,
        TestMQConnectionStorageFunctions_connectionExists,
        TestMQConnectionStorageFunctions_destinationExists,
        TestMQConnectionStorageFunctions_messengerExists,
        TestMQConnectionStorageFunctions_getConnection,
        TestMQConnectionStorageFunctions_getAllConnections,
        TestMQConnectionStorageFunctions_getConnector,
        TestMQConnectionStorageFunctions_setConnector,
        TestMQConnectionStorageFunctions_getDestinations,
        TestMQConnectionStorageFunctions_getMessengersId,
        TestMQConnectionStorageFunctions_getMessengersIdWithType,
        TestMQConnectionStorageFunctions_addMessenger,
        TestMQConnectionStorageFunctions_removeMessenger,
        TestMQConnectionStorageFunctions_getAllMessengersInfo,
        TestMQConnectionStorageFunctions_getAllMessengersId,
        TestMQConnectionStorageFunctions_getAllMessengersIdWithType,
    )
    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(TestMQConnectionManager)
    for case in TEST_CASES:
        suite.addTest(loader.loadTestsFromTestCase(case))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.collect.Maps.immutableEntry;
import static com.google.common.collect.Maps.transformEntries;
import static com.google.common.collect.Maps.transformValues;
import static com.google.common.collect.Maps.unmodifiableNavigableMap;
import static com.google.common.collect.ReflectionFreeAssertThrows.assertThrows;
import static com.google.common.collect.testing.Helpers.mapEntry;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonMap;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.base.Converter;
import com.google.common.base.Equivalence;
import com.google.common.base.Function;
import com.google.common.base.Functions;
import com.google.common.collect.Maps.EntryTransformer;
import com.google.common.collect.Maps.ValueDifferenceImpl;
import com.google.common.testing.EqualsTester;
import com.google.common.testing.NullPointerTester;
import com.google.common.testing.SerializableTester;
import java.io.IOException;
import java.io.StringReader;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentMap;
import junit.framework.TestCase;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
* Unit test for {@code Maps}.
*
* @author Kevin Bourrillion
* @author Mike Bostock
* @author Jared Levy
*/
@GwtCompatible
@NullMarked
@SuppressWarnings("JUnitIncompatibleType") // Many intentional violations here.
public class MapsTest extends TestCase {
private static final Comparator<Integer> SOME_COMPARATOR = Collections.reverseOrder();
public void testHashMap() {
  // The no-arg factory must produce a map equal to Collections.emptyMap().
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  HashMap<Integer, Integer> created = Maps.newHashMap();
  assertEquals(emptyMap(), created);
}
public void testHashMapWithInitialMap() {
  // Copy-construction via the factory must preserve the source mappings.
  Map<String, Integer> source = new TreeMap<>();
  source.put("a", 1);
  source.put("b", 2);
  source.put("c", 3);
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  HashMap<String, Integer> copy = Maps.newHashMap(source);
  assertEquals(source, copy);
}
public void testHashMapGeneralizesTypes() {
  // The factory may widen key/value types; contents must still be equal.
  Map<String, Integer> source = new TreeMap<>();
  source.put("a", 1);
  source.put("b", 2);
  source.put("c", 3);
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  HashMap<Object, Object> widened = Maps.newHashMap(source);
  assertEquals(source, widened);
}
public void testCapacityForNegativeSizeFails() {
// A negative expected size is rejected up front.
assertThrows(IllegalArgumentException.class, () -> Maps.capacity(-1));
}
/**
 * Tests that nHMWES makes hash maps large enough that adding the expected number of elements
 * won't cause a rehash.
 *
 * <p>As of jdk7u40, HashMap has an empty-map optimization. The argument to new HashMap(int) is
 * noted, but the initial table is a zero-length array.
 *
 * <p>This test may fail miserably on non-OpenJDK environments...
 */
@J2ktIncompatible
@GwtIncompatible // reflection
@AndroidIncompatible // relies on assumptions about OpenJDK
public void testNewHashMapWithExpectedSize_wontGrow() throws Exception {
// before jdk7u40: creates one-bucket table
// after jdk7u40: creates empty table
assertThat(bucketsOf(Maps.newHashMapWithExpectedSize(0))).isAtMost(1);
// Two pre-sized maps per size: one filled incrementally, one via putAll
// (see assertWontGrow for the two insertion paths).
for (int size = 1; size < 200; size++) {
assertWontGrow(
size,
new HashMap<>(),
Maps.newHashMapWithExpectedSize(size),
Maps.newHashMapWithExpectedSize(size));
}
}
/** Same test as above but for newLinkedHashMapWithExpectedSize */
@J2ktIncompatible
@GwtIncompatible // reflection
@AndroidIncompatible // relies on assumptions about OpenJDK
public void testNewLinkedHashMapWithExpectedSize_wontGrow() throws Exception {
// An expected size of 0 may allocate at most a single bucket.
assertThat(bucketsOf(Maps.newLinkedHashMapWithExpectedSize(0))).isAtMost(1);
for (int size = 1; size < 200; size++) {
assertWontGrow(
size,
new LinkedHashMap<>(),
Maps.newLinkedHashMapWithExpectedSize(size),
Maps.newLinkedHashMapWithExpectedSize(size));
}
}
/**
 * Asserts that {@code map1} and {@code map2} (both created with an expected size of {@code
 * size}) never rehash while {@code size} entries are added — one entry at a time for {@code
 * map1}, in bulk via {@code putAll} for {@code map2} — and that the default-sized {@code
 * referenceMap} ends up needing at least as many buckets.
 */
@J2ktIncompatible
@GwtIncompatible // reflection
private static void assertWontGrow(
    int size,
    HashMap<Object, Object> referenceMap,
    HashMap<Object, Object> map1,
    HashMap<Object, Object> map2)
    throws Exception {
  // Only start measuring table size after the first element inserted, to
  // deal with empty-map optimization.
  map1.put(0, null);
  int initialBuckets = bucketsOf(map1);
  for (int i = 1; i < size; i++) {
    map1.put(i, null);
  }
  assertWithMessage("table size after adding %s elements", size)
      .that(bucketsOf(map1))
      .isEqualTo(initialBuckets);
  /*
   * Something slightly different happens when the entries are added all at
   * once; make sure that passes too.
   */
  map2.putAll(map1);
  // Fixed: the original re-measured map1 here, so the bulk-insert path
  // (map2) was never actually checked.
  assertWithMessage("table size after adding %s elements", size)
      .that(bucketsOf(map2))
      .isEqualTo(initialBuckets);
  // Ensure that referenceMap, which doesn't use WithExpectedSize, ends up with the same table
  // size as the other two maps. If it ended up with a smaller size that would imply that we
  // computed the wrong initial capacity.
  for (int i = 0; i < size; i++) {
    referenceMap.put(i, null);
  }
  assertWithMessage("table size after adding %s elements", size)
      .that(initialBuckets)
      .isAtMost(bucketsOf(referenceMap));
}
/** Returns the current bucket-array ("table") length of {@code hashMap}, read via reflection. */
@J2ktIncompatible
@GwtIncompatible // reflection
private static int bucketsOf(HashMap<?, ?> hashMap) throws Exception {
Field tableField = HashMap.class.getDeclaredField("table");
tableField.setAccessible(true);
Object[] table = (Object[]) tableField.get(hashMap);
// In JDK8, table is set lazily, so it may be null.
return table == null ? 0 : table.length;
}
public void testCapacityForLargeSizes() {
  // Near Integer.MAX_VALUE the computed capacity must never undershoot the
  // requested expected size (an overflow would make it small or negative).
  int[] sizes = {
    Integer.MAX_VALUE / 2 - 1,
    Integer.MAX_VALUE / 2,
    Integer.MAX_VALUE / 2 + 1,
    Integer.MAX_VALUE - 1,
    Integer.MAX_VALUE,
  };
  for (int expectedSize : sizes) {
    int capacity = Maps.capacity(expectedSize);
    assertTrue(
        "capacity (" + capacity + ") must be >= expectedSize (" + expectedSize + ")",
        capacity >= expectedSize);
  }
}
public void testLinkedHashMap() {
  // The no-arg factory must produce a map equal to Collections.emptyMap().
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  LinkedHashMap<Integer, Integer> created = Maps.newLinkedHashMap();
  assertEquals(emptyMap(), created);
}
public void testLinkedHashMapWithInitialMap() {
  // newLinkedHashMap(Map) must preserve the iteration order of the source.
  Map<String, String> map =
      new LinkedHashMap<String, String>(
          ImmutableMap.of(
              "Hello", "World",
              "first", "second",
              "polygene", "lubricants",
              "alpha", "betical"));
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  LinkedHashMap<String, String> copy = Maps.newLinkedHashMap(map);
  // Improvement: the original unrolled this walk into 20+ repetitive
  // assertions; iterating the expected pairs in order is equivalent.
  String[][] expectedOrder = {
    {"Hello", "World"},
    {"first", "second"},
    {"polygene", "lubricants"},
    {"alpha", "betical"},
  };
  Iterator<Entry<String, String>> iter = copy.entrySet().iterator();
  for (String[] pair : expectedOrder) {
    assertTrue(iter.hasNext());
    Entry<String, String> entry = iter.next();
    assertEquals(pair[0], entry.getKey());
    assertEquals(pair[1], entry.getValue());
  }
  assertFalse(iter.hasNext());
}
public void testLinkedHashMapGeneralizesTypes() {
  // The factory may widen key/value types; contents must still be equal.
  Map<String, Integer> source = new LinkedHashMap<>();
  source.put("a", 1);
  source.put("b", 2);
  source.put("c", 3);
  @SuppressWarnings("UseCollectionConstructor") // test of factory method
  HashMap<Object, Object> widened = Maps.<Object, Object>newLinkedHashMap(source);
  assertEquals(source, widened);
}
// Intentionally using IdentityHashMap to test creation.
@SuppressWarnings("IdentityHashMapBoxing")
public void testIdentityHashMap() {
  // The factory must produce an empty identity map.
  IdentityHashMap<Integer, Integer> created = Maps.newIdentityHashMap();
  assertEquals(emptyMap(), created);
}
public void testConcurrentMap() {
  // The factory must produce an empty concurrent map.
  ConcurrentMap<Integer, Integer> created = Maps.newConcurrentMap();
  assertEquals(emptyMap(), created);
}
public void testTreeMap() {
  // The no-arg factory yields an empty map using natural ordering
  // (comparator() reports null in that case).
  TreeMap<Integer, Integer> created = Maps.newTreeMap();
  assertEquals(emptyMap(), created);
  assertThat(created.comparator()).isNull();
}
public void testTreeMapDerived() {
  // Keys comparable only through a supertype still sort naturally.
  TreeMap<Derived, Integer> sorted = Maps.newTreeMap();
  assertEquals(emptyMap(), sorted);
  sorted.put(new Derived("foo"), 1);
  sorted.put(new Derived("bar"), 2);
  assertThat(sorted.keySet()).containsExactly(new Derived("bar"), new Derived("foo")).inOrder();
  assertThat(sorted.values()).containsExactly(2, 1).inOrder();
  assertThat(sorted.comparator()).isNull();
}
public void testTreeMapNonGeneric() {
  // Raw (pre-generics) Comparable keys still sort naturally.
  TreeMap<LegacyComparable, Integer> sorted = Maps.newTreeMap();
  assertEquals(emptyMap(), sorted);
  sorted.put(new LegacyComparable("foo"), 1);
  sorted.put(new LegacyComparable("bar"), 2);
  assertThat(sorted.keySet())
      .containsExactly(new LegacyComparable("bar"), new LegacyComparable("foo"))
      .inOrder();
  assertThat(sorted.values()).containsExactly(2, 1).inOrder();
  assertThat(sorted.comparator()).isNull();
}
public void testTreeMapWithComparator() {
  // The supplied comparator instance must be retained, not copied.
  TreeMap<Integer, Integer> created = Maps.newTreeMap(SOME_COMPARATOR);
  assertEquals(emptyMap(), created);
  assertSame(SOME_COMPARATOR, created.comparator());
}
public void testTreeMapWithInitialMap() {
  // Copying a sorted map must preserve both contents and comparator.
  SortedMap<Integer, Integer> map = Maps.newTreeMap();
  map.put(5, 10);
  map.put(3, 20);
  map.put(1, 30);
  TreeMap<Integer, Integer> copy = Maps.newTreeMap(map);
  // Fixed: JUnit's assertEquals/assertSame take (expected, actual); the
  // original passed the copy in the expected slot, garbling failure output.
  assertEquals(map, copy);
  assertSame(map.comparator(), copy.comparator());
}
/** Minimal enum used by the EnumMap factory tests below. */
public enum SomeEnum {
SOME_INSTANCE
}
public void testEnumMap() {
  // The Class-token factory yields an empty, fully usable EnumMap.
  EnumMap<SomeEnum, Integer> created = Maps.newEnumMap(SomeEnum.class);
  assertEquals(emptyMap(), created);
  created.put(SomeEnum.SOME_INSTANCE, 0);
  assertEquals(singletonMap(SomeEnum.SOME_INSTANCE, 0), created);
}
public void testEnumMapNullClass() {
  // A null Class token must be rejected immediately.
  Class<MapsTest.SomeEnum> nullType = null;
  assertThrows(NullPointerException.class, () -> Maps.<SomeEnum, Long>newEnumMap(nullType));
}
public void testEnumMapWithInitialEnumMap() {
  // Copying a non-empty EnumMap must preserve its contents.
  EnumMap<SomeEnum, Integer> source = Maps.newEnumMap(SomeEnum.class);
  source.put(SomeEnum.SOME_INSTANCE, 0);
  EnumMap<SomeEnum, Integer> copy = Maps.newEnumMap(source);
  assertEquals(source, copy);
}
public void testEnumMapWithInitialEmptyEnumMap() {
  // An empty EnumMap carries its key type, so copying works and yields a
  // distinct instance.
  EnumMap<SomeEnum, Integer> source = Maps.newEnumMap(SomeEnum.class);
  EnumMap<SomeEnum, Integer> copy = Maps.newEnumMap(source);
  assertEquals(source, copy);
  assertNotSame(source, copy);
}
public void testEnumMapWithInitialMap() {
  // A non-empty plain Map provides a key to infer the enum type from.
  HashMap<SomeEnum, Integer> source = new HashMap<>();
  source.put(SomeEnum.SOME_INSTANCE, 0);
  EnumMap<SomeEnum, Integer> copy = Maps.newEnumMap(source);
  assertEquals(source, copy);
}
public void testEnumMapWithInitialEmptyMap() {
  // An empty plain Map gives no key to infer the enum type from, so the
  // copy factory must reject it.
  Map<SomeEnum, Integer> source = new HashMap<>();
  assertThrows(IllegalArgumentException.class, () -> Maps.newEnumMap(source));
}
public void testToStringImplWithNullKeys() throws Exception {
  // toStringImpl must match HashMap's own toString, null keys included.
  Map<@Nullable String, String> reference = new HashMap<>();
  reference.put("foo", "bar");
  reference.put(null, "baz");
  assertEquals(reference.toString(), Maps.toStringImpl(reference));
}
public void testToStringImplWithNullValues() throws Exception {
  // toStringImpl must match HashMap's own toString, null values included.
  Map<String, @Nullable String> reference = new HashMap<>();
  reference.put("foo", "bar");
  reference.put("baz", null);
  assertEquals(reference.toString(), Maps.toStringImpl(reference));
}
@J2ktIncompatible
@GwtIncompatible // NullPointerTester
public void testNullPointerExceptions() {
// Every public static method of Maps must reject null arguments.
new NullPointerTester().testAllPublicStaticMethods(Maps.class);
}
// Shared fixtures for the Maps.difference tests below.
private static final Map<Integer, Integer> EMPTY = emptyMap();
private static final Map<Integer, Integer> SINGLETON = singletonMap(1, 2);
public void testMapDifferenceEmptyEmpty() {
  // Two empty maps are trivially equal; every partition view is empty.
  MapDifference<Integer, Integer> difference = Maps.difference(EMPTY, EMPTY);
  assertTrue(difference.areEqual());
  assertEquals(EMPTY, difference.entriesOnlyOnLeft());
  assertEquals(EMPTY, difference.entriesOnlyOnRight());
  assertEquals(EMPTY, difference.entriesInCommon());
  assertEquals(EMPTY, difference.entriesDiffering());
  assertEquals("equal", difference.toString());
}
public void testMapDifferenceEmptySingleton() {
  // The lone entry shows up only in the right-only partition.
  MapDifference<Integer, Integer> difference = Maps.difference(EMPTY, SINGLETON);
  assertFalse(difference.areEqual());
  assertEquals(EMPTY, difference.entriesOnlyOnLeft());
  assertEquals(SINGLETON, difference.entriesOnlyOnRight());
  assertEquals(EMPTY, difference.entriesInCommon());
  assertEquals(EMPTY, difference.entriesDiffering());
  assertEquals("not equal: only on right={1=2}", difference.toString());
}
public void testMapDifferenceSingletonEmpty() {
  // Mirror of the previous case: the entry lands in the left-only partition.
  MapDifference<Integer, Integer> difference = Maps.difference(SINGLETON, EMPTY);
  assertFalse(difference.areEqual());
  assertEquals(SINGLETON, difference.entriesOnlyOnLeft());
  assertEquals(EMPTY, difference.entriesOnlyOnRight());
  assertEquals(EMPTY, difference.entriesInCommon());
  assertEquals(EMPTY, difference.entriesDiffering());
  assertEquals("not equal: only on left={1=2}", difference.toString());
}
// Checks all four partitions and toString() for a typical overlap, in both
// directions of the comparison.
public void testMapDifferenceTypical() {
Map<Integer, String> left = ImmutableMap.of(1, "a", 2, "b", 3, "c", 4, "d", 5, "e");
Map<Integer, String> right = ImmutableMap.of(1, "a", 3, "f", 5, "g", 6, "z");
// Forward direction: left vs right.
MapDifference<Integer, String> diff1 = Maps.difference(left, right);
assertFalse(diff1.areEqual());
assertEquals(ImmutableMap.of(2, "b", 4, "d"), diff1.entriesOnlyOnLeft());
assertEquals(ImmutableMap.of(6, "z"), diff1.entriesOnlyOnRight());
assertEquals(ImmutableMap.of(1, "a"), diff1.entriesInCommon());
assertEquals(
ImmutableMap.of(
3, ValueDifferenceImpl.create("c", "f"), 5, ValueDifferenceImpl.create("e", "g")),
diff1.entriesDiffering());
assertEquals(
"not equal: only on left={2=b, 4=d}: only on right={6=z}: "
+ "value differences={3=(c, f), 5=(e, g)}",
diff1.toString());
// Reversed direction: the partitions and value differences swap sides.
MapDifference<Integer, String> diff2 = Maps.difference(right, left);
assertFalse(diff2.areEqual());
assertEquals(ImmutableMap.of(6, "z"), diff2.entriesOnlyOnLeft());
assertEquals(ImmutableMap.of(2, "b", 4, "d"), diff2.entriesOnlyOnRight());
assertEquals(ImmutableMap.of(1, "a"), diff2.entriesInCommon());
assertEquals(
ImmutableMap.of(
3, ValueDifferenceImpl.create("f", "c"), 5, ValueDifferenceImpl.create("g", "e")),
diff2.entriesDiffering());
assertEquals(
"not equal: only on left={6=z}: only on right={2=b, 4=d}: "
+ "value differences={3=(f, c), 5=(g, e)}",
diff2.toString());
}
public void testMapDifferenceEquals() {
  // equals() must distinguish comparison direction and differing values.
  Map<Integer, String> left = ImmutableMap.of(1, "a", 2, "b", 3, "c", 4, "d", 5, "e");
  Map<Integer, String> right = ImmutableMap.of(1, "a", 3, "f", 5, "g", 6, "z");
  Map<Integer, String> right2 = ImmutableMap.of(1, "a", 3, "h", 5, "g", 6, "z");
  new EqualsTester()
      .addEqualityGroup(Maps.difference(left, right), Maps.difference(left, right))
      .addEqualityGroup(Maps.difference(right, left))
      .addEqualityGroup(Maps.difference(left, right2))
      .testEquals();
}
public void testMapDifferencePredicateTypical() {
  // Same scenario as testMapDifferenceTypical, but with a custom value
  // equivalence (case-insensitive comparison).
  Map<Integer, String> left = ImmutableMap.of(1, "a", 2, "b", 3, "c", 4, "d", 5, "e");
  Map<Integer, String> right = ImmutableMap.of(1, "A", 3, "F", 5, "G", 6, "Z");
  // TODO(kevinb): replace with Ascii.caseInsensitiveEquivalence() when it
  // exists
  // Fixed: the bare String.toLowerCase() is locale-sensitive (e.g. the
  // Turkish dotless i); pin it to Locale.ROOT for stable behavior.
  Equivalence<String> caseInsensitiveEquivalence =
      Equivalence.equals()
          .onResultOf(
              new Function<String, String>() {
                @Override
                public String apply(String input) {
                  return input.toLowerCase(Locale.ROOT);
                }
              });
  MapDifference<Integer, String> diff1 = Maps.difference(left, right, caseInsensitiveEquivalence);
  assertFalse(diff1.areEqual());
  assertEquals(ImmutableMap.of(2, "b", 4, "d"), diff1.entriesOnlyOnLeft());
  assertEquals(ImmutableMap.of(6, "Z"), diff1.entriesOnlyOnRight());
  assertEquals(ImmutableMap.of(1, "a"), diff1.entriesInCommon());
  assertEquals(
      ImmutableMap.of(
          3, ValueDifferenceImpl.create("c", "F"), 5, ValueDifferenceImpl.create("e", "G")),
      diff1.entriesDiffering());
  assertEquals(
      "not equal: only on left={2=b, 4=d}: only on right={6=Z}: "
          + "value differences={3=(c, F), 5=(e, G)}",
      diff1.toString());
  MapDifference<Integer, String> diff2 = Maps.difference(right, left, caseInsensitiveEquivalence);
  assertFalse(diff2.areEqual());
  assertEquals(ImmutableMap.of(6, "Z"), diff2.entriesOnlyOnLeft());
  assertEquals(ImmutableMap.of(2, "b", 4, "d"), diff2.entriesOnlyOnRight());
  assertEquals(ImmutableMap.of(1, "A"), diff2.entriesInCommon());
  assertEquals(
      ImmutableMap.of(
          3, ValueDifferenceImpl.create("F", "c"), 5, ValueDifferenceImpl.create("G", "e")),
      diff2.entriesDiffering());
  assertEquals(
      "not equal: only on left={6=Z}: only on right={2=b, 4=d}: "
          + "value differences={3=(F, c), 5=(G, e)}",
      diff2.toString());
}
// Shared sorted-map fixtures for the SortedMapDifference tests below.
private static final SortedMap<Integer, Integer> SORTED_EMPTY = Maps.newTreeMap();
private static final ImmutableSortedMap<Integer, Integer> SORTED_SINGLETON =
ImmutableSortedMap.of(1, 2);
public void testMapDifferenceOfSortedMapIsSorted() {
  // difference() must return the sorted specialization for sorted inputs.
  Map<Integer, Integer> map = SORTED_SINGLETON;
  MapDifference<Integer, Integer> difference = Maps.difference(map, EMPTY);
  // Improvement: Truth's isInstanceOf reports the actual runtime class on
  // failure, unlike a bare assertTrue over an instanceof expression.
  assertThat(difference).isInstanceOf(SortedMapDifference.class);
}
public void testSortedMapDifferenceEmptyEmpty() {
  // Two empty sorted maps are equal; every partition view is empty.
  SortedMapDifference<Integer, Integer> difference = Maps.difference(SORTED_EMPTY, SORTED_EMPTY);
  assertTrue(difference.areEqual());
  assertEquals(SORTED_EMPTY, difference.entriesOnlyOnLeft());
  assertEquals(SORTED_EMPTY, difference.entriesOnlyOnRight());
  assertEquals(SORTED_EMPTY, difference.entriesInCommon());
  assertEquals(SORTED_EMPTY, difference.entriesDiffering());
  assertEquals("equal", difference.toString());
}
public void testSortedMapDifferenceEmptySingleton() {
  // The lone entry shows up only in the right-only partition.
  SortedMapDifference<Integer, Integer> difference =
      Maps.difference(SORTED_EMPTY, SORTED_SINGLETON);
  assertFalse(difference.areEqual());
  assertEquals(SORTED_EMPTY, difference.entriesOnlyOnLeft());
  assertEquals(SORTED_SINGLETON, difference.entriesOnlyOnRight());
  assertEquals(SORTED_EMPTY, difference.entriesInCommon());
  assertEquals(SORTED_EMPTY, difference.entriesDiffering());
  assertEquals("not equal: only on right={1=2}", difference.toString());
}
public void testSortedMapDifferenceSingletonEmpty() {
  // Mirror case: the entry lands in the left-only partition.
  SortedMapDifference<Integer, Integer> difference =
      Maps.difference(SORTED_SINGLETON, SORTED_EMPTY);
  assertFalse(difference.areEqual());
  assertEquals(SORTED_SINGLETON, difference.entriesOnlyOnLeft());
  assertEquals(SORTED_EMPTY, difference.entriesOnlyOnRight());
  assertEquals(SORTED_EMPTY, difference.entriesInCommon());
  assertEquals(SORTED_EMPTY, difference.entriesDiffering());
  assertEquals("not equal: only on left={1=2}", difference.toString());
}
public void testSortedMapDifferenceTypical() {
  // The views must follow the LEFT map's (reverse) ordering.
  SortedMap<Integer, String> left =
      ImmutableSortedMap.<Integer, String>reverseOrder()
          .put(1, "a")
          .put(2, "b")
          .put(3, "c")
          .put(4, "d")
          .put(5, "e")
          .build();
  SortedMap<Integer, String> right = ImmutableSortedMap.of(1, "a", 3, "f", 5, "g", 6, "z");
  SortedMapDifference<Integer, String> diff1 = Maps.difference(left, right);
  assertFalse(diff1.areEqual());
  assertThat(diff1.entriesOnlyOnLeft().entrySet())
      .containsExactly(immutableEntry(4, "d"), immutableEntry(2, "b"))
      .inOrder();
  assertThat(diff1.entriesOnlyOnRight().entrySet()).contains(immutableEntry(6, "z"));
  assertThat(diff1.entriesInCommon().entrySet()).contains(immutableEntry(1, "a"));
  assertThat(diff1.entriesDiffering().entrySet())
      .containsExactly(
          immutableEntry(5, ValueDifferenceImpl.create("e", "g")),
          immutableEntry(3, ValueDifferenceImpl.create("c", "f")))
      .inOrder();
  assertEquals(
      "not equal: only on left={4=d, 2=b}: only on right={6=z}: "
          + "value differences={5=(e, g), 3=(c, f)}",
      diff1.toString());
  SortedMapDifference<Integer, String> diff2 = Maps.difference(right, left);
  assertFalse(diff2.areEqual());
  assertThat(diff2.entriesOnlyOnLeft().entrySet()).contains(immutableEntry(6, "z"));
  assertThat(diff2.entriesOnlyOnRight().entrySet())
      .containsExactly(immutableEntry(2, "b"), immutableEntry(4, "d"))
      .inOrder();
  // Fixed: this assertion previously re-checked diff1 (copy-paste), leaving
  // diff2.entriesInCommon() untested.
  assertThat(diff2.entriesInCommon().entrySet()).contains(immutableEntry(1, "a"));
  assertEquals(
      ImmutableMap.of(
          3, ValueDifferenceImpl.create("f", "c"),
          5, ValueDifferenceImpl.create("g", "e")),
      diff2.entriesDiffering());
  assertEquals(
      "not equal: only on left={6=z}: only on right={2=b, 4=d}: "
          + "value differences={3=(f, c), 5=(g, e)}",
      diff2.toString());
}
  /**
   * Verifies that a sorted-map difference is computed eagerly (mutating {@code left} afterwards
   * does not change it) and that its entry views reject mutation.
   */
  public void testSortedMapDifferenceImmutable() {
    SortedMap<Integer, String> left =
        Maps.newTreeMap(ImmutableSortedMap.of(1, "a", 2, "b", 3, "c", 4, "d", 5, "e"));
    SortedMap<Integer, String> right =
        Maps.newTreeMap(ImmutableSortedMap.of(1, "a", 3, "f", 5, "g", 6, "z"));
    SortedMapDifference<Integer, String> diff1 = Maps.difference(left, right);
    // Post-difference mutation must not be reflected in diff1.
    left.put(6, "z");
    assertFalse(diff1.areEqual());
    assertThat(diff1.entriesOnlyOnLeft().entrySet())
        .containsExactly(immutableEntry(2, "b"), immutableEntry(4, "d"))
        .inOrder();
    assertThat(diff1.entriesOnlyOnRight().entrySet()).contains(immutableEntry(6, "z"));
    assertThat(diff1.entriesInCommon().entrySet()).contains(immutableEntry(1, "a"));
    assertThat(diff1.entriesDiffering().entrySet())
        .containsExactly(
            immutableEntry(3, ValueDifferenceImpl.create("c", "f")),
            immutableEntry(5, ValueDifferenceImpl.create("e", "g")))
        .inOrder();
    assertThrows(UnsupportedOperationException.class, () -> diff1.entriesInCommon().put(7, "x"));
    assertThrows(UnsupportedOperationException.class, () -> diff1.entriesOnlyOnLeft().put(7, "x"));
    assertThrows(UnsupportedOperationException.class, () -> diff1.entriesOnlyOnRight().put(7, "x"));
  }
  /** Checks equals/hashCode of sorted-map differences: same inputs are equal, others are not. */
  public void testSortedMapDifferenceEquals() {
    SortedMap<Integer, String> left = ImmutableSortedMap.of(1, "a", 2, "b", 3, "c", 4, "d", 5, "e");
    SortedMap<Integer, String> right = ImmutableSortedMap.of(1, "a", 3, "f", 5, "g", 6, "z");
    SortedMap<Integer, String> right2 = ImmutableSortedMap.of(1, "a", 3, "h", 5, "g", 6, "z");
    SortedMapDifference<Integer, String> original = Maps.difference(left, right);
    SortedMapDifference<Integer, String> same = Maps.difference(left, right);
    SortedMapDifference<Integer, String> reverse = Maps.difference(right, left);
    SortedMapDifference<Integer, String> diff2 = Maps.difference(left, right2);
    new EqualsTester()
        .addEqualityGroup(original, same)
        .addEqualityGroup(reverse)
        .addEqualityGroup(diff2)
        .testEquals();
  }
private static final Function<String, Integer> LENGTH_FUNCTION =
new Function<String, Integer>() {
@Override
public Integer apply(String input) {
return input.length();
}
};
  /** Basic {@code Maps.asMap}: lookups go through the function; absent keys return null. */
  public void testAsMap() {
    Set<String> strings = ImmutableSet.of("one", "two", "three");
    Map<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(5), map.get("three"));
    // "five" is not in the backing set, so the view reports no mapping.
    assertThat(map.get("five")).isNull();
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("two", 3), mapEntry("three", 5))
        .inOrder();
  }
  /** {@code Maps.asMap} is a live view: additions to the backing set appear in the map. */
  public void testAsMapReadsThrough() {
    Set<String> strings = new LinkedHashSet<>();
    Collections.addAll(strings, "one", "two", "three");
    Map<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.get("four")).isNull();
    strings.add("four");
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5, "four", 4), map);
    assertEquals(Integer.valueOf(4), map.get("four"));
  }
  /** Removal through the {@code asMap} view writes through to the backing set. */
  public void testAsMapWritesThrough() {
    Set<String> strings = new LinkedHashSet<>();
    Collections.addAll(strings, "one", "two", "three");
    Map<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(3), map.remove("two"));
    assertThat(strings).containsExactly("one", "three").inOrder();
  }
  /** {@code Maps.asMap} over an empty set is an empty map. */
  public void testAsMapEmpty() {
    Set<String> strings = ImmutableSet.of();
    Map<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertThat(map.entrySet()).isEmpty();
    assertTrue(map.isEmpty());
    assertThat(map.get("five")).isNull();
  }
  /**
   * A SortedSet that is deliberately NOT a NavigableSet, so tests can exercise the
   * SortedMap-returning overload of {@code Maps.asMap}.
   */
  private static class NonNavigableSortedSet extends ForwardingSortedSet<String> {
    private final SortedSet<String> delegate = Sets.newTreeSet();

    @Override
    protected SortedSet<String> delegate() {
      return delegate;
    }
  }
  /** {@code Maps.asMap} on a (non-navigable) SortedSet: ordering, tailMap, and subMap views. */
  public void testAsMapSorted() {
    SortedSet<String> strings = new NonNavigableSortedSet();
    Collections.addAll(strings, "one", "two", "three");
    SortedMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(5), map.get("three"));
    assertThat(map.get("five")).isNull();
    // Natural (alphabetical) order: one < three < two.
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
    assertThat(map.tailMap("onea").entrySet())
        .containsExactly(mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
    assertThat(map.subMap("one", "two").entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("three", 5))
        .inOrder();
  }
  /** Sorted {@code asMap} view (and its headMap) tracks additions/removals in the backing set. */
  public void testAsMapSortedReadsThrough() {
    SortedSet<String> strings = new NonNavigableSortedSet();
    Collections.addAll(strings, "one", "two", "three");
    SortedMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    // Natural ordering => no explicit comparator.
    assertThat(map.comparator()).isNull();
    assertEquals(ImmutableSortedMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.get("four")).isNull();
    strings.add("four");
    assertEquals(ImmutableSortedMap.of("one", 3, "two", 3, "three", 5, "four", 4), map);
    assertEquals(Integer.valueOf(4), map.get("four"));
    SortedMap<String, Integer> headMap = map.headMap("two");
    assertEquals(ImmutableSortedMap.of("four", 4, "one", 3, "three", 5), headMap);
    // The sub-view is live as well.
    strings.add("five");
    strings.remove("one");
    assertEquals(ImmutableSortedMap.of("five", 4, "four", 4, "three", 5), headMap);
    assertThat(map.entrySet())
        .containsExactly(
            mapEntry("five", 4), mapEntry("four", 4), mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
  }
  /** Removal through the sorted {@code asMap} view writes through to the backing set. */
  public void testAsMapSortedWritesThrough() {
    SortedSet<String> strings = new NonNavigableSortedSet();
    Collections.addAll(strings, "one", "two", "three");
    SortedMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(3), map.remove("two"));
    assertThat(strings).containsExactly("one", "three").inOrder();
  }
  /** Key sets of sorted sub-views must reject {@code add}. */
  public void testAsMapSortedSubViewKeySetsDoNotSupportAdd() {
    SortedMap<String, Integer> map = Maps.asMap(new NonNavigableSortedSet(), LENGTH_FUNCTION);
    assertThrows(UnsupportedOperationException.class, () -> map.subMap("a", "z").keySet().add("a"));
    assertThrows(UnsupportedOperationException.class, () -> map.tailMap("a").keySet().add("a"));
    assertThrows(UnsupportedOperationException.class, () -> map.headMap("r").keySet().add("a"));
    assertThrows(
        UnsupportedOperationException.class, () -> map.headMap("r").tailMap("m").keySet().add("a"));
  }
  /** Sorted {@code asMap} over an empty set is an empty map. */
  public void testAsMapSortedEmpty() {
    SortedSet<String> strings = new NonNavigableSortedSet();
    SortedMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertThat(map.entrySet()).isEmpty();
    assertTrue(map.isEmpty());
    assertThat(map.get("five")).isNull();
  }
  @GwtIncompatible // NavigableMap
  /** Full tour of the NavigableMap returned by {@code Maps.asMap(NavigableSet, Function)}. */
  public void testAsMapNavigable() {
    NavigableSet<String> strings = Sets.newTreeSet(asList("one", "two", "three"));
    NavigableMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(5), map.get("three"));
    assertThat(map.get("five")).isNull();
    // Alphabetical order: one < three < two.
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
    assertThat(map.tailMap("onea").entrySet())
        .containsExactly(mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
    assertThat(map.subMap("one", "two").entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("three", 5))
        .inOrder();
    // Inclusive/exclusive bound variants.
    assertEquals(ImmutableSortedMap.of("two", 3, "three", 5), map.tailMap("three", true));
    assertEquals(ImmutableSortedMap.of("one", 3, "three", 5), map.headMap("two", false));
    assertEquals(ImmutableSortedMap.of("three", 5), map.subMap("one", false, "tr", true));
    // Navigation queries.
    assertEquals("three", map.higherKey("one"));
    assertEquals("three", map.higherKey("r"));
    assertEquals("three", map.ceilingKey("r"));
    assertEquals("one", map.ceilingKey("one"));
    assertEquals(mapEntry("three", 5), map.higherEntry("one"));
    assertEquals(mapEntry("one", 3), map.ceilingEntry("one"));
    assertEquals("one", map.lowerKey("three"));
    assertEquals("one", map.lowerKey("r"));
    assertEquals("one", map.floorKey("r"));
    assertEquals("three", map.floorKey("three"));
    assertThat(map.descendingMap().entrySet())
        .containsExactly(mapEntry("two", 3), mapEntry("three", 5), mapEntry("one", 3))
        .inOrder();
    assertEquals(map.headMap("three", true), map.descendingMap().tailMap("three", true));
    assertThat(map.tailMap("three", false).entrySet()).contains(mapEntry("two", 3));
    assertThat(map.tailMap("three", true).lowerEntry("three")).isNull();
    assertThat(map.headMap("two", false).values()).containsExactly(3, 5).inOrder();
    assertThat(map.headMap("two", false).descendingMap().values()).containsExactly(5, 3).inOrder();
    assertThat(map.descendingKeySet()).containsExactly("two", "three", "one").inOrder();
    // poll* mutates the backing set through the view.
    assertEquals(mapEntry("one", 3), map.pollFirstEntry());
    assertEquals(mapEntry("two", 3), map.pollLastEntry());
    assertEquals(1, map.size());
  }
  @GwtIncompatible // NavigableMap
  /** Navigable {@code asMap} view and its sub-views track mutations to the backing set. */
  public void testAsMapNavigableReadsThrough() {
    NavigableSet<String> strings = Sets.newTreeSet();
    Collections.addAll(strings, "one", "two", "three");
    NavigableMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertThat(map.comparator()).isNull();
    assertEquals(ImmutableSortedMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.get("four")).isNull();
    strings.add("four");
    assertEquals(ImmutableSortedMap.of("one", 3, "two", 3, "three", 5, "four", 4), map);
    assertEquals(Integer.valueOf(4), map.get("four"));
    SortedMap<String, Integer> headMap = map.headMap("two");
    assertEquals(ImmutableSortedMap.of("four", 4, "one", 3, "three", 5), headMap);
    strings.add("five");
    strings.remove("one");
    assertEquals(ImmutableSortedMap.of("five", 4, "four", 4, "three", 5), headMap);
    assertThat(map.entrySet())
        .containsExactly(
            mapEntry("five", 4), mapEntry("four", 4), mapEntry("three", 5), mapEntry("two", 3))
        .inOrder();
    // Sub-views created before a mutation still reflect it afterwards.
    NavigableMap<String, Integer> tailMap = map.tailMap("s", true);
    NavigableMap<String, Integer> subMap = map.subMap("a", true, "t", false);
    strings.add("six");
    strings.remove("two");
    assertThat(tailMap.entrySet())
        .containsExactly(mapEntry("six", 3), mapEntry("three", 5))
        .inOrder();
    assertThat(subMap.entrySet())
        .containsExactly(mapEntry("five", 4), mapEntry("four", 4), mapEntry("six", 3))
        .inOrder();
  }
  @GwtIncompatible // NavigableMap
  /** remove/pollLastEntry on the navigable {@code asMap} view write through to the set. */
  public void testAsMapNavigableWritesThrough() {
    NavigableSet<String> strings = Sets.newTreeSet();
    Collections.addAll(strings, "one", "two", "three");
    NavigableMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertEquals(Integer.valueOf(3), map.remove("two"));
    assertThat(strings).containsExactly("one", "three").inOrder();
    assertEquals(mapEntry("three", 5), map.subMap("one", false, "zzz", true).pollLastEntry());
    assertThat(strings).contains("one");
  }
  @GwtIncompatible // NavigableMap
  /** Key sets of navigable sub-views must reject {@code add}. */
  public void testAsMapNavigableSubViewKeySetsDoNotSupportAdd() {
    NavigableMap<String, Integer> map = Maps.asMap(Sets.<String>newTreeSet(), LENGTH_FUNCTION);
    assertThrows(UnsupportedOperationException.class, () -> map.descendingKeySet().add("a"));
    assertThrows(
        UnsupportedOperationException.class,
        () -> map.subMap("a", true, "z", false).keySet().add("a"));
    assertThrows(
        UnsupportedOperationException.class, () -> map.tailMap("a", true).keySet().add("a"));
    assertThrows(
        UnsupportedOperationException.class, () -> map.headMap("r", true).keySet().add("a"));
    assertThrows(
        UnsupportedOperationException.class,
        () -> map.headMap("r", false).tailMap("m", true).keySet().add("a"));
  }
  @GwtIncompatible // NavigableMap
  /** Navigable {@code asMap} over an empty set is an empty map. */
  public void testAsMapNavigableEmpty() {
    NavigableSet<String> strings = ImmutableSortedSet.of();
    NavigableMap<String, Integer> map = Maps.asMap(strings, LENGTH_FUNCTION);
    assertThat(map.entrySet()).isEmpty();
    assertTrue(map.isEmpty());
    assertThat(map.get("five")).isNull();
  }
  /** {@code Maps.toMap} builds an immutable map preserving iteration order of the keys. */
  public void testToMap() {
    Iterable<String> strings = ImmutableList.of("one", "two", "three");
    ImmutableMap<String, Integer> map = Maps.toMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("two", 3), mapEntry("three", 5))
        .inOrder();
  }
  /** {@code Maps.toMap} accepts an Iterator with the same result as the Iterable overload. */
  public void testToMapIterator() {
    Iterator<String> strings = ImmutableList.of("one", "two", "three").iterator();
    ImmutableMap<String, Integer> map = Maps.toMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("two", 3), mapEntry("three", 5))
        .inOrder();
  }
  /** Duplicate keys in the input are collapsed (first occurrence wins for ordering). */
  public void testToMapWithDuplicateKeys() {
    Iterable<String> strings = ImmutableList.of("one", "two", "three", "two", "one");
    ImmutableMap<String, Integer> map = Maps.toMap(strings, LENGTH_FUNCTION);
    assertEquals(ImmutableMap.of("one", 3, "two", 3, "three", 5), map);
    assertThat(map.entrySet())
        .containsExactly(mapEntry("one", 3), mapEntry("two", 3), mapEntry("three", 5))
        .inOrder();
  }
  /** A null key in the input makes {@code Maps.toMap} throw NPE. */
  public void testToMapWithNullKeys() {
    Iterable<@Nullable String> strings = asList("one", null, "three");
    assertThrows(
        NullPointerException.class,
        () -> Maps.toMap((Iterable<String>) strings, Functions.constant("foo")));
  }
  /** A function producing null values makes {@code Maps.toMap} throw NPE. */
  public void testToMapWithNullValues() {
    Iterable<String> strings = ImmutableList.of("one", "two", "three");
    assertThrows(NullPointerException.class, () -> Maps.toMap(strings, Functions.constant(null)));
  }
private static final ImmutableBiMap<Integer, String> INT_TO_STRING_MAP =
new ImmutableBiMap.Builder<Integer, String>()
.put(1, "one")
.put(2, "two")
.put(3, "three")
.build();
  /** uniqueIndex over a Collection: indexing values by inverse lookup rebuilds the bimap. */
  public void testUniqueIndexCollection() {
    ImmutableMap<Integer, String> outputMap =
        Maps.uniqueIndex(INT_TO_STRING_MAP.values(), Functions.forMap(INT_TO_STRING_MAP.inverse()));
    assertEquals(INT_TO_STRING_MAP, outputMap);
  }
public void testUniqueIndexIterable() {
ImmutableMap<Integer, String> outputMap =
Maps.uniqueIndex(
new Iterable<String>() {
@Override
public Iterator<String> iterator() {
return INT_TO_STRING_MAP.values().iterator();
}
},
Functions.forMap(INT_TO_STRING_MAP.inverse()));
assertEquals(INT_TO_STRING_MAP, outputMap);
}
  /** uniqueIndex over an Iterator behaves the same as over a Collection. */
  public void testUniqueIndexIterator() {
    ImmutableMap<Integer, String> outputMap =
        Maps.uniqueIndex(
            INT_TO_STRING_MAP.values().iterator(), Functions.forMap(INT_TO_STRING_MAP.inverse()));
    assertEquals(INT_TO_STRING_MAP, outputMap);
  }
  /** Can't create the map if more than one value maps to the same key. */
  public void testUniqueIndexDuplicates() {
    IllegalArgumentException expected =
        assertThrows(
            IllegalArgumentException.class,
            () -> Maps.uniqueIndex(ImmutableSet.of("one", "uno"), Functions.constant(1)));
    // The error message should point the user at Multimaps.index as the alternative.
    assertThat(expected).hasMessageThat().contains("Multimaps.index");
  }
  /** Null values are not allowed. */
  public void testUniqueIndexNullValue() {
    List<@Nullable String> listWithNull = Lists.newArrayList((String) null);
    assertThrows(
        NullPointerException.class,
        () -> Maps.uniqueIndex((List<String>) listWithNull, Functions.constant(1)));
  }
  /** Null keys aren't allowed either. */
  public void testUniqueIndexNullKey() {
    List<String> oneStringList = Lists.newArrayList("foo");
    assertThrows(
        NullPointerException.class,
        () -> Maps.uniqueIndex(oneStringList, Functions.constant(null)));
  }
  @J2ktIncompatible
  @GwtIncompatible // Maps.fromProperties
  /**
   * {@code Maps.fromProperties}: empty properties, incrementally set properties, stream-loaded
   * entries, system properties, and defaults chaining are all reflected as an immutable snapshot.
   */
  public void testFromProperties() throws IOException {
    Properties testProp = new Properties();

    Map<String, String> result = Maps.fromProperties(testProp);
    assertTrue(result.isEmpty());
    testProp.setProperty("first", "true");

    result = Maps.fromProperties(testProp);
    assertEquals("true", result.get("first"));
    assertEquals(1, result.size());
    testProp.setProperty("second", "null");

    result = Maps.fromProperties(testProp);
    assertEquals("true", result.get("first"));
    assertEquals("null", result.get("second"));
    assertEquals(2, result.size());

    // Now test values loaded from a stream.
    String props = "test\n second = 2\n Third item :   a short  phrase   ";

    testProp.load(new StringReader(props));

    result = Maps.fromProperties(testProp);
    assertEquals(4, result.size());
    assertEquals("true", result.get("first"));
    // A bare key in the properties format maps to the empty string.
    assertEquals("", result.get("test"));
    assertEquals("2", result.get("second"));
    assertEquals("item :   a short  phrase   ", result.get("Third"));
    assertFalse(result.containsKey("not here"));

    // Test loading system properties
    result = Maps.fromProperties(System.getProperties());
    assertTrue(result.containsKey("java.version"));

    // Test that defaults work, too.
    testProp = new Properties(System.getProperties());
    String override = "test\njava.version : hidden";

    testProp.load(new StringReader(override));

    result = Maps.fromProperties(testProp);
    assertThat(result.size()).isGreaterThan(2);
    assertEquals("", result.get("test"));
    // A locally-set property shadows the default from System.getProperties().
    assertEquals("hidden", result.get("java.version"));
    assertNotSame(System.getProperty("java.version"), result.get("java.version"));
  }
  @J2ktIncompatible
  @GwtIncompatible // Maps.fromProperties
  /** A Properties object that reports a null key makes {@code fromProperties} throw NPE. */
  public void testFromPropertiesNullKey() {
    Properties properties =
        new Properties() {
          @Override
          public Enumeration<?> propertyNames() {
            return Iterators.asEnumeration(asList(null, "first", "second").iterator());
          }
        };
    properties.setProperty("first", "true");
    properties.setProperty("second", "null");

    assertThrows(NullPointerException.class, () -> Maps.fromProperties(properties));
  }
  @J2ktIncompatible
  @GwtIncompatible // Maps.fromProperties
  /** Non-String keys from a misbehaving Properties subclass cause ClassCastException. */
  public void testFromPropertiesNonStringKeys() {
    Properties properties =
        new Properties() {
          @Override
          public Enumeration<?> propertyNames() {
            return Iterators.asEnumeration(
                Arrays.<Object>asList(Integer.valueOf(123), "first").iterator());
          }
        };

    assertThrows(ClassCastException.class, () -> Maps.fromProperties(properties));
  }
  /** Forward conversion through {@code Maps.asConverter} yields the bimap's values. */
  public void testAsConverter_nominal() throws Exception {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    for (Entry<String, Integer> entry : biMap.entrySet()) {
      // assertSame: the converter must return the exact mapped instance, not a copy.
      assertSame(entry.getValue(), converter.convert(entry.getKey()));
    }
  }
  /** Reverse conversion through {@code Maps.asConverter} yields the bimap's keys. */
  public void testAsConverter_inverse() throws Exception {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    for (Entry<String, Integer> entry : biMap.entrySet()) {
      assertSame(entry.getKey(), converter.reverse().convert(entry.getValue()));
    }
  }
  /** Converting a key absent from the bimap throws IllegalArgumentException. */
  public void testAsConverter_noMapping() throws Exception {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    assertThrows(IllegalArgumentException.class, () -> converter.convert("three"));
  }
  /** Converters pass null straight through in both directions. */
  public void testAsConverter_nullConversions() throws Exception {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    assertThat(converter.convert(null)).isNull();
    assertThat(converter.reverse().convert(null)).isNull();
  }
  /** The converter is a live view: entries added to the bimap later become convertible. */
  public void testAsConverter_isAView() throws Exception {
    BiMap<String, Integer> biMap = HashBiMap.create();
    biMap.put("one", 1);
    biMap.put("two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);

    assertEquals((Integer) 1, converter.convert("one"));
    assertEquals((Integer) 2, converter.convert("two"));
    assertThrows(IllegalArgumentException.class, () -> converter.convert("three"));

    biMap.put("three", 3);

    assertEquals((Integer) 1, converter.convert("one"));
    assertEquals((Integer) 2, converter.convert("two"));
    assertEquals((Integer) 3, converter.convert("three"));
  }
  /** A key mapped to null is treated as unmapped: conversion throws IllegalArgumentException. */
  public void testAsConverter_withNullMapping() throws Exception {
    BiMap<String, @Nullable Integer> biMap = HashBiMap.create();
    biMap.put("one", 1);
    biMap.put("two", 2);
    biMap.put("three", null);
    assertThrows(
        IllegalArgumentException.class,
        () -> Maps.asConverter((BiMap<String, Integer>) biMap).convert("three"));
  }
  /** toString of the converter embeds the backing bimap's toString. */
  public void testAsConverter_toString() {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    assertEquals("Maps.asConverter({one=1, two=2})", converter.toString());
  }
  /** The converter survives a serialization round trip. */
  public void testAsConverter_serialization() {
    ImmutableBiMap<String, Integer> biMap =
        ImmutableBiMap.of(
            "one", 1,
            "two", 2);
    Converter<String, Integer> converter = Maps.asConverter(biMap);
    SerializableTester.reserializeAndAssert(converter);
  }
public void testUnmodifiableBiMap() {
BiMap<Integer, String> mod = HashBiMap.create();
mod.put(1, "one");
mod.put(2, "two");
mod.put(3, "three");
BiMap<Number, String> unmod = Maps.<Number, String>unmodifiableBiMap(mod);
/* No aliasing on inverse operations. */
assertSame(unmod.inverse(), unmod.inverse());
assertSame(unmod, unmod.inverse().inverse());
/* Unmodifiable is a view. */
mod.put(4, "four");
assertEquals(true, unmod.get(4).equals("four"));
assertEquals(true, unmod.inverse().get("four").equals(4));
/* UnsupportedOperationException on direct modifications. */
assertThrows(UnsupportedOperationException.class, () -> unmod.put(4, "four"));
assertThrows(UnsupportedOperationException.class, () -> unmod.forcePut(4, "four"));
assertThrows(UnsupportedOperationException.class, () -> unmod.putAll(singletonMap(4, "four")));
/* UnsupportedOperationException on indirect modifications. */
BiMap<String, Number> inverse = unmod.inverse();
assertThrows(UnsupportedOperationException.class, () -> inverse.put("four", 4));
assertThrows(UnsupportedOperationException.class, () -> inverse.forcePut("four", 4));
assertThrows(
UnsupportedOperationException.class, () -> inverse.putAll(singletonMap("four", 4)));
Set<String> values = unmod.values();
assertThrows(UnsupportedOperationException.class, () -> values.remove("four"));
Set<Entry<Number, String>> entries = unmod.entrySet();
Entry<Number, String> entry = entries.iterator().next();
assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
@SuppressWarnings("unchecked")
Entry<Integer, String> entry2 = (Entry<Integer, String>) entries.toArray()[0];
assertThrows(UnsupportedOperationException.class, () -> entry2.setValue("four"));
}
  /** {@code immutableEntry}: accessors, rejected setValue, toString, and fixed hashCode. */
  public void testImmutableEntry() {
    Entry<String, Integer> e = immutableEntry("foo", 1);
    assertEquals("foo", e.getKey());
    assertEquals(1, (int) e.getValue());
    assertThrows(UnsupportedOperationException.class, () -> e.setValue(2));
    assertEquals("foo=1", e.toString());
    // Map.Entry contract: key.hashCode() ^ value.hashCode().
    assertEquals(101575, e.hashCode());
  }
  /** {@code immutableEntry} permits null key and value; hashCode of null^null is 0. */
  public void testImmutableEntryNull() {
    Entry<@Nullable String, @Nullable Integer> e = immutableEntry((String) null, (Integer) null);
    assertThat(e.getKey()).isNull();
    assertThat(e.getValue()).isNull();
    assertThrows(UnsupportedOperationException.class, () -> e.setValue(null));
    assertEquals("null=null", e.toString());
    assertEquals(0, e.hashCode());
  }
  /** See {@link SynchronizedBiMapTest} for more tests. */
  @J2ktIncompatible // Synchronized
  public void testSynchronizedBiMap() {
    BiMap<String, Integer> bimap = HashBiMap.create();
    bimap.put("one", 1);
    BiMap<String, Integer> sync = Maps.synchronizedBiMap(bimap);
    // Writes through either reference are visible in both.
    bimap.put("two", 2);
    sync.put("three", 3);
    assertEquals(ImmutableSet.of(1, 2, 3), bimap.inverse().keySet());
    assertEquals(ImmutableSet.of(1, 2, 3), sync.inverse().keySet());
  }
private static final Function<Integer, Double> SQRT_FUNCTION = in -> Math.sqrt(in);
  /** {@code transformValues} applies the function to every value in the view. */
  public void testTransformValues() {
    Map<String, Integer> map = ImmutableMap.of("a", 4, "b", 9);
    Map<String, Double> transformed = transformValues(map, SQRT_FUNCTION);

    assertEquals(ImmutableMap.of("a", 2.0, "b", 3.0), transformed);
  }
public void testTransformEntries() {
Map<String, String> map = ImmutableMap.of("a", "4", "b", "9");
EntryTransformer<String, String, String> concat =
new EntryTransformer<String, String, String>() {
@Override
public String transformEntry(String key, String value) {
return key + value;
}
};
Map<String, String> transformed = transformEntries(map, concat);
assertEquals(ImmutableMap.of("a", "a4", "b", "b9"), transformed);
}
  @SuppressWarnings("unused")
  /**
   * Compile-time check of {@code transformEntries}' generic variance; the commented-out lines
   * document calls that must NOT compile and are intentionally left in place.
   */
  public void testTransformEntriesGenerics() {
    Map<Object, Object> map1 = ImmutableMap.<Object, Object>of(1, 2);
    Map<Object, Number> map2 = ImmutableMap.<Object, Number>of(1, 2);
    Map<Object, Integer> map3 = ImmutableMap.<Object, Integer>of(1, 2);
    Map<Number, Object> map4 = ImmutableMap.<Number, Object>of(1, 2);
    Map<Number, Number> map5 = ImmutableMap.<Number, Number>of(1, 2);
    Map<Number, Integer> map6 = ImmutableMap.<Number, Integer>of(1, 2);
    Map<Integer, Object> map7 = ImmutableMap.<Integer, Object>of(1, 2);
    Map<Integer, Number> map8 = ImmutableMap.<Integer, Number>of(1, 2);
    Map<Integer, Integer> map9 = ImmutableMap.<Integer, Integer>of(1, 2);
    Map<? extends Number, ? extends Number> map0 = ImmutableMap.of(1, 2);

    EntryTransformer<Number, Number, Double> transformer =
        new EntryTransformer<Number, Number, Double>() {
          @Override
          public Double transformEntry(Number key, Number value) {
            return key.doubleValue() + value.doubleValue();
          }
        };

    Map<Object, Double> objectKeyed;
    Map<Number, Double> numberKeyed;
    Map<Integer, Double> integerKeyed;

    numberKeyed = transformEntries(map5, transformer);
    numberKeyed = transformEntries(map6, transformer);
    integerKeyed = transformEntries(map8, transformer);
    integerKeyed = transformEntries(map9, transformer);

    Map<? extends Number, Double> wildcarded = transformEntries(map0, transformer);

    // Can't loosen the key type:
    // objectKeyed = transformEntries(map5, transformer);
    // objectKeyed = transformEntries(map6, transformer);
    // objectKeyed = transformEntries(map8, transformer);
    // objectKeyed = transformEntries(map9, transformer);
    // numberKeyed = transformEntries(map8, transformer);
    // numberKeyed = transformEntries(map9, transformer);

    // Can't loosen the value type:
    // Map<Number, Number> looseValued1 = transformEntries(map5, transformer);
    // Map<Number, Number> looseValued2 = transformEntries(map6, transformer);
    // Map<Integer, Number> looseValued3 = transformEntries(map8, transformer);
    // Map<Integer, Number> looseValued4 = transformEntries(map9, transformer);

    // Can't call with too loose a key:
    // transformEntries(map1, transformer);
    // transformEntries(map2, transformer);
    // transformEntries(map3, transformer);

    // Can't call with too loose a value:
    // transformEntries(map1, transformer);
    // transformEntries(map4, transformer);
    // transformEntries(map7, transformer);
  }
public void testTransformEntriesExample() {
Map<String, Boolean> options = ImmutableMap.of("verbose", true, "sort", false);
EntryTransformer<String, Boolean, String> flagPrefixer =
new EntryTransformer<String, Boolean, String>() {
@Override
public String transformEntry(String key, Boolean value) {
return value ? key : "no" + key;
}
};
Map<String, String> transformed = transformEntries(options, flagPrefixer);
assertEquals("{verbose=verbose, sort=nosort}", transformed.toString());
}
  // Logically this would accept a NavigableMap, but that won't work under GWT.
  /** Wraps a map so it exposes only the SortedMap interface (hides NavigableMap). */
  private static <K, V> SortedMap<K, V> sortedNotNavigable(SortedMap<K, V> map) {
    return new ForwardingSortedMap<K, V>() {
      @Override
      protected SortedMap<K, V> delegate() {
        return map;
      }
    };
  }
  /** {@code transformValues} on a SortedMap returns a SortedMap. */
  public void testSortedMapTransformValues() {
    SortedMap<String, Integer> map = sortedNotNavigable(ImmutableSortedMap.of("a", 4, "b", 9));
    SortedMap<String, Double> transformed = transformValues(map, SQRT_FUNCTION);

    /*
     * We'd like to sanity check that we didn't get a NavigableMap out, but we
     * can't easily do so while maintaining GWT compatibility.
     */
    assertEquals(ImmutableSortedMap.of("a", 2.0, "b", 3.0), transformed);
  }
  @GwtIncompatible // NavigableMap
  /** {@code transformValues} on a NavigableMap returns a NavigableMap. */
  public void testNavigableMapTransformValues() {
    NavigableMap<String, Integer> map = ImmutableSortedMap.of("a", 4, "b", 9);
    NavigableMap<String, Double> transformed = transformValues(map, SQRT_FUNCTION);

    assertEquals(ImmutableSortedMap.of("a", 2.0, "b", 3.0), transformed);
  }
  /** {@code transformEntries} on a SortedMap returns a SortedMap. */
  public void testSortedMapTransformEntries() {
    SortedMap<String, String> map = sortedNotNavigable(ImmutableSortedMap.of("a", "4", "b", "9"));
    EntryTransformer<String, String, String> concat =
        new EntryTransformer<String, String, String>() {
          @Override
          public String transformEntry(String key, String value) {
            return key + value;
          }
        };
    SortedMap<String, String> transformed = transformEntries(map, concat);

    /*
     * We'd like to sanity check that we didn't get a NavigableMap out, but we
     * can't easily do so while maintaining GWT compatibility.
     */
    assertEquals(ImmutableSortedMap.of("a", "a4", "b", "b9"), transformed);
  }
  @GwtIncompatible // NavigableMap
  /** {@code transformEntries} on a NavigableMap returns a NavigableMap. */
  public void testNavigableMapTransformEntries() {
    NavigableMap<String, String> map = ImmutableSortedMap.of("a", "4", "b", "9");
    EntryTransformer<String, String, String> concat =
        new EntryTransformer<String, String, String>() {
          @Override
          public String transformEntry(String key, String value) {
            return key + value;
          }
        };
    NavigableMap<String, String> transformed = transformEntries(map, concat);

    assertEquals(ImmutableSortedMap.of("a", "a4", "b", "b9"), transformed);
  }
  @GwtIncompatible // NavigableMap
  /**
   * {@code unmodifiableNavigableMap}: live read-through view whose sub-views, iterators, and
   * navigation-returned entries all reject mutation.
   */
  public void testUnmodifiableNavigableMap() {
    TreeMap<Integer, String> mod = Maps.newTreeMap();
    mod.put(1, "one");
    mod.put(2, "two");
    mod.put(3, "three");
    NavigableMap<Integer, String> unmod = unmodifiableNavigableMap(mod);

    /* unmod is a view. */
    mod.put(4, "four");
    assertEquals("four", unmod.get(4));
    assertEquals("four", unmod.descendingMap().get(4));

    ensureNotDirectlyModifiable(unmod);
    ensureNotDirectlyModifiable(unmod.descendingMap());
    ensureNotDirectlyModifiable(unmod.headMap(2, true));
    ensureNotDirectlyModifiable(unmod.subMap(1, true, 3, true));
    ensureNotDirectlyModifiable(unmod.tailMap(2, true));

    Collection<String> values = unmod.values();
    assertThrows(UnsupportedOperationException.class, () -> values.add("4"));
    assertThrows(UnsupportedOperationException.class, () -> values.remove("four"));
    assertThrows(UnsupportedOperationException.class, () -> values.removeAll(singleton("four")));
    assertThrows(UnsupportedOperationException.class, () -> values.retainAll(singleton("four")));
    assertThrows(
        UnsupportedOperationException.class,
        () -> {
          Iterator<String> iterator = values.iterator();
          iterator.next();
          iterator.remove();
        });

    Set<Entry<Integer, String>> entries = unmod.entrySet();
    assertThrows(
        UnsupportedOperationException.class,
        () -> {
          Iterator<Entry<Integer, String>> iterator = entries.iterator();
          iterator.next();
          iterator.remove();
        });
    {
      Entry<Integer, String> entry = entries.iterator().next();
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      // No key below 1, so lowerEntry is null rather than an entry to test.
      Entry<Integer, String> entry = unmod.lowerEntry(1);
      assertThat(entry).isNull();
    }
    {
      Entry<Integer, String> entry = unmod.floorEntry(2);
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      Entry<Integer, String> entry = unmod.ceilingEntry(2);
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      Entry<Integer, String> entry = unmod.lowerEntry(2);
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      Entry<Integer, String> entry = unmod.higherEntry(2);
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      Entry<Integer, String> entry = unmod.firstEntry();
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      Entry<Integer, String> entry = unmod.lastEntry();
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
    {
      @SuppressWarnings("unchecked")
      Entry<Integer, String> entry = (Entry<Integer, String>) entries.toArray()[0];
      assertThrows(UnsupportedOperationException.class, () -> entry.setValue("four"));
    }
  }
@GwtIncompatible // NavigableMap
void ensureNotDirectlyModifiable(NavigableMap<Integer, String> unmod) {
try {
unmod.put(4, "four");
fail("UnsupportedOperationException expected");
} catch (UnsupportedOperationException expected) {
}
try {
unmod.putAll(singletonMap(4, "four"));
fail("UnsupportedOperationException expected");
} catch (UnsupportedOperationException expected) {
}
try {
unmod.remove(4);
fail("UnsupportedOperationException expected");
} catch (UnsupportedOperationException expected) {
}
try {
unmod.pollFirstEntry();
fail("UnsupportedOperationException expected");
} catch (UnsupportedOperationException expected) {
}
try {
unmod.pollLastEntry();
fail("UnsupportedOperationException expected");
} catch (UnsupportedOperationException expected) {
}
}
@GwtIncompatible // NavigableMap
public void testSubMap_boundedRange() {
// Fixture: even keys 2..10, every value 0. Each assertion compares
// Maps.subMap against the expected submap for one bounded Range flavor.
ImmutableSortedMap<Integer, Integer> map = ImmutableSortedMap.of(2, 0, 4, 0, 6, 0, 8, 0, 10, 0);
ImmutableSortedMap<Integer, Integer> empty = ImmutableSortedMap.of();
// closed(a, b): both endpoints included.
assertEquals(map, Maps.subMap(map, Range.closed(0, 12)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0), Maps.subMap(map, Range.closed(0, 4)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0, 6, 0), Maps.subMap(map, Range.closed(2, 6)));
assertEquals(ImmutableSortedMap.of(4, 0, 6, 0), Maps.subMap(map, Range.closed(3, 7)));
assertEquals(empty, Maps.subMap(map, Range.closed(20, 30)));
// open(a, b): both endpoints excluded.
assertEquals(map, Maps.subMap(map, Range.open(0, 12)));
assertEquals(ImmutableSortedMap.of(2, 0), Maps.subMap(map, Range.open(0, 4)));
assertEquals(ImmutableSortedMap.of(4, 0), Maps.subMap(map, Range.open(2, 6)));
assertEquals(ImmutableSortedMap.of(4, 0, 6, 0), Maps.subMap(map, Range.open(3, 7)));
assertEquals(empty, Maps.subMap(map, Range.open(20, 30)));
// openClosed(a, b): lower excluded, upper included.
assertEquals(map, Maps.subMap(map, Range.openClosed(0, 12)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0), Maps.subMap(map, Range.openClosed(0, 4)));
assertEquals(ImmutableSortedMap.of(4, 0, 6, 0), Maps.subMap(map, Range.openClosed(2, 6)));
assertEquals(ImmutableSortedMap.of(4, 0, 6, 0), Maps.subMap(map, Range.openClosed(3, 7)));
assertEquals(empty, Maps.subMap(map, Range.openClosed(20, 30)));
// closedOpen(a, b): lower included, upper excluded.
assertEquals(map, Maps.subMap(map, Range.closedOpen(0, 12)));
assertEquals(ImmutableSortedMap.of(2, 0), Maps.subMap(map, Range.closedOpen(0, 4)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0), Maps.subMap(map, Range.closedOpen(2, 6)));
assertEquals(ImmutableSortedMap.of(4, 0, 6, 0), Maps.subMap(map, Range.closedOpen(3, 7)));
assertEquals(empty, Maps.subMap(map, Range.closedOpen(20, 30)));
}
@GwtIncompatible // NavigableMap
public void testSubMap_halfBoundedRange() {
// Fixture: even keys 2..10, every value 0. Covers the four half-bounded
// Range flavors against Maps.subMap.
ImmutableSortedMap<Integer, Integer> map = ImmutableSortedMap.of(2, 0, 4, 0, 6, 0, 8, 0, 10, 0);
ImmutableSortedMap<Integer, Integer> empty = ImmutableSortedMap.of();
// atLeast(a): keys >= a.
assertEquals(map, Maps.subMap(map, Range.atLeast(0)));
assertEquals(
ImmutableSortedMap.of(4, 0, 6, 0, 8, 0, 10, 0), Maps.subMap(map, Range.atLeast(4)));
assertEquals(ImmutableSortedMap.of(8, 0, 10, 0), Maps.subMap(map, Range.atLeast(7)));
assertEquals(empty, Maps.subMap(map, Range.atLeast(20)));
// greaterThan(a): keys > a.
assertEquals(map, Maps.subMap(map, Range.greaterThan(0)));
assertEquals(ImmutableSortedMap.of(6, 0, 8, 0, 10, 0), Maps.subMap(map, Range.greaterThan(4)));
assertEquals(ImmutableSortedMap.of(8, 0, 10, 0), Maps.subMap(map, Range.greaterThan(7)));
assertEquals(empty, Maps.subMap(map, Range.greaterThan(20)));
// lessThan(b): keys < b.
assertEquals(empty, Maps.subMap(map, Range.lessThan(0)));
assertEquals(ImmutableSortedMap.of(2, 0), Maps.subMap(map, Range.lessThan(4)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0, 6, 0), Maps.subMap(map, Range.lessThan(7)));
assertEquals(map, Maps.subMap(map, Range.lessThan(20)));
// atMost(b): keys <= b.
assertEquals(empty, Maps.subMap(map, Range.atMost(0)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0), Maps.subMap(map, Range.atMost(4)));
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0, 6, 0), Maps.subMap(map, Range.atMost(7)));
assertEquals(map, Maps.subMap(map, Range.atMost(20)));
}
@GwtIncompatible // NavigableMap
public void testSubMap_unboundedRange() {
// Range.all() imposes no bounds, so the submap is the whole map.
ImmutableSortedMap<Integer, Integer> map = ImmutableSortedMap.of(2, 0, 4, 0, 6, 0, 8, 0, 10, 0);
assertEquals(map, Maps.subMap(map, Range.<Integer>all()));
}
@GwtIncompatible // NavigableMap
public void testSubMap_unnaturalOrdering() {
// Map is built with a reverse-order comparator, i.e. not the keys' natural
// ordering.
ImmutableSortedMap<Integer, Integer> map =
ImmutableSortedMap.<Integer, Integer>reverseOrder()
.put(2, 0)
.put(4, 0)
.put(6, 0)
.put(8, 0)
.put(10, 0)
.build();
// A range with bounds on both ends is rejected for a non-naturally-ordered map.
assertThrows(IllegalArgumentException.class, () -> Maps.subMap(map, Range.closed(4, 8)));
// These results are all incorrect, but there's no way (short of iterating over the result)
// to verify that with an arbitrary ordering or comparator.
assertEquals(ImmutableSortedMap.of(2, 0, 4, 0), Maps.subMap(map, Range.atLeast(4)));
assertEquals(ImmutableSortedMap.of(8, 0, 10, 0), Maps.subMap(map, Range.atMost(8)));
assertEquals(
ImmutableSortedMap.of(2, 0, 4, 0, 6, 0, 8, 0, 10, 0),
Maps.subMap(map, Range.<Integer>all()));
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/collect/MapsTest.java |
"""
Tests for Platform against Mobile App Request
"""
import ddt
from django.test import TestCase
from mobile_api.mobile_platform import MobilePlatform
@ddt.ddt
class TestMobilePlatform(TestCase):
    """Exercise MobilePlatform detection against mobile-app user agents."""

    @ddt.data(
        ("edX/org.edx.mobile (0.1.5; OS Version 9.2 (Build 13C75))", "iOS", "0.1.5"),
        ("edX/org.edx.mobile (1.01.1; OS Version 9.2 (Build 13C75))", "iOS", "1.01.1"),
        ("edX/org.edx.mobile (2.2.2; OS Version 9.2 (Build 13C75))", "iOS", "2.2.2"),
        ("edX/org.edx.mobile (3.3.3; OS Version 9.2 (Build 13C75))", "iOS", "3.3.3"),
        ("edX/org.edx.mobile (3.3.3.test; OS Version 9.2 (Build 13C75))", "iOS", "3.3.3.test"),
        ("edX/org.test-domain.mobile (0.1.5; OS Version 9.2 (Build 13C75))", "iOS", "0.1.5"),
        ("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.1.1", "Android", "1.1.1"),
        ("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.2.2", "Android", "2.2.2"),
        ("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/3.3.3", "Android", "3.3.3"),
        ("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/3.3.3.X", "Android", "3.3.3.X"),
    )
    @ddt.unpack
    def test_platform_instance(self, user_agent, expected_platform, expected_version):
        """A recognised app user agent yields the expected platform name and version."""
        detected = MobilePlatform.get_instance(user_agent)
        self.assertEqual(expected_platform, detected.NAME)
        self.assertEqual(expected_version, detected.version)

    @ddt.data(
        ("Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
         "Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0"),
        ("Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) "
         "Mobile/13C75 edX/org.edx.mobile/2.2.1"),
        ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 "
         "Safari/537.36"),
        "edX/org.edx.mobile (0.1.5.2.; OS Version 9.2 (Build 13C75))",
        "edX/org.edx.mobile (0.1.5.2.5.1; OS Version 9.2 (Build 13C75))",
    )
    def test_non_mobile_app_requests(self, user_agent):
        """Browser or malformed user agents must not resolve to any platform."""
        detected = MobilePlatform.get_instance(user_agent)
        self.assertIsNone(detected)
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_CONTAINERCONTAINSCHECK_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_CONTAINERCONTAINSCHECK_H
#include "../ClangTidyCheck.h"
namespace clang::tidy::readability {
/// Finds usages of `container.count()` and
/// `container.find() == container.end()` which should be replaced by a call
/// to the `container.contains()` method.
///
/// For the user-facing documentation see:
/// https://clang.llvm.org/extra/clang-tidy/checks/readability/container-contains.html
class ContainerContainsCheck : public ClangTidyCheck {
public:
ContainerContainsCheck(StringRef Name, ClangTidyContext *Context)
: ClangTidyCheck(Name, Context) {}
// Registers the AST matchers that locate the count()/find()==end() patterns
// this check rewrites (see the class comment above).
void registerMatchers(ast_matchers::MatchFinder *Finder) final;
// Emits the diagnostic (and fix) for each matched expression.
void check(const ast_matchers::MatchFinder::MatchResult &Result) final;
// contains()-style rewrites only apply to C++ code.
bool isLanguageVersionSupported(const LangOptions &LO) const final {
return LO.CPlusPlus;
}
// Traverse only code as spelled in the source (TK_IgnoreUnlessSpelledInSource).
std::optional<TraversalKind> getCheckTraversalKind() const override {
return TK_IgnoreUnlessSpelledInSource;
}
};
} // namespace clang::tidy::readability
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_CONTAINERCONTAINSCHECK_H | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/readability/ContainerContainsCheck.h |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.buildpack.platform.build;
import java.util.Comparator;
import org.jspecify.annotations.Nullable;
import org.springframework.util.Assert;
/**
* A lifecycle version number comprised of a major, minor and patch value.
*
* @author Phillip Webb
*/
class LifecycleVersion implements Comparable<LifecycleVersion> {
private static final Comparator<LifecycleVersion> COMPARATOR = Comparator.comparingInt(LifecycleVersion::getMajor)
.thenComparingInt(LifecycleVersion::getMinor)
.thenComparing(LifecycleVersion::getPatch);
private final int major;
private final int minor;
private final int patch;
LifecycleVersion(int major, int minor, int patch) {
this.major = major;
this.minor = minor;
this.patch = patch;
}
@Override
public boolean equals(@Nullable Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
LifecycleVersion other = (LifecycleVersion) obj;
boolean result = true;
result = result && this.major == other.major;
result = result && this.minor == other.minor;
result = result && this.patch == other.patch;
return result;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + this.major;
result = prime * result + this.minor;
result = prime * result + this.patch;
return result;
}
@Override
public String toString() {
return "v" + this.major + "." + this.minor + "." + this.patch;
}
/**
* Return if this version is greater than or equal to the specified version.
* @param other the version to compare
* @return {@code true} if this version is greater than or equal to the specified
* version
*/
boolean isEqualOrGreaterThan(LifecycleVersion other) {
return compareTo(other) >= 0;
}
@Override
public int compareTo(LifecycleVersion other) {
return COMPARATOR.compare(this, other);
}
/**
* Return the major version number.
* @return the major version
*/
int getMajor() {
return this.major;
}
/**
* Return the minor version number.
* @return the minor version
*/
int getMinor() {
return this.minor;
}
/**
* Return the patch version number.
* @return the patch version
*/
int getPatch() {
return this.patch;
}
/**
* Factory method to parse a string into a {@link LifecycleVersion} instance.
* @param value the value to parse.
* @return the corresponding {@link LifecycleVersion}
* @throws IllegalArgumentException if the value could not be parsed
*/
static LifecycleVersion parse(String value) {
Assert.hasText(value, "'value' must not be empty");
String withoutPrefix = (value.startsWith("v") || value.startsWith("V")) ? value.substring(1) : value;
String[] components = withoutPrefix.split("\\.");
Assert.isTrue(components.length <= 3, () -> "'value' [%s] must be a valid version number".formatted(value));
int[] versions = new int[3];
for (int i = 0; i < components.length; i++) {
try {
versions[i] = Integer.parseInt(components[i]);
}
catch (NumberFormatException ex) {
throw new IllegalArgumentException("'value' [" + value + "] must be a valid version number", ex);
}
}
return new LifecycleVersion(versions[0], versions[1], versions[2]);
}
} | java | github | https://github.com/spring-projects/spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/LifecycleVersion.java |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
try:
from PIL import Image
except ImportError:
Image = None
@python_2_unicode_compatible
class CaseTestModel(models.Model):
# Declares one column per (nearly every) built-in Django field type; the name
# suggests this is the fixture model for conditional-expression (Case/When)
# query tests — TODO confirm against the test module that imports it.
integer = models.IntegerField()
integer2 = models.IntegerField(null=True)
string = models.CharField(max_length=100, default='')
big_integer = models.BigIntegerField(null=True)
binary = models.BinaryField(default=b'')
boolean = models.BooleanField(default=False)
comma_separated_integer = models.CommaSeparatedIntegerField(max_length=100, default='')
# Several fields below set db_column so the DB column name differs from the
# field name ('date', 'time', 'decimal', 'float', 'file') — presumably to
# avoid SQL-keyword/builtin clashes; verify before relying on it.
date = models.DateField(null=True, db_column='date_field')
date_time = models.DateTimeField(null=True)
decimal = models.DecimalField(max_digits=2, decimal_places=1, null=True, db_column='decimal_field')
duration = models.DurationField(null=True)
email = models.EmailField(default='')
file = models.FileField(null=True, db_column='file_field')
file_path = models.FilePathField(null=True)
float = models.FloatField(null=True, db_column='float_field')
# ImageField is only declared when PIL imported successfully at module load.
if Image:
image = models.ImageField(null=True)
generic_ip_address = models.GenericIPAddressField(null=True)
null_boolean = models.NullBooleanField()
positive_integer = models.PositiveIntegerField(null=True)
positive_small_integer = models.PositiveSmallIntegerField(null=True)
slug = models.SlugField(default='')
small_integer = models.SmallIntegerField(null=True)
text = models.TextField(default='')
time = models.TimeField(null=True, db_column='time_field')
url = models.URLField(default='')
uuid = models.UUIDField(null=True)
# Self-referential FK: rows can point at other rows of this same model.
fk = models.ForeignKey('self', models.CASCADE, null=True)
def __str__(self):
return "%i, %s" % (self.integer, self.string)
@python_2_unicode_compatible
class O2OCaseTestModel(models.Model):
# One-to-one companion of CaseTestModel (reverse accessor: 'o2o_rel').
o2o = models.OneToOneField(CaseTestModel, models.CASCADE, related_name='o2o_rel')
integer = models.IntegerField()
def __str__(self):
return "%i, %s" % (self.id, self.o2o)
@python_2_unicode_compatible
class FKCaseTestModel(models.Model):
# Many-to-one companion of CaseTestModel (reverse accessor: 'fk_rel').
fk = models.ForeignKey(CaseTestModel, models.CASCADE, related_name='fk_rel')
integer = models.IntegerField()
def __str__(self):
return "%i, %s" % (self.id, self.fk)
@python_2_unicode_compatible
class Client(models.Model):
# Account-tier constants; the single-character codes below are what is stored.
REGULAR = 'R'
GOLD = 'G'
PLATINUM = 'P'
ACCOUNT_TYPE_CHOICES = (
(REGULAR, 'Regular'),
(GOLD, 'Gold'),
(PLATINUM, 'Platinum'),
)
name = models.CharField(max_length=50)
registered_on = models.DateField()
# New clients default to the REGULAR tier.
account_type = models.CharField(
max_length=1,
choices=ACCOUNT_TYPE_CHOICES,
default=REGULAR,
)
def __str__(self):
return self.name
// generated with @7nohe/openapi-react-query-codegen@1.6.2 — machine-generated file; do not edit by hand, regenerate from the OpenAPI spec instead
import { type QueryClient } from "@tanstack/react-query";
import { AssetService, AuthLinksService, BackfillService, CalendarService, ConfigService, ConnectionService, DagRunService, DagService, DagSourceService, DagStatsService, DagVersionService, DagWarningService, DashboardService, DependenciesService, EventLogService, ExperimentalService, ExtraLinksService, GanttService, GridService, ImportErrorService, JobService, LoginService, MonitorService, PluginService, PoolService, ProviderService, StructureService, TaskInstanceService, TaskService, TeamsService, VariableService, VersionService, XcomService } from "../requests/services.gen";
import { DagRunState, DagWarningType } from "../requests/types.gen";
import * as Common from "./common";
/**
* Get Assets
* Get assets.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.namePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.uriPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.dagIds
* @param data.onlyActive
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, name, uri, created_at, updated_at`
* @returns AssetCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetAssetsData = (queryClient: QueryClient, { dagIds, limit, namePattern, offset, onlyActive, orderBy, uriPattern }: {
dagIds?: string[];
limit?: number;
namePattern?: string;
offset?: number;
onlyActive?: boolean;
orderBy?: string[];
uriPattern?: string;
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetsKeyFn({ dagIds, limit, namePattern, offset, onlyActive, orderBy, uriPattern }), queryFn: () => AssetService.getAssets({ dagIds, limit, namePattern, offset, onlyActive, orderBy, uriPattern }) });
/**
* Get Asset Aliases
* Get asset aliases.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.namePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, name`
* @returns AssetAliasCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetAssetAliasesData = (queryClient: QueryClient, { limit, namePattern, offset, orderBy }: {
limit?: number;
namePattern?: string;
offset?: number;
orderBy?: string[];
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetAliasesKeyFn({ limit, namePattern, offset, orderBy }), queryFn: () => AssetService.getAssetAliases({ limit, namePattern, offset, orderBy }) });
/**
* Get Asset Alias
* Get an asset alias.
* @param data The data for the request.
* @param data.assetAliasId
* @returns unknown Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetAssetAliasData = (queryClient: QueryClient, { assetAliasId }: {
assetAliasId: number;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetAliasKeyFn({ assetAliasId }), queryFn: () => AssetService.getAssetAlias({ assetAliasId }) });
/**
* Get Asset Events
* Get asset events.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `source_task_id, source_dag_id, source_run_id, source_map_index, timestamp`
* @param data.assetId
* @param data.sourceDagId
* @param data.sourceTaskId
* @param data.sourceRunId
* @param data.sourceMapIndex
* @param data.namePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.timestampGte
* @param data.timestampGt
* @param data.timestampLte
* @param data.timestampLt
* @returns AssetEventCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetAssetEventsData = (queryClient: QueryClient, { assetId, limit, namePattern, offset, orderBy, sourceDagId, sourceMapIndex, sourceRunId, sourceTaskId, timestampGt, timestampGte, timestampLt, timestampLte }: {
assetId?: number;
limit?: number;
namePattern?: string;
offset?: number;
orderBy?: string[];
sourceDagId?: string;
sourceMapIndex?: number;
sourceRunId?: string;
sourceTaskId?: string;
timestampGt?: string;
timestampGte?: string;
timestampLt?: string;
timestampLte?: string;
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetEventsKeyFn({ assetId, limit, namePattern, offset, orderBy, sourceDagId, sourceMapIndex, sourceRunId, sourceTaskId, timestampGt, timestampGte, timestampLt, timestampLte }), queryFn: () => AssetService.getAssetEvents({ assetId, limit, namePattern, offset, orderBy, sourceDagId, sourceMapIndex, sourceRunId, sourceTaskId, timestampGt, timestampGte, timestampLt, timestampLte }) });
/**
* Get Asset Queued Events
* Get queued asset events for an asset.
* @param data The data for the request.
* @param data.assetId
* @param data.before
* @returns QueuedEventCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetAssetQueuedEventsData = (queryClient: QueryClient, { assetId, before }: {
assetId: number;
before?: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetQueuedEventsKeyFn({ assetId, before }), queryFn: () => AssetService.getAssetQueuedEvents({ assetId, before }) });
/**
 * Get Asset
 * Get an asset.
 * @param data The data for the request.
 * @param data.assetId
 * @returns AssetResponse Successful Response
 * @throws ApiError
 */
// Generated wrapper: delegates to queryClient.ensureQueryData, keyed by
// Common.UseAssetServiceGetAssetKeyFn and fetched via AssetService.getAsset.
export const ensureUseAssetServiceGetAssetData = (queryClient: QueryClient, { assetId }: {
assetId: number;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetAssetKeyFn({ assetId }), queryFn: () => AssetService.getAsset({ assetId }) });
/**
* Get Dag Asset Queued Events
* Get queued asset events for a DAG.
* @param data The data for the request.
* @param data.dagId
* @param data.before
* @returns QueuedEventCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetDagAssetQueuedEventsData = (queryClient: QueryClient, { before, dagId }: {
before?: string;
dagId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetDagAssetQueuedEventsKeyFn({ before, dagId }), queryFn: () => AssetService.getDagAssetQueuedEvents({ before, dagId }) });
/**
* Get Dag Asset Queued Event
* Get a queued asset event for a DAG.
* @param data The data for the request.
* @param data.dagId
* @param data.assetId
* @param data.before
* @returns QueuedEventResponse Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceGetDagAssetQueuedEventData = (queryClient: QueryClient, { assetId, before, dagId }: {
assetId: number;
before?: string;
dagId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceGetDagAssetQueuedEventKeyFn({ assetId, before, dagId }), queryFn: () => AssetService.getDagAssetQueuedEvent({ assetId, before, dagId }) });
/**
* Next Run Assets
* @param data The data for the request.
* @param data.dagId
* @returns unknown Successful Response
* @throws ApiError
*/
export const ensureUseAssetServiceNextRunAssetsData = (queryClient: QueryClient, { dagId }: {
dagId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseAssetServiceNextRunAssetsKeyFn({ dagId }), queryFn: () => AssetService.nextRunAssets({ dagId }) });
/**
* List Backfills
* @param data The data for the request.
* @param data.dagId
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id`
* @returns BackfillCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseBackfillServiceListBackfillsData = (queryClient: QueryClient, { dagId, limit, offset, orderBy }: {
dagId: string;
limit?: number;
offset?: number;
orderBy?: string[];
}) => queryClient.ensureQueryData({ queryKey: Common.UseBackfillServiceListBackfillsKeyFn({ dagId, limit, offset, orderBy }), queryFn: () => BackfillService.listBackfills({ dagId, limit, offset, orderBy }) });
/**
* Get Backfill
* @param data The data for the request.
* @param data.backfillId
* @returns BackfillResponse Successful Response
* @throws ApiError
*/
export const ensureUseBackfillServiceGetBackfillData = (queryClient: QueryClient, { backfillId }: {
backfillId: number;
}) => queryClient.ensureQueryData({ queryKey: Common.UseBackfillServiceGetBackfillKeyFn({ backfillId }), queryFn: () => BackfillService.getBackfill({ backfillId }) });
/**
* List Backfills Ui
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id`
* @param data.dagId
* @param data.active
* @returns BackfillCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseBackfillServiceListBackfillsUiData = (queryClient: QueryClient, { active, dagId, limit, offset, orderBy }: {
active?: boolean;
dagId?: string;
limit?: number;
offset?: number;
orderBy?: string[];
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseBackfillServiceListBackfillsUiKeyFn({ active, dagId, limit, offset, orderBy }), queryFn: () => BackfillService.listBackfillsUi({ active, dagId, limit, offset, orderBy }) });
/**
* Get Connection
* Get a connection entry.
* @param data The data for the request.
* @param data.connectionId
* @returns ConnectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseConnectionServiceGetConnectionData = (queryClient: QueryClient, { connectionId }: {
connectionId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseConnectionServiceGetConnectionKeyFn({ connectionId }), queryFn: () => ConnectionService.getConnection({ connectionId }) });
/**
* Get Connections
* Get all connection entries.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `conn_id, conn_type, description, host, port, id, team_name, connection_id`
* @param data.connectionIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @returns ConnectionCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseConnectionServiceGetConnectionsData = (queryClient: QueryClient, { connectionIdPattern, limit, offset, orderBy }: {
connectionIdPattern?: string;
limit?: number;
offset?: number;
orderBy?: string[];
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseConnectionServiceGetConnectionsKeyFn({ connectionIdPattern, limit, offset, orderBy }), queryFn: () => ConnectionService.getConnections({ connectionIdPattern, limit, offset, orderBy }) });
/**
 * Hook Meta Data
 * Retrieve information about available connection types (hook classes) and their parameters.
 * @returns ConnectionHookMetaData Successful Response
 * @throws ApiError
 */
// Generated wrapper: delegates to queryClient.ensureQueryData, keyed by
// Common.UseConnectionServiceHookMetaDataKeyFn and fetched via
// ConnectionService.hookMetaData.
export const ensureUseConnectionServiceHookMetaDataData = (queryClient: QueryClient) => queryClient.ensureQueryData({ queryKey: Common.UseConnectionServiceHookMetaDataKeyFn(), queryFn: () => ConnectionService.hookMetaData() });
/**
* Get Dag Run
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @returns DAGRunResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagRunServiceGetDagRunData = (queryClient: QueryClient, { dagId, dagRunId }: {
dagId: string;
dagRunId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseDagRunServiceGetDagRunKeyFn({ dagId, dagRunId }), queryFn: () => DagRunService.getDagRun({ dagId, dagRunId }) });
/**
* Get Upstream Asset Events
* If dag run is asset-triggered, return the asset events that triggered it.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @returns AssetEventCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagRunServiceGetUpstreamAssetEventsData = (queryClient: QueryClient, { dagId, dagRunId }: {
dagId: string;
dagRunId: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseDagRunServiceGetUpstreamAssetEventsKeyFn({ dagId, dagRunId }), queryFn: () => DagRunService.getUpstreamAssetEvents({ dagId, dagRunId }) });
/**
* Get Dag Runs
* Get all DAG Runs.
*
* This endpoint allows specifying `~` as the dag_id to retrieve Dag Runs for all DAGs.
* @param data The data for the request.
* @param data.dagId
* @param data.limit
* @param data.offset
* @param data.runAfterGte
* @param data.runAfterGt
* @param data.runAfterLte
* @param data.runAfterLt
* @param data.logicalDateGte
* @param data.logicalDateGt
* @param data.logicalDateLte
* @param data.logicalDateLt
* @param data.startDateGte
* @param data.startDateGt
* @param data.startDateLte
* @param data.startDateLt
* @param data.endDateGte
* @param data.endDateGt
* @param data.endDateLte
* @param data.endDateLt
* @param data.durationGte
* @param data.durationGt
* @param data.durationLte
* @param data.durationLt
* @param data.updatedAtGte
* @param data.updatedAtGt
* @param data.updatedAtLte
* @param data.updatedAtLt
* @param data.confContains
* @param data.runType
* @param data.state
* @param data.dagVersion
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, state, dag_id, run_id, logical_date, run_after, start_date, end_date, updated_at, conf, duration, dag_run_id`
* @param data.runIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.triggeringUserNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.dagIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @returns DAGRunCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagRunServiceGetDagRunsData = (queryClient: QueryClient, { confContains, dagId, dagIdPattern, dagVersion, durationGt, durationGte, durationLt, durationLte, endDateGt, endDateGte, endDateLt, endDateLte, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, offset, orderBy, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, runType, startDateGt, startDateGte, startDateLt, startDateLte, state, triggeringUserNamePattern, updatedAtGt, updatedAtGte, updatedAtLt, updatedAtLte }: {
confContains?: string;
dagId: string;
dagIdPattern?: string;
dagVersion?: number[];
durationGt?: number;
durationGte?: number;
durationLt?: number;
durationLte?: number;
endDateGt?: string;
endDateGte?: string;
endDateLt?: string;
endDateLte?: string;
limit?: number;
logicalDateGt?: string;
logicalDateGte?: string;
logicalDateLt?: string;
logicalDateLte?: string;
offset?: number;
orderBy?: string[];
runAfterGt?: string;
runAfterGte?: string;
runAfterLt?: string;
runAfterLte?: string;
runIdPattern?: string;
runType?: string[];
startDateGt?: string;
startDateGte?: string;
startDateLt?: string;
startDateLte?: string;
state?: string[];
triggeringUserNamePattern?: string;
updatedAtGt?: string;
updatedAtGte?: string;
updatedAtLt?: string;
updatedAtLte?: string;
}) => queryClient.ensureQueryData({ queryKey: Common.UseDagRunServiceGetDagRunsKeyFn({ confContains, dagId, dagIdPattern, dagVersion, durationGt, durationGte, durationLt, durationLte, endDateGt, endDateGte, endDateLt, endDateLte, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, offset, orderBy, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, runType, startDateGt, startDateGte, startDateLt, startDateLte, state, triggeringUserNamePattern, updatedAtGt, updatedAtGte, updatedAtLt, updatedAtLte }), queryFn: () => DagRunService.getDagRuns({ confContains, dagId, dagIdPattern, dagVersion, durationGt, durationGte, durationLt, durationLte, endDateGt, endDateGte, endDateLt, endDateLte, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, offset, orderBy, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, runType, startDateGt, startDateGte, startDateLt, startDateLte, state, triggeringUserNamePattern, updatedAtGt, updatedAtGte, updatedAtLt, updatedAtLte }) });
/**
* Experimental: Wait for a dag run to complete, and return task results if requested.
* 🚧 This is an experimental endpoint and may change or be removed without notice.Successful response are streamed as newline-delimited JSON (NDJSON). Each line is a JSON object representing the DAG run state.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.interval Seconds to wait between dag run state checks
* @param data.result Collect result XCom from task. Can be set multiple times.
* @returns unknown Successful Response
* @throws ApiError
*/
export const ensureUseDagRunServiceWaitDagRunUntilFinishedData = (queryClient: QueryClient, { dagId, dagRunId, interval, result }: {
dagId: string;
dagRunId: string;
interval: number;
result?: string[];
}) => {
    // Share one parameter object between the cache key and the request so the
    // two can never drift apart.
    const requestParams = { dagId, dagRunId, interval, result };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagRunServiceWaitDagRunUntilFinishedKeyFn(requestParams),
        queryFn: () => DagRunService.waitDagRunUntilFinished(requestParams),
    });
};
/**
* Experimental: Wait for a dag run to complete, and return task results if requested.
 * 🚧 This is an experimental endpoint and may change or be removed without notice. Successful responses are streamed as newline-delimited JSON (NDJSON). Each line is a JSON object representing the DAG run state.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.interval Seconds to wait between dag run state checks
* @param data.result Collect result XCom from task. Can be set multiple times.
* @returns unknown Successful Response
* @throws ApiError
*/
export const ensureUseExperimentalServiceWaitDagRunUntilFinishedData = (queryClient: QueryClient, { dagId, dagRunId, interval, result }: {
dagId: string;
dagRunId: string;
interval: number;
result?: string[];
}) => {
    // Single source of truth for the query parameters.
    const requestParams = { dagId, dagRunId, interval, result };
    return queryClient.ensureQueryData({
        queryKey: Common.UseExperimentalServiceWaitDagRunUntilFinishedKeyFn(requestParams),
        queryFn: () => ExperimentalService.waitDagRunUntilFinished(requestParams),
    });
};
/**
* Get Dag Source
* Get source code using file token.
* @param data The data for the request.
* @param data.dagId
* @param data.versionNumber
* @param data.accept
* @returns DAGSourceResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagSourceServiceGetDagSourceData = (queryClient: QueryClient, { accept, dagId, versionNumber }: {
accept?: "application/json" | "text/plain" | "*/*";
dagId: string;
versionNumber?: number;
}) => {
    // Key and fetch are driven by the same parameter object.
    const requestParams = { accept, dagId, versionNumber };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagSourceServiceGetDagSourceKeyFn(requestParams),
        queryFn: () => DagSourceService.getDagSource(requestParams),
    });
};
/**
* Get Dag Stats
* Get Dag statistics.
* @param data The data for the request.
* @param data.dagIds
* @returns DagStatsCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagStatsServiceGetDagStatsData = (queryClient: QueryClient, { dagIds }: {
dagIds?: string[];
} = {}) => {
    // All arguments are optional; default to an empty filter set.
    const requestParams = { dagIds };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagStatsServiceGetDagStatsKeyFn(requestParams),
        queryFn: () => DagStatsService.getDagStats(requestParams),
    });
};
/**
* Get Config
* @param data The data for the request.
* @param data.section
* @param data.accept
* @returns Config Successful Response
* @throws ApiError
*/
export const ensureUseConfigServiceGetConfigData = (queryClient: QueryClient, { accept, section }: {
accept?: "application/json" | "text/plain" | "*/*";
section?: string;
} = {}) => {
    // One parameter object feeds both the cache key and the fetch.
    const requestParams = { accept, section };
    return queryClient.ensureQueryData({
        queryKey: Common.UseConfigServiceGetConfigKeyFn(requestParams),
        queryFn: () => ConfigService.getConfig(requestParams),
    });
};
/**
* Get Config Value
* @param data The data for the request.
* @param data.section
* @param data.option
* @param data.accept
* @returns Config Successful Response
* @throws ApiError
*/
export const ensureUseConfigServiceGetConfigValueData = (queryClient: QueryClient, { accept, option, section }: {
accept?: "application/json" | "text/plain" | "*/*";
option: string;
section: string;
}) => {
    const requestParams = { accept, option, section };
    return queryClient.ensureQueryData({
        queryKey: Common.UseConfigServiceGetConfigValueKeyFn(requestParams),
        queryFn: () => ConfigService.getConfigValue(requestParams),
    });
};
/**
* Get Configs
* Get configs for UI.
* @returns ConfigResponse Successful Response
* @throws ApiError
*/
export const ensureUseConfigServiceGetConfigsData = (queryClient: QueryClient) => {
    // No request parameters: the cache key is static.
    return queryClient.ensureQueryData({
        queryKey: Common.UseConfigServiceGetConfigsKeyFn(),
        queryFn: () => ConfigService.getConfigs(),
    });
};
/**
* List Dag Warnings
* Get a list of DAG warnings.
* @param data The data for the request.
* @param data.dagId
* @param data.warningType
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `dag_id, warning_type, message, timestamp`
* @returns DAGWarningCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagWarningServiceListDagWarningsData = (queryClient: QueryClient, { dagId, limit, offset, orderBy, warningType }: {
dagId?: string;
limit?: number;
offset?: number;
orderBy?: string[];
warningType?: DagWarningType;
} = {}) => {
    const requestParams = { dagId, limit, offset, orderBy, warningType };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagWarningServiceListDagWarningsKeyFn(requestParams),
        queryFn: () => DagWarningService.listDagWarnings(requestParams),
    });
};
/**
* Get Dags
* Get all DAGs.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.tags
* @param data.tagsMatchMode
* @param data.owners
* @param data.dagIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.dagDisplayNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.excludeStale
* @param data.paused
* @param data.hasImportErrors Filter Dags by having import errors. Only Dags that have been successfully loaded before will be returned.
* @param data.lastDagRunState
* @param data.bundleName
* @param data.bundleVersion
* @param data.hasAssetSchedule Filter Dags with asset-based scheduling
* @param data.assetDependency Filter Dags by asset dependency (name or URI)
* @param data.dagRunStartDateGte
* @param data.dagRunStartDateGt
* @param data.dagRunStartDateLte
* @param data.dagRunStartDateLt
* @param data.dagRunEndDateGte
* @param data.dagRunEndDateGt
* @param data.dagRunEndDateLte
* @param data.dagRunEndDateLt
* @param data.dagRunState
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `dag_id, dag_display_name, next_dagrun, state, start_date, last_run_state, last_run_start_date`
* @param data.isFavorite
* @param data.timetableType
* @returns DAGCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetDagsData = (queryClient: QueryClient, { assetDependency, bundleName, bundleVersion, dagDisplayNamePattern, dagIdPattern, dagRunEndDateGt, dagRunEndDateGte, dagRunEndDateLt, dagRunEndDateLte, dagRunStartDateGt, dagRunStartDateGte, dagRunStartDateLt, dagRunStartDateLte, dagRunState, excludeStale, hasAssetSchedule, hasImportErrors, isFavorite, lastDagRunState, limit, offset, orderBy, owners, paused, tags, tagsMatchMode, timetableType }: {
assetDependency?: string;
bundleName?: string;
bundleVersion?: string;
dagDisplayNamePattern?: string;
dagIdPattern?: string;
dagRunEndDateGt?: string;
dagRunEndDateGte?: string;
dagRunEndDateLt?: string;
dagRunEndDateLte?: string;
dagRunStartDateGt?: string;
dagRunStartDateGte?: string;
dagRunStartDateLt?: string;
dagRunStartDateLte?: string;
dagRunState?: string[];
excludeStale?: boolean;
hasAssetSchedule?: boolean;
hasImportErrors?: boolean;
isFavorite?: boolean;
lastDagRunState?: DagRunState;
limit?: number;
offset?: number;
orderBy?: string[];
owners?: string[];
paused?: boolean;
tags?: string[];
tagsMatchMode?: "any" | "all";
timetableType?: string[];
} = {}) => {
    // Collect the (many) filter parameters once, then reuse the object for
    // both the cache key and the service call.
    const requestParams = {
        assetDependency, bundleName, bundleVersion, dagDisplayNamePattern,
        dagIdPattern, dagRunEndDateGt, dagRunEndDateGte, dagRunEndDateLt,
        dagRunEndDateLte, dagRunStartDateGt, dagRunStartDateGte,
        dagRunStartDateLt, dagRunStartDateLte, dagRunState, excludeStale,
        hasAssetSchedule, hasImportErrors, isFavorite, lastDagRunState, limit,
        offset, orderBy, owners, paused, tags, tagsMatchMode, timetableType,
    };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetDagsKeyFn(requestParams),
        queryFn: () => DagService.getDags(requestParams),
    });
};
/**
* Get Dag
* Get basic information about a DAG.
* @param data The data for the request.
* @param data.dagId
* @returns DAGResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetDagData = (queryClient: QueryClient, { dagId }: {
dagId: string;
}) => {
    const requestParams = { dagId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetDagKeyFn(requestParams),
        queryFn: () => DagService.getDag(requestParams),
    });
};
/**
* Get Dag Details
* Get details of DAG.
* @param data The data for the request.
* @param data.dagId
* @returns DAGDetailsResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetDagDetailsData = (queryClient: QueryClient, { dagId }: {
dagId: string;
}) => {
    const requestParams = { dagId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetDagDetailsKeyFn(requestParams),
        queryFn: () => DagService.getDagDetails(requestParams),
    });
};
/**
* Get Dag Tags
* Get all DAG tags.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `name`
* @param data.tagNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @returns DAGTagCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetDagTagsData = (queryClient: QueryClient, { limit, offset, orderBy, tagNamePattern }: {
limit?: number;
offset?: number;
orderBy?: string[];
tagNamePattern?: string;
} = {}) => {
    const requestParams = { limit, offset, orderBy, tagNamePattern };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetDagTagsKeyFn(requestParams),
        queryFn: () => DagService.getDagTags(requestParams),
    });
};
/**
* Get Dags
* Get DAGs with recent DagRun.
* @param data The data for the request.
* @param data.dagRunsLimit
* @param data.limit
* @param data.offset
* @param data.tags
* @param data.tagsMatchMode
* @param data.owners
* @param data.dagIds
* @param data.dagIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.dagDisplayNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.excludeStale
* @param data.paused
* @param data.hasImportErrors Filter Dags by having import errors. Only Dags that have been successfully loaded before will be returned.
* @param data.lastDagRunState
* @param data.bundleName
* @param data.bundleVersion
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `dag_id, dag_display_name, next_dagrun, state, start_date, last_run_state, last_run_start_date`
* @param data.isFavorite
* @param data.hasAssetSchedule Filter Dags with asset-based scheduling
* @param data.assetDependency Filter Dags by asset dependency (name or URI)
* @param data.hasPendingActions
* @returns DAGWithLatestDagRunsCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetDagsUiData = (queryClient: QueryClient, { assetDependency, bundleName, bundleVersion, dagDisplayNamePattern, dagIdPattern, dagIds, dagRunsLimit, excludeStale, hasAssetSchedule, hasImportErrors, hasPendingActions, isFavorite, lastDagRunState, limit, offset, orderBy, owners, paused, tags, tagsMatchMode }: {
assetDependency?: string;
bundleName?: string;
bundleVersion?: string;
dagDisplayNamePattern?: string;
dagIdPattern?: string;
dagIds?: string[];
dagRunsLimit?: number;
excludeStale?: boolean;
hasAssetSchedule?: boolean;
hasImportErrors?: boolean;
hasPendingActions?: boolean;
isFavorite?: boolean;
lastDagRunState?: DagRunState;
limit?: number;
offset?: number;
orderBy?: string[];
owners?: string[];
paused?: boolean;
tags?: string[];
tagsMatchMode?: "any" | "all";
} = {}) => {
    // One parameter object, shared by cache key and fetch.
    const requestParams = {
        assetDependency, bundleName, bundleVersion, dagDisplayNamePattern,
        dagIdPattern, dagIds, dagRunsLimit, excludeStale, hasAssetSchedule,
        hasImportErrors, hasPendingActions, isFavorite, lastDagRunState, limit,
        offset, orderBy, owners, paused, tags, tagsMatchMode,
    };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetDagsUiKeyFn(requestParams),
        queryFn: () => DagService.getDagsUi(requestParams),
    });
};
/**
* Get Latest Run Info
* Get latest run.
* @param data The data for the request.
* @param data.dagId
* @returns unknown Successful Response
* @throws ApiError
*/
export const ensureUseDagServiceGetLatestRunInfoData = (queryClient: QueryClient, { dagId }: {
dagId: string;
}) => {
    const requestParams = { dagId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseDagServiceGetLatestRunInfoKeyFn(requestParams),
        queryFn: () => DagService.getLatestRunInfo(requestParams),
    });
};
/**
* Get Event Log
* @param data The data for the request.
* @param data.eventLogId
* @returns EventLogResponse Successful Response
* @throws ApiError
*/
export const ensureUseEventLogServiceGetEventLogData = (queryClient: QueryClient, { eventLogId }: {
eventLogId: number;
}) => {
    const requestParams = { eventLogId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseEventLogServiceGetEventLogKeyFn(requestParams),
        queryFn: () => EventLogService.getEventLog(requestParams),
    });
};
/**
* Get Event Logs
* Get all Event Logs.
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, dttm, dag_id, task_id, run_id, event, logical_date, owner, extra, when, event_log_id`
* @param data.dagId
* @param data.taskId
* @param data.runId
* @param data.mapIndex
* @param data.tryNumber
* @param data.owner
* @param data.event
* @param data.excludedEvents
* @param data.includedEvents
* @param data.before
* @param data.after
* @param data.dagIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.taskIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.runIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.ownerPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.eventPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @returns EventLogCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseEventLogServiceGetEventLogsData = (queryClient: QueryClient, { after, before, dagId, dagIdPattern, event, eventPattern, excludedEvents, includedEvents, limit, mapIndex, offset, orderBy, owner, ownerPattern, runId, runIdPattern, taskId, taskIdPattern, tryNumber }: {
after?: string;
before?: string;
dagId?: string;
dagIdPattern?: string;
event?: string;
eventPattern?: string;
excludedEvents?: string[];
includedEvents?: string[];
limit?: number;
mapIndex?: number;
offset?: number;
orderBy?: string[];
owner?: string;
ownerPattern?: string;
runId?: string;
runIdPattern?: string;
taskId?: string;
taskIdPattern?: string;
tryNumber?: number;
} = {}) => {
    const requestParams = {
        after, before, dagId, dagIdPattern, event, eventPattern,
        excludedEvents, includedEvents, limit, mapIndex, offset, orderBy,
        owner, ownerPattern, runId, runIdPattern, taskId, taskIdPattern,
        tryNumber,
    };
    return queryClient.ensureQueryData({
        queryKey: Common.UseEventLogServiceGetEventLogsKeyFn(requestParams),
        queryFn: () => EventLogService.getEventLogs(requestParams),
    });
};
/**
* Get Extra Links
* Get extra links for task instance.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns ExtraLinkCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseExtraLinksServiceGetExtraLinksData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex?: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseExtraLinksServiceGetExtraLinksKeyFn(requestParams),
        queryFn: () => ExtraLinksService.getExtraLinks(requestParams),
    });
};
/**
* Get Extra Links
* Get extra links for task instance.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns ExtraLinkCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetExtraLinksData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex?: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetExtraLinksKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getExtraLinks(requestParams),
    });
};
/**
* Get Task Instance
* Get task instance.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @returns TaskInstanceResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetTaskInstanceData = (queryClient: QueryClient, { dagId, dagRunId, taskId }: {
dagId: string;
dagRunId: string;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetTaskInstanceKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getTaskInstance(requestParams),
    });
};
/**
* Get Mapped Task Instances
* Get list of mapped task instances.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.runAfterGte
* @param data.runAfterGt
* @param data.runAfterLte
* @param data.runAfterLt
* @param data.logicalDateGte
* @param data.logicalDateGt
* @param data.logicalDateLte
* @param data.logicalDateLt
* @param data.startDateGte
* @param data.startDateGt
* @param data.startDateLte
* @param data.startDateLt
* @param data.endDateGte
* @param data.endDateGt
* @param data.endDateLte
* @param data.endDateLt
* @param data.updatedAtGte
* @param data.updatedAtGt
* @param data.updatedAtLte
* @param data.updatedAtLt
* @param data.durationGte
* @param data.durationGt
* @param data.durationLte
* @param data.durationLt
* @param data.state
* @param data.pool
* @param data.poolNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.queue
* @param data.queueNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.executor
* @param data.versionNumber
* @param data.tryNumber
* @param data.operator
* @param data.operatorNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.mapIndex
* @param data.limit
* @param data.offset
 * @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, state, duration, start_date, end_date, map_index, try_number, logical_date, run_after, data_interval_start, data_interval_end, rendered_map_index, operator`
* @returns TaskInstanceCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetMappedTaskInstancesData = (queryClient: QueryClient, { dagId, dagRunId, durationGt, durationGte, durationLt, durationLte, endDateGt, endDateGte, endDateLt, endDateLte, executor, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, mapIndex, offset, operator, operatorNamePattern, orderBy, pool, poolNamePattern, queue, queueNamePattern, runAfterGt, runAfterGte, runAfterLt, runAfterLte, startDateGt, startDateGte, startDateLt, startDateLte, state, taskId, tryNumber, updatedAtGt, updatedAtGte, updatedAtLt, updatedAtLte, versionNumber }: {
dagId: string;
dagRunId: string;
durationGt?: number;
durationGte?: number;
durationLt?: number;
durationLte?: number;
endDateGt?: string;
endDateGte?: string;
endDateLt?: string;
endDateLte?: string;
executor?: string[];
limit?: number;
logicalDateGt?: string;
logicalDateGte?: string;
logicalDateLt?: string;
logicalDateLte?: string;
mapIndex?: number[];
offset?: number;
operator?: string[];
operatorNamePattern?: string;
orderBy?: string[];
pool?: string[];
poolNamePattern?: string;
queue?: string[];
queueNamePattern?: string;
runAfterGt?: string;
runAfterGte?: string;
runAfterLt?: string;
runAfterLte?: string;
startDateGt?: string;
startDateGte?: string;
startDateLt?: string;
startDateLte?: string;
state?: string[];
taskId: string;
tryNumber?: number[];
updatedAtGt?: string;
updatedAtGte?: string;
updatedAtLt?: string;
updatedAtLte?: string;
versionNumber?: number[];
}) => {
    // Assemble the filter set once; cache key and request stay in lock-step.
    const requestParams = {
        dagId, dagRunId, durationGt, durationGte, durationLt, durationLte,
        endDateGt, endDateGte, endDateLt, endDateLte, executor, limit,
        logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte,
        mapIndex, offset, operator, operatorNamePattern, orderBy, pool,
        poolNamePattern, queue, queueNamePattern, runAfterGt, runAfterGte,
        runAfterLt, runAfterLte, startDateGt, startDateGte, startDateLt,
        startDateLte, state, taskId, tryNumber, updatedAtGt, updatedAtGte,
        updatedAtLt, updatedAtLte, versionNumber,
    };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetMappedTaskInstancesKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getMappedTaskInstances(requestParams),
    });
};
/**
* Get Task Instance Dependencies
* Get dependencies blocking task from getting scheduled.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns TaskDependencyCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetTaskInstanceDependenciesByMapIndexData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetTaskInstanceDependenciesByMapIndexKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getTaskInstanceDependenciesByMapIndex(requestParams),
    });
};
/**
* Get Task Instance Dependencies
* Get dependencies blocking task from getting scheduled.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns TaskDependencyCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetTaskInstanceDependenciesData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex?: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetTaskInstanceDependenciesKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getTaskInstanceDependencies(requestParams),
    });
};
/**
* Get Task Instance Tries
* Get list of task instances history.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns TaskInstanceHistoryCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetTaskInstanceTriesData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex?: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetTaskInstanceTriesKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getTaskInstanceTries(requestParams),
    });
};
/**
* Get Mapped Task Instance Tries
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns TaskInstanceHistoryCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetMappedTaskInstanceTriesData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetMappedTaskInstanceTriesKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getMappedTaskInstanceTries(requestParams),
    });
};
/**
* Get Mapped Task Instance
* Get task instance.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.mapIndex
* @returns TaskInstanceResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetMappedTaskInstanceData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
dagId: string;
dagRunId: string;
mapIndex: number;
taskId: string;
}) => {
    const requestParams = { dagId, dagRunId, mapIndex, taskId };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetMappedTaskInstanceKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getMappedTaskInstance(requestParams),
    });
};
/**
* Get Task Instances
* Get list of task instances.
*
* This endpoint allows specifying `~` as the dag_id, dag_run_id to retrieve Task Instances for all DAGs
* and DAG runs.
* @param data The data for the request.
* @param data.dagId
* @param data.dagRunId
* @param data.taskId
* @param data.runAfterGte
* @param data.runAfterGt
* @param data.runAfterLte
* @param data.runAfterLt
* @param data.logicalDateGte
* @param data.logicalDateGt
* @param data.logicalDateLte
* @param data.logicalDateLt
* @param data.startDateGte
* @param data.startDateGt
* @param data.startDateLte
* @param data.startDateLt
* @param data.endDateGte
* @param data.endDateGt
* @param data.endDateLte
* @param data.endDateLt
* @param data.updatedAtGte
* @param data.updatedAtGt
* @param data.updatedAtLte
* @param data.updatedAtLt
* @param data.durationGte
* @param data.durationGt
* @param data.durationLte
* @param data.durationLt
* @param data.taskDisplayNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.taskGroupId Filter by exact task group ID. Returns all tasks within the specified task group.
* @param data.dagIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.runIdPattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.state
* @param data.pool
* @param data.poolNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.queue
* @param data.queueNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.executor
* @param data.versionNumber
* @param data.tryNumber
* @param data.operator
* @param data.operatorNamePattern SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). or the pipe `|` operator for OR logic (e.g. `dag1 | dag2`). Regular expressions are **not** supported.
* @param data.mapIndex
* @param data.limit
* @param data.offset
 * @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `id, state, duration, start_date, end_date, map_index, try_number, logical_date, run_after, data_interval_start, data_interval_end, rendered_map_index, operator`
* @returns TaskInstanceCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTaskInstanceServiceGetTaskInstancesData = (queryClient: QueryClient, { dagId, dagIdPattern, dagRunId, durationGt, durationGte, durationLt, durationLte, endDateGt, endDateGte, endDateLt, endDateLte, executor, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, mapIndex, offset, operator, operatorNamePattern, orderBy, pool, poolNamePattern, queue, queueNamePattern, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, startDateGt, startDateGte, startDateLt, startDateLte, state, taskDisplayNamePattern, taskGroupId, taskId, tryNumber, updatedAtGt, updatedAtGte, updatedAtLt, updatedAtLte, versionNumber }: {
dagId: string;
dagIdPattern?: string;
dagRunId: string;
durationGt?: number;
durationGte?: number;
durationLt?: number;
durationLte?: number;
endDateGt?: string;
endDateGte?: string;
endDateLt?: string;
endDateLte?: string;
executor?: string[];
limit?: number;
logicalDateGt?: string;
logicalDateGte?: string;
logicalDateLt?: string;
logicalDateLte?: string;
mapIndex?: number[];
offset?: number;
operator?: string[];
operatorNamePattern?: string;
orderBy?: string[];
pool?: string[];
poolNamePattern?: string;
queue?: string[];
queueNamePattern?: string;
runAfterGt?: string;
runAfterGte?: string;
runAfterLt?: string;
runAfterLte?: string;
runIdPattern?: string;
startDateGt?: string;
startDateGte?: string;
startDateLt?: string;
startDateLte?: string;
state?: string[];
taskDisplayNamePattern?: string;
taskGroupId?: string;
taskId?: string;
tryNumber?: number[];
updatedAtGt?: string;
updatedAtGte?: string;
updatedAtLt?: string;
updatedAtLte?: string;
versionNumber?: number[];
}) => {
    // Build the filter object once; both the cache key and the request use it.
    const requestParams = {
        dagId, dagIdPattern, dagRunId, durationGt, durationGte, durationLt,
        durationLte, endDateGt, endDateGte, endDateLt, endDateLte, executor,
        limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte,
        mapIndex, offset, operator, operatorNamePattern, orderBy, pool,
        poolNamePattern, queue, queueNamePattern, runAfterGt, runAfterGte,
        runAfterLt, runAfterLte, runIdPattern, startDateGt, startDateGte,
        startDateLt, startDateLte, state, taskDisplayNamePattern, taskGroupId,
        taskId, tryNumber, updatedAtGt, updatedAtGte, updatedAtLt,
        updatedAtLte, versionNumber,
    };
    return queryClient.ensureQueryData({
        queryKey: Common.UseTaskInstanceServiceGetTaskInstancesKeyFn(requestParams),
        queryFn: () => TaskInstanceService.getTaskInstances(requestParams),
    });
};
/**
 * Get Task Instance Try Details
 * Ensure the details for one task-instance try are cached, fetching via
 * TaskInstanceService.getTaskInstanceTryDetails() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / taskTryNumber identify the try; mapIndex selects a mapped instance.
 * @returns TaskInstanceHistoryResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetTaskInstanceTryDetailsData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId, taskTryNumber }: {
  dagId: string;
  dagRunId: string;
  mapIndex?: number;
  taskId: string;
  taskTryNumber: number;
}) => {
  const params = { dagId, dagRunId, mapIndex, taskId, taskTryNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetTaskInstanceTryDetailsKeyFn(params),
    queryFn: () => TaskInstanceService.getTaskInstanceTryDetails(params),
  });
};
/**
 * Get Mapped Task Instance Try Details
 * Ensure the details for one try of a mapped task instance are cached,
 * fetching via TaskInstanceService.getMappedTaskInstanceTryDetails() on a miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / mapIndex / taskTryNumber identify the mapped try.
 * @returns TaskInstanceHistoryResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetMappedTaskInstanceTryDetailsData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId, taskTryNumber }: {
  dagId: string;
  dagRunId: string;
  mapIndex: number;
  taskId: string;
  taskTryNumber: number;
}) => {
  const params = { dagId, dagRunId, mapIndex, taskId, taskTryNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetMappedTaskInstanceTryDetailsKeyFn(params),
    queryFn: () => TaskInstanceService.getMappedTaskInstanceTryDetails(params),
  });
};
/**
 * Get Log
 * Ensure the logs for a specific task-instance try are cached, fetching via
 * TaskInstanceService.getLog() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / tryNumber identify the try; mapIndex, fullContent,
 *             token and accept (content negotiation) tune the fetch.
 * @returns TaskInstancesLogResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetLogData = (queryClient: QueryClient, { accept, dagId, dagRunId, fullContent, mapIndex, taskId, token, tryNumber }: {
  accept?: "application/json" | "*/*" | "application/x-ndjson";
  dagId: string;
  dagRunId: string;
  fullContent?: boolean;
  mapIndex?: number;
  taskId: string;
  token?: string;
  tryNumber: number;
}) => {
  const params = { accept, dagId, dagRunId, fullContent, mapIndex, taskId, token, tryNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetLogKeyFn(params),
    queryFn: () => TaskInstanceService.getLog(params),
  });
};
/**
 * Get External Log Url
 * Ensure the external log URL for a task-instance try is cached, fetching via
 * TaskInstanceService.getExternalLogUrl() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / tryNumber identify the try; mapIndex selects a mapped instance.
 * @returns ExternalLogUrlResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetExternalLogUrlData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId, tryNumber }: {
  dagId: string;
  dagRunId: string;
  mapIndex?: number;
  taskId: string;
  tryNumber: number;
}) => {
  const params = { dagId, dagRunId, mapIndex, taskId, tryNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetExternalLogUrlKeyFn(params),
    queryFn: () => TaskInstanceService.getExternalLogUrl(params),
  });
};
/**
 * Get Hitl Detail
 * Ensure the Human-in-the-loop detail of a specific task instance is cached,
 * fetching via TaskInstanceService.getHitlDetail() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / mapIndex identify the task instance.
 * @returns HITLDetail Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetHitlDetailData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId }: {
  dagId: string;
  dagRunId: string;
  mapIndex: number;
  taskId: string;
}) => {
  const params = { dagId, dagRunId, mapIndex, taskId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetHitlDetailKeyFn(params),
    queryFn: () => TaskInstanceService.getHitlDetail(params),
  });
};
/**
 * Get Hitl Detail Try Detail
 * Ensure the Human-in-the-loop detail for one try of a task instance is
 * cached, fetching via TaskInstanceService.getHitlDetailTryDetail() on a miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / mapIndex / tryNumber identify the try.
 * @returns HITLDetailHistory Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetHitlDetailTryDetailData = (queryClient: QueryClient, { dagId, dagRunId, mapIndex, taskId, tryNumber }: {
  dagId: string;
  dagRunId: string;
  mapIndex: number;
  taskId: string;
  tryNumber: number;
}) => {
  const params = { dagId, dagRunId, mapIndex, taskId, tryNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetHitlDetailTryDetailKeyFn(params),
    queryFn: () => TaskInstanceService.getHitlDetailTryDetail(params),
  });
};
/**
 * Get Hitl Details
 * Ensure the filtered collection of Human-in-the-loop details is cached,
 * fetching via TaskInstanceService.getHitlDetails() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId scope the listing; the remaining fields are optional
 *             filters and paging controls. Pattern/search fields are SQL LIKE
 *             expressions (`%` / `_` wildcards, `|` for OR; no regular expressions).
 *             orderBy accepts a `-` prefix for descending order.
 * @returns HITLDetailCollection Successful Response
 * @throws ApiError
 */
export const ensureUseTaskInstanceServiceGetHitlDetailsData = (queryClient: QueryClient, { bodySearch, createdAtGt, createdAtGte, createdAtLt, createdAtLte, dagId, dagIdPattern, dagRunId, limit, mapIndex, offset, orderBy, respondedByUserId, respondedByUserName, responseReceived, state, subjectSearch, taskId, taskIdPattern }: {
  bodySearch?: string;
  createdAtGt?: string;
  createdAtGte?: string;
  createdAtLt?: string;
  createdAtLte?: string;
  dagId: string;
  dagIdPattern?: string;
  dagRunId: string;
  limit?: number;
  mapIndex?: number;
  offset?: number;
  orderBy?: string[];
  respondedByUserId?: string[];
  respondedByUserName?: string[];
  responseReceived?: boolean;
  state?: string[];
  subjectSearch?: string;
  taskId?: string;
  taskIdPattern?: string;
}) => {
  const params = { bodySearch, createdAtGt, createdAtGte, createdAtLt, createdAtLte, dagId, dagIdPattern, dagRunId, limit, mapIndex, offset, orderBy, respondedByUserId, respondedByUserName, responseReceived, state, subjectSearch, taskId, taskIdPattern };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskInstanceServiceGetHitlDetailsKeyFn(params),
    queryFn: () => TaskInstanceService.getHitlDetails(params),
  });
};
/**
 * Get Import Error
 * Ensure a single import error is cached, fetching via
 * ImportErrorService.getImportError() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data importErrorId identifies the import error.
 * @returns ImportErrorResponse Successful Response
 * @throws ApiError
 */
export const ensureUseImportErrorServiceGetImportErrorData = (queryClient: QueryClient, { importErrorId }: {
  importErrorId: number;
}) => {
  const params = { importErrorId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseImportErrorServiceGetImportErrorKeyFn(params),
    queryFn: () => ImportErrorService.getImportError(params),
  });
};
/**
 * Get Import Errors
 * Ensure the import-error collection is cached, fetching via
 * ImportErrorService.getImportErrors() on a cache miss. All filters optional.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data limit / offset page the result; orderBy accepts a `-` prefix for descending;
 *             filenamePattern is a SQL LIKE expression (`%` / `_` wildcards, `|` for OR).
 * @returns ImportErrorCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseImportErrorServiceGetImportErrorsData = (queryClient: QueryClient, { filenamePattern, limit, offset, orderBy }: {
  filenamePattern?: string;
  limit?: number;
  offset?: number;
  orderBy?: string[];
} = {}) => {
  const params = { filenamePattern, limit, offset, orderBy };
  return queryClient.ensureQueryData({
    queryKey: Common.UseImportErrorServiceGetImportErrorsKeyFn(params),
    queryFn: () => ImportErrorService.getImportErrors(params),
  });
};
/**
 * Get Jobs
 * Ensure the filtered job collection is cached, fetching via
 * JobService.getJobs() on a cache miss. All filters are optional.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data date-range (start/end, gt/gte/lt/lte), state, type, host, executor and
 *             liveness filters plus limit / offset paging; orderBy accepts a `-`
 *             prefix for descending order.
 * @returns JobCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseJobServiceGetJobsData = (queryClient: QueryClient, { endDateGt, endDateGte, endDateLt, endDateLte, executorClass, hostname, isAlive, jobState, jobType, limit, offset, orderBy, startDateGt, startDateGte, startDateLt, startDateLte }: {
  endDateGt?: string;
  endDateGte?: string;
  endDateLt?: string;
  endDateLte?: string;
  executorClass?: string;
  hostname?: string;
  isAlive?: boolean;
  jobState?: string;
  jobType?: string;
  limit?: number;
  offset?: number;
  orderBy?: string[];
  startDateGt?: string;
  startDateGte?: string;
  startDateLt?: string;
  startDateLte?: string;
} = {}) => {
  const params = { endDateGt, endDateGte, endDateLt, endDateLte, executorClass, hostname, isAlive, jobState, jobType, limit, offset, orderBy, startDateGt, startDateGte, startDateLt, startDateLte };
  return queryClient.ensureQueryData({
    queryKey: Common.UseJobServiceGetJobsKeyFn(params),
    queryFn: () => JobService.getJobs(params),
  });
};
/**
 * Get Plugins
 * Ensure the plugin collection is cached, fetching via
 * PluginService.getPlugins() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data optional limit / offset paging.
 * @returns PluginCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUsePluginServiceGetPluginsData = (queryClient: QueryClient, { limit, offset }: {
  limit?: number;
  offset?: number;
} = {}) => {
  const params = { limit, offset };
  return queryClient.ensureQueryData({
    queryKey: Common.UsePluginServiceGetPluginsKeyFn(params),
    queryFn: () => PluginService.getPlugins(params),
  });
};
/**
 * Import Errors
 * Ensure the plugin import-error collection is cached, fetching via
 * PluginService.importErrors() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns PluginImportErrorCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUsePluginServiceImportErrorsData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UsePluginServiceImportErrorsKeyFn(),
    queryFn: () => PluginService.importErrors(),
  });
};
/**
 * Get Pool
 * Ensure a single pool entry is cached, fetching via
 * PoolService.getPool() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data poolName identifies the pool.
 * @returns PoolResponse Successful Response
 * @throws ApiError
 */
export const ensureUsePoolServiceGetPoolData = (queryClient: QueryClient, { poolName }: {
  poolName: string;
}) => {
  const params = { poolName };
  return queryClient.ensureQueryData({
    queryKey: Common.UsePoolServiceGetPoolKeyFn(params),
    queryFn: () => PoolService.getPool(params),
  });
};
/**
 * Get Pools
 * Ensure the pool collection is cached, fetching via
 * PoolService.getPools() on a cache miss. All filters optional.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data limit / offset page the result; orderBy accepts a `-` prefix for descending;
 *             poolNamePattern is a SQL LIKE expression (`%` / `_` wildcards, `|` for OR).
 * @returns PoolCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUsePoolServiceGetPoolsData = (queryClient: QueryClient, { limit, offset, orderBy, poolNamePattern }: {
  limit?: number;
  offset?: number;
  orderBy?: string[];
  poolNamePattern?: string;
} = {}) => {
  const params = { limit, offset, orderBy, poolNamePattern };
  return queryClient.ensureQueryData({
    queryKey: Common.UsePoolServiceGetPoolsKeyFn(params),
    queryFn: () => PoolService.getPools(params),
  });
};
/**
 * Get Providers
 * Ensure the provider collection is cached, fetching via
 * ProviderService.getProviders() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data optional limit / offset paging.
 * @returns ProviderCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseProviderServiceGetProvidersData = (queryClient: QueryClient, { limit, offset }: {
  limit?: number;
  offset?: number;
} = {}) => {
  const params = { limit, offset };
  return queryClient.ensureQueryData({
    queryKey: Common.UseProviderServiceGetProvidersKeyFn(params),
    queryFn: () => ProviderService.getProviders(params),
  });
};
/**
 * Get Xcom Entry
 * Ensure a single XCom entry is cached, fetching via
 * XcomService.getXcomEntry() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId / dagRunId / taskId / xcomKey identify the entry; mapIndex selects
 *             a mapped instance; deserialize / stringify control value rendering.
 * @returns unknown Successful Response
 * @throws ApiError
 */
export const ensureUseXcomServiceGetXcomEntryData = (queryClient: QueryClient, { dagId, dagRunId, deserialize, mapIndex, stringify, taskId, xcomKey }: {
  dagId: string;
  dagRunId: string;
  deserialize?: boolean;
  mapIndex?: number;
  stringify?: boolean;
  taskId: string;
  xcomKey: string;
}) => {
  const params = { dagId, dagRunId, deserialize, mapIndex, stringify, taskId, xcomKey };
  return queryClient.ensureQueryData({
    queryKey: Common.UseXcomServiceGetXcomEntryKeyFn(params),
    queryFn: () => XcomService.getXcomEntry(params),
  });
};
/**
 * Get Xcom Entries
 * Ensure the filtered XCom collection is cached, fetching via
 * XcomService.getXcomEntries() on a cache miss.
 *
 * `~` may be given as dagId / dagRunId / taskId to match all DAGs / runs / tasks.
 * Pattern fields are SQL LIKE expressions (`%` / `_` wildcards, `|` for OR; no
 * regular expressions).
 * @param queryClient TanStack Query client acting as the cache.
 * @param data scope identifiers plus optional key/date filters and limit / offset paging.
 * @returns XComCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseXcomServiceGetXcomEntriesData = (queryClient: QueryClient, { dagDisplayNamePattern, dagId, dagRunId, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, mapIndex, mapIndexFilter, offset, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, taskId, taskIdPattern, xcomKey, xcomKeyPattern }: {
  dagDisplayNamePattern?: string;
  dagId: string;
  dagRunId: string;
  limit?: number;
  logicalDateGt?: string;
  logicalDateGte?: string;
  logicalDateLt?: string;
  logicalDateLte?: string;
  mapIndex?: number;
  mapIndexFilter?: number;
  offset?: number;
  runAfterGt?: string;
  runAfterGte?: string;
  runAfterLt?: string;
  runAfterLte?: string;
  runIdPattern?: string;
  taskId: string;
  taskIdPattern?: string;
  xcomKey?: string;
  xcomKeyPattern?: string;
}) => {
  const params = { dagDisplayNamePattern, dagId, dagRunId, limit, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte, mapIndex, mapIndexFilter, offset, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runIdPattern, taskId, taskIdPattern, xcomKey, xcomKeyPattern };
  return queryClient.ensureQueryData({
    queryKey: Common.UseXcomServiceGetXcomEntriesKeyFn(params),
    queryFn: () => XcomService.getXcomEntries(params),
  });
};
/**
 * Get Tasks
 * Ensure the task list for a DAG is cached, fetching via
 * TaskService.getTasks() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId identifies the DAG; orderBy optionally sorts the result.
 * @returns TaskCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskServiceGetTasksData = (queryClient: QueryClient, { dagId, orderBy }: {
  dagId: string;
  orderBy?: string;
}) => {
  const params = { dagId, orderBy };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskServiceGetTasksKeyFn(params),
    queryFn: () => TaskService.getTasks(params),
  });
};
/**
 * Get Task
 * Ensure the simplified representation of a task is cached, fetching via
 * TaskService.getTask() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId and taskId identify the task (taskId is typed `unknown` by the generator).
 * @returns TaskResponse Successful Response
 * @throws ApiError
 */
export const ensureUseTaskServiceGetTaskData = (queryClient: QueryClient, { dagId, taskId }: {
  dagId: string;
  taskId: unknown;
}) => {
  const params = { dagId, taskId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseTaskServiceGetTaskKeyFn(params),
    queryFn: () => TaskService.getTask(params),
  });
};
/**
 * Get Variable
 * Ensure a single variable entry is cached, fetching via
 * VariableService.getVariable() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data variableKey identifies the variable.
 * @returns VariableResponse Successful Response
 * @throws ApiError
 */
export const ensureUseVariableServiceGetVariableData = (queryClient: QueryClient, { variableKey }: {
  variableKey: string;
}) => {
  const params = { variableKey };
  return queryClient.ensureQueryData({
    queryKey: Common.UseVariableServiceGetVariableKeyFn(params),
    queryFn: () => VariableService.getVariable(params),
  });
};
/**
 * Get Variables
 * Ensure the variable collection is cached, fetching via
 * VariableService.getVariables() on a cache miss. All filters optional.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data limit / offset page the result; orderBy accepts a `-` prefix for descending;
 *             variableKeyPattern is a SQL LIKE expression (`%` / `_` wildcards, `|` for OR).
 * @returns VariableCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseVariableServiceGetVariablesData = (queryClient: QueryClient, { limit, offset, orderBy, variableKeyPattern }: {
  limit?: number;
  offset?: number;
  orderBy?: string[];
  variableKeyPattern?: string;
} = {}) => {
  const params = { limit, offset, orderBy, variableKeyPattern };
  return queryClient.ensureQueryData({
    queryKey: Common.UseVariableServiceGetVariablesKeyFn(params),
    queryFn: () => VariableService.getVariables(params),
  });
};
/**
 * Get Dag Version
 * Ensure one DAG version is cached, fetching via
 * DagVersionService.getDagVersion() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId and versionNumber identify the version.
 * @returns DagVersionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseDagVersionServiceGetDagVersionData = (queryClient: QueryClient, { dagId, versionNumber }: {
  dagId: string;
  versionNumber: number;
}) => {
  const params = { dagId, versionNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseDagVersionServiceGetDagVersionKeyFn(params),
    queryFn: () => DagVersionService.getDagVersion(params),
  });
};
/**
 * Get Dag Versions
 * Ensure the DAG-version collection is cached, fetching via
 * DagVersionService.getDagVersions() on a cache miss.
 *
 * `~` may be given as dagId to retrieve versions for all DAGs.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId scopes the listing; versionNumber / bundleName / bundleVersion filter it;
 *             limit / offset page it; orderBy accepts a `-` prefix for descending.
 * @returns DAGVersionCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseDagVersionServiceGetDagVersionsData = (queryClient: QueryClient, { bundleName, bundleVersion, dagId, limit, offset, orderBy, versionNumber }: {
  bundleName?: string;
  bundleVersion?: string;
  dagId: string;
  limit?: number;
  offset?: number;
  orderBy?: string[];
  versionNumber?: number;
}) => {
  const params = { bundleName, bundleVersion, dagId, limit, offset, orderBy, versionNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseDagVersionServiceGetDagVersionsKeyFn(params),
    queryFn: () => DagVersionService.getDagVersions(params),
  });
};
/**
 * Get Health
 * Ensure the health-check data is cached, fetching via
 * MonitorService.getHealth() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns HealthInfoResponse Successful Response
 * @throws ApiError
 */
export const ensureUseMonitorServiceGetHealthData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UseMonitorServiceGetHealthKeyFn(),
    queryFn: () => MonitorService.getHealth(),
  });
};
/**
 * Get Version
 * Ensure the version information is cached, fetching via
 * VersionService.getVersion() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns VersionInfo Successful Response
 * @throws ApiError
 */
export const ensureUseVersionServiceGetVersionData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UseVersionServiceGetVersionKeyFn(),
    queryFn: () => VersionService.getVersion(),
  });
};
/**
 * Login
 * Ensure the login redirect data is cached, fetching via LoginService.login()
 * (which redirects to the URL chosen by the configured AuthManager) on a miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data next — optional post-login redirect target.
 * @returns unknown Successful Response
 * @throws ApiError
 */
export const ensureUseLoginServiceLoginData = (queryClient: QueryClient, { next }: {
  next?: string;
} = {}) => {
  const params = { next };
  return queryClient.ensureQueryData({
    queryKey: Common.UseLoginServiceLoginKeyFn(params),
    queryFn: () => LoginService.login(params),
  });
};
/**
 * Logout
 * Ensure the logout response is cached, invoking LoginService.logout()
 * on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns unknown Successful Response
 * @throws ApiError
 */
export const ensureUseLoginServiceLogoutData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UseLoginServiceLogoutKeyFn(),
    queryFn: () => LoginService.logout(),
  });
};
/**
 * Get Auth Menus
 * Ensure the authorized menu items are cached, fetching via
 * AuthLinksService.getAuthMenus() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns MenuItemCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseAuthLinksServiceGetAuthMenusData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UseAuthLinksServiceGetAuthMenusKeyFn(),
    queryFn: () => AuthLinksService.getAuthMenus(),
  });
};
/**
 * Get Current User Info
 * Conveniently get the current authenticated user information.
 * @returns AuthenticatedMeResponse Successful Response
 * @throws ApiError
 */
export const ensureUseAuthLinksServiceGetCurrentUserInfoData = (queryClient: QueryClient) => queryClient.ensureQueryData({ queryKey: Common.UseAuthLinksServiceGetCurrentUserInfoKeyFn(), queryFn: () => AuthLinksService.getCurrentUserInfo() });
/**
 * Get Dependencies
 * Ensure the dependencies graph is cached, fetching via
 * DependenciesService.getDependencies() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data nodeId optionally roots the graph; dependencyType is "scheduling" or "data".
 * @returns BaseGraphResponse Successful Response
 * @throws ApiError
 */
export const ensureUseDependenciesServiceGetDependenciesData = (queryClient: QueryClient, { dependencyType, nodeId }: {
  dependencyType?: "scheduling" | "data";
  nodeId?: string;
} = {}) => {
  const params = { dependencyType, nodeId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseDependenciesServiceGetDependenciesKeyFn(params),
    queryFn: () => DependenciesService.getDependencies(params),
  });
};
/**
 * Historical Metrics
 * Ensure the cluster-activity historical metrics are cached, fetching via
 * DashboardService.historicalMetrics() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data startDate is required; endDate optionally bounds the range.
 * @returns HistoricalMetricDataResponse Successful Response
 * @throws ApiError
 */
export const ensureUseDashboardServiceHistoricalMetricsData = (queryClient: QueryClient, { endDate, startDate }: {
  endDate?: string;
  startDate: string;
}) => {
  const params = { endDate, startDate };
  return queryClient.ensureQueryData({
    queryKey: Common.UseDashboardServiceHistoricalMetricsKeyFn(params),
    queryFn: () => DashboardService.historicalMetrics(params),
  });
};
/**
 * Dag Stats
 * Ensure the basic DAG statistics (counts of DAGs in various states) are
 * cached, fetching via DashboardService.dagStats() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @returns DashboardDagStatsResponse Successful Response
 * @throws ApiError
 */
export const ensureUseDashboardServiceDagStatsData = (queryClient: QueryClient) => {
  return queryClient.ensureQueryData({
    queryKey: Common.UseDashboardServiceDagStatsKeyFn(),
    queryFn: () => DashboardService.dagStats(),
  });
};
/**
 * Structure Data
 * Ensure a DAG's structure data is cached, fetching via
 * StructureService.structureData() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId is required; root / depth / includeUpstream / includeDownstream /
 *             externalDependencies / versionNumber tune which part of the graph is returned.
 * @returns StructureDataResponse Successful Response
 * @throws ApiError
 */
export const ensureUseStructureServiceStructureDataData = (queryClient: QueryClient, { dagId, depth, externalDependencies, includeDownstream, includeUpstream, root, versionNumber }: {
  dagId: string;
  depth?: number;
  externalDependencies?: boolean;
  includeDownstream?: boolean;
  includeUpstream?: boolean;
  root?: string;
  versionNumber?: number;
}) => {
  const params = { dagId, depth, externalDependencies, includeDownstream, includeUpstream, root, versionNumber };
  return queryClient.ensureQueryData({
    queryKey: Common.UseStructureServiceStructureDataKeyFn(params),
    queryFn: () => StructureService.structureData(params),
  });
};
/**
 * Get Dag Structure
 * Ensure the grid-view DAG structure is cached, fetching via
 * GridService.getDagStructure() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId is required; root / depth / includeUpstream / includeDownstream scope the
 *             graph; runAfter* / runType / state / triggeringUser filter runs; limit / offset
 *             page; orderBy accepts a `-` prefix for descending. triggeringUser is a SQL LIKE
 *             expression (`%` / `_` wildcards, `|` for OR; no regular expressions).
 * @returns GridNodeResponse Successful Response
 * @throws ApiError
 */
export const ensureUseGridServiceGetDagStructureData = (queryClient: QueryClient, { dagId, depth, includeDownstream, includeUpstream, limit, offset, orderBy, root, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runType, state, triggeringUser }: {
  dagId: string;
  depth?: number;
  includeDownstream?: boolean;
  includeUpstream?: boolean;
  limit?: number;
  offset?: number;
  orderBy?: string[];
  root?: string;
  runAfterGt?: string;
  runAfterGte?: string;
  runAfterLt?: string;
  runAfterLte?: string;
  runType?: string[];
  state?: string[];
  triggeringUser?: string;
}) => {
  const params = { dagId, depth, includeDownstream, includeUpstream, limit, offset, orderBy, root, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runType, state, triggeringUser };
  return queryClient.ensureQueryData({
    queryKey: Common.UseGridServiceGetDagStructureKeyFn(params),
    queryFn: () => GridService.getDagStructure(params),
  });
};
/**
 * Get Grid Runs
 * Ensure the grid-view run information for a DAG is cached, fetching via
 * GridService.getGridRuns() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId is required; runAfter* / runType / state / triggeringUser filter runs;
 *             limit / offset page; orderBy accepts a `-` prefix for descending.
 *             triggeringUser is a SQL LIKE expression (`%` / `_` wildcards, `|` for OR).
 * @returns GridRunsResponse Successful Response
 * @throws ApiError
 */
export const ensureUseGridServiceGetGridRunsData = (queryClient: QueryClient, { dagId, limit, offset, orderBy, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runType, state, triggeringUser }: {
  dagId: string;
  limit?: number;
  offset?: number;
  orderBy?: string[];
  runAfterGt?: string;
  runAfterGte?: string;
  runAfterLt?: string;
  runAfterLte?: string;
  runType?: string[];
  state?: string[];
  triggeringUser?: string;
}) => {
  const params = { dagId, limit, offset, orderBy, runAfterGt, runAfterGte, runAfterLt, runAfterLte, runType, state, triggeringUser };
  return queryClient.ensureQueryData({
    queryKey: Common.UseGridServiceGetGridRunsKeyFn(params),
    queryFn: () => GridService.getGridRuns(params),
  });
};
/**
 * Get Grid Ti Summaries
 * Ensure the per-run task-instance state summaries for the grid are cached,
 * fetching via GridService.getGridTiSummaries() on a cache miss.
 *
 * Per the API: states are aggregated for task groups and mapped tasks — a
 * mapped task contributes one entry whose state aggregates its TIs, and each
 * task group gets a synthetic entry aggregating the states within it (so a
 * collapsed group can still be rendered as a filled square).
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId and runId identify the run.
 * @returns GridTISummaries Successful Response
 * @throws ApiError
 */
export const ensureUseGridServiceGetGridTiSummariesData = (queryClient: QueryClient, { dagId, runId }: {
  dagId: string;
  runId: string;
}) => {
  const params = { dagId, runId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseGridServiceGetGridTiSummariesKeyFn(params),
    queryFn: () => GridService.getGridTiSummaries(params),
  });
};
/**
 * Get Gantt Data
 * Ensure all task-instance tries for the Gantt chart are cached, fetching via
 * GanttService.getGanttData() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId and runId identify the run.
 * @returns GanttResponse Successful Response
 * @throws ApiError
 */
export const ensureUseGanttServiceGetGanttDataData = (queryClient: QueryClient, { dagId, runId }: {
  dagId: string;
  runId: string;
}) => {
  const params = { dagId, runId };
  return queryClient.ensureQueryData({
    queryKey: Common.UseGanttServiceGetGanttDataKeyFn(params),
    queryFn: () => GanttService.getGanttData(params),
  });
};
/**
 * Get Calendar
 * Ensure the calendar data for a DAG (historical and planned runs) is cached,
 * fetching via CalendarService.getCalendar() on a cache miss.
 * @param queryClient TanStack Query client acting as the cache.
 * @param data dagId is required; granularity is "hourly" or "daily";
 *             logicalDate* bounds filter the range.
 * @returns CalendarTimeRangeCollectionResponse Successful Response
 * @throws ApiError
 */
export const ensureUseCalendarServiceGetCalendarData = (queryClient: QueryClient, { dagId, granularity, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte }: {
  dagId: string;
  granularity?: "hourly" | "daily";
  logicalDateGt?: string;
  logicalDateGte?: string;
  logicalDateLt?: string;
  logicalDateLte?: string;
}) => {
  const params = { dagId, granularity, logicalDateGt, logicalDateGte, logicalDateLt, logicalDateLte };
  return queryClient.ensureQueryData({
    queryKey: Common.UseCalendarServiceGetCalendarKeyFn(params),
    queryFn: () => CalendarService.getCalendar(params),
  });
};
/**
* List Teams
* @param data The data for the request.
* @param data.limit
* @param data.offset
* @param data.orderBy Attributes to order by, multi criteria sort is supported. Prefix with `-` for descending order. Supported attributes: `name`
* @returns TeamCollectionResponse Successful Response
* @throws ApiError
*/
export const ensureUseTeamsServiceListTeamsData = (queryClient: QueryClient, { limit, offset, orderBy }: {
limit?: number;
offset?: number;
orderBy?: string[];
} = {}) => queryClient.ensureQueryData({ queryKey: Common.UseTeamsServiceListTeamsKeyFn({ limit, offset, orderBy }), queryFn: () => TeamsService.listTeams({ limit, offset, orderBy }) }); | typescript | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/openapi-gen/queries/ensureQueryData.ts |
from io import StringIO
import flask
def test_suppressed_exception_logging():
    """A Flask subclass that overrides log_exception() can silence the
    traceback Flask normally writes on an unhandled error, while the
    client still receives the standard 500 response."""

    class QuietFlask(flask.Flask):
        # Swallow the exception instead of logging it.
        def log_exception(self, ctx, exc_info):
            pass

    captured = StringIO()
    app = QuietFlask(__name__)

    @app.route("/")
    def index():
        raise Exception("test")

    rv = app.test_client().get("/", errors_stream=captured)
    assert rv.status_code == 500
    assert b"Internal Server Error" in rv.data
    # Nothing was written to the WSGI error stream.
    assert not captured.getvalue()
"""SCons.Tool.cyglink
Customization of gnulink for Cygwin (http://www.cygwin.com/)
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
import re
import os
import SCons.Action
import SCons.Util
import SCons.Tool
import gnulink
import link
def _lib_generator(target, source, env, for_signature, **kw):
    """Assemble the link command line for a shared library or loadable module.

    Keyword arguments:
      cmd       -- base link command as a CLVar (default: ['$SHLINK'])
      varprefix -- construction-variable prefix, 'SHLIB' or 'LDMODULE'
                   (default: 'SHLIB')

    Returns a one-element list containing the assembled command (the shape
    SCons expects from a command generator).
    """
    try: cmd = kw['cmd']
    except KeyError: cmd = SCons.Util.CLVar(['$SHLINK'])
    try: vp = kw['varprefix']
    except KeyError: vp = 'SHLIB'
    # Find the DLL node among the targets by its prefix/suffix variables.
    dll = env.FindIxes(target, '%sPREFIX' % vp, '%sSUFFIX' % vp)
    if dll: cmd.extend(['-o', dll])
    # NOTE(review): $SHLINKFLAGS is used for both SHLIB and LDMODULE builds;
    # confirm loadable modules shouldn't use $LDMODULEFLAGS instead.
    cmd.extend(['$SHLINKFLAGS', '$__%sVERSIONFLAGS' % vp, '$__RPATH'])
    # When an import library is also a target, tell the GNU linker to emit
    # it and export every symbol from the whole archive of sources.
    implib = env.FindIxes(target, 'IMPLIBPREFIX', 'IMPLIBSUFFIX')
    if implib:
        cmd.extend([
            '-Wl,--out-implib='+implib.get_string(for_signature),
            '-Wl,--export-all-symbols',
            '-Wl,--enable-auto-import',
            '-Wl,--whole-archive', '$SOURCES',
            '-Wl,--no-whole-archive', '$_LIBDIRFLAGS', '$_LIBFLAGS'
        ])
    else:
        cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
    return [cmd]
def shlib_generator(target, source, env, for_signature):
    """Command generator for shared libraries (used as $SHLINKCOM)."""
    kw = {'varprefix': 'SHLIB',
          'cmd': SCons.Util.CLVar(['$SHLINK'])}
    return _lib_generator(target, source, env, for_signature, **kw)
def ldmod_generator(target, source, env, for_signature):
    """Command generator for loadable modules (used as $LDMODULECOM)."""
    kw = {'varprefix': 'LDMODULE',
          'cmd': SCons.Util.CLVar(['$LDMODULE'])}
    return _lib_generator(target, source, env, for_signature, **kw)
def _lib_emitter(target, source, env, **kw):
    """Emitter shared by shared libraries and loadable modules.

    Normalizes the DLL target name (strips a redundant 'lib' after the
    Cygwin prefix) and, unless `no_import_lib` is set in the environment,
    appends an import-library target plus its versioned symlinks.

    Keyword arguments:
      varprefix -- construction-variable prefix, 'SHLIB' or 'LDMODULE'
                   (default: 'SHLIB')
      libtype   -- 'ShLib' or 'LdMod', forwarded to the symlink generators
                   (default: 'ShLib')

    Returns the (target, source) tuple SCons expects from an emitter.
    """
    Verbose = False
    if Verbose:
        print "_lib_emitter: target[0]=%r" % target[0].get_path()
    try: vp = kw['varprefix']
    except KeyError: vp = 'SHLIB'
    try: libtype = kw['libtype']
    except KeyError: libtype = 'ShLib'
    dll = env.FindIxes(target, '%sPREFIX' % vp, '%sSUFFIX' % vp)
    no_import_lib = env.get('no_import_lib', 0)
    if Verbose:
        # NOTE(review): this runs before the `not dll` check below, so it
        # would raise if dll is None while Verbose is on — confirm.
        print "_lib_emitter: dll=%r" % dll.get_path()
    if not dll or len(target) > 1:
        # NOTE(review): SCons.Errors is not imported at module top;
        # presumably reachable through the SCons package — verify.
        raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$%sSUFFIX" % vp))
    # Remove any "lib" after the prefix
    pre = env.subst('$%sPREFIX' % vp)
    if dll.name[len(pre):len(pre)+3] == 'lib':
        dll.name = pre + dll.name[len(pre)+3:]
    if Verbose:
        print "_lib_emitter: dll.name=%r" % dll.name
    orig_target = target
    # Rebuild the target list around the (possibly renamed) DLL node.
    target = [env.fs.File(dll)]
    target[0].attributes.shared = 1
    if Verbose:
        print "_lib_emitter: after target=[env.fs.File(dll)]: target[0]=%r" % target[0].get_path()
    # Append an import lib target
    if not no_import_lib:
        # Create list of target libraries as strings
        target_strings = env.ReplaceIxes(orig_target[0],
                                         '%sPREFIX' % vp, '%sSUFFIX' % vp,
                                         'IMPLIBPREFIX', 'IMPLIBSUFFIX')
        if Verbose:
            print "_lib_emitter: target_strings=%r" % target_strings
        implib_target = env.fs.File(target_strings)
        if Verbose:
            print "_lib_emitter: implib_target=%r" % implib_target.get_path()
        implib_target.attributes.shared = 1
        target.append(implib_target)
        # Versioned builds also get unversioned symlinks pointing at the
        # import library; record them so they are cleaned with the DLL.
        symlinks = SCons.Tool.ImpLibSymlinkGenerator(env, implib_target,
                                                     implib_libtype=libtype,
                                                     generator_libtype=libtype+'ImpLib')
        if Verbose:
            print "_lib_emitter: implib symlinks=%r" % SCons.Tool.StringizeLibSymlinks(symlinks)
        if symlinks:
            SCons.Tool.EmitLibSymlinks(env, symlinks, implib_target, clean_targets = target[0])
            implib_target.attributes.shliblinks = symlinks
    return (target, source)
def shlib_emitter(target, source, env):
    """Target/source emitter for shared libraries."""
    return _lib_emitter(target, source, env,
                        varprefix='SHLIB', libtype='ShLib')
def ldmod_emitter(target, source, env):
    """Target/source emitter for loadable modules."""
    return _lib_emitter(target, source, env,
                        varprefix='LDMODULE', libtype='LdMod')
def _versioned_lib_suffix(env, suffix, version):
"""Generate versioned shared library suffix from a unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'"""
Verbose = False
if Verbose:
print "_versioned_lib_suffix: suffix= ", suffix
print "_versioned_lib_suffix: version= ", version
cygversion = re.sub('\.', '-', version)
if not suffix.startswith('-' + cygversion):
suffix = '-' + cygversion + suffix
if Verbose:
print "_versioned_lib_suffix: return suffix= ", suffix
return suffix
def _versioned_implib_name(env, libnode, version, prefix, suffix, **kw):
    """Return the versioned import-library name for a shared library.

    Delegates to link._versioned_lib_name with the import-library
    prefix/suffix generators; kw['libtype'] selects 'ShLib' or 'LdMod'.
    """
    libtype = kw['libtype']
    return link._versioned_lib_name(env, libnode, version, prefix, suffix,
                                    SCons.Tool.ImpLibPrefixGenerator,
                                    SCons.Tool.ImpLibSuffixGenerator,
                                    implib_libtype=libtype)
def _versioned_implib_symlinks(env, libnode, version, prefix, suffix, **kw):
    """Generate link names that should be created for a versioned shared library.

    Returns a list in the form [ (link, linktarget), ... ] mapping the
    unversioned import-library name onto the versioned node ``libnode``.

    Keyword arguments:
      libtype -- 'ShLib' or 'LdMod' (default: 'ShLib')
    """
    Verbose = False
    if Verbose:
        print("_versioned_implib_symlinks: libnode=%r" % libnode.get_path())
        print("_versioned_implib_symlinks: version=%r" % version)
    libtype = kw.get('libtype', 'ShLib')
    # The symlink is created next to the versioned import library.
    linkdir = os.path.dirname(libnode.get_path())
    if Verbose:
        print("_versioned_implib_symlinks: linkdir=%r" % linkdir)
    name = SCons.Tool.ImpLibNameGenerator(env, libnode,
                                          implib_libtype=libtype,
                                          generator_libtype=libtype+'ImpLib')
    if Verbose:
        print("_versioned_implib_symlinks: name=%r" % name)
    link0 = env.fs.File(os.path.join(linkdir, name))
    symlinks = [(link0, libnode)]
    if Verbose:
        print("_versioned_implib_symlinks: return symlinks=%r" % SCons.Tool.StringizeLibSymlinks(symlinks))
    return symlinks
# Wrap the command generators in SCons Actions so they can be installed as
# $SHLINKCOM and $LDMODULECOM in generate() below.
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
ldmod_action = SCons.Action.Action(ldmod_generator, generator=1)
def generate(env):
    """Add Builders and construction variables for cyglink to an Environment."""
    # Start from the generic GNU linker setup, then override the pieces
    # that differ on Cygwin.
    gnulink.generate(env)
    env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,-no-undefined')
    env['SHLINKCOM'] = shlib_action
    env['LDMODULECOM'] = ldmod_action
    env.Append(SHLIBEMITTER = [shlib_emitter])
    env.Append(LDMODULEEMITTER = [ldmod_emitter])
    # Cygwin naming convention: DLLs are cyg*.dll, import libs lib*.dll.a.
    env['SHLIBPREFIX'] = 'cyg'
    env['SHLIBSUFFIX'] = '.dll'
    env['IMPLIBPREFIX'] = 'lib'
    env['IMPLIBSUFFIX'] = '.dll.a'
    # Variables used by versioned shared libraries
    env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
    env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS'
    # SHLIBVERSIONFLAGS and LDMODULEVERSIONFLAGS are same as in gnulink...
    # LINKCALLBACKS are NOT inherited from gnulink
    env['LINKCALLBACKS'] = {
        'VersionedShLibSuffix'          : _versioned_lib_suffix,
        'VersionedLdModSuffix'          : _versioned_lib_suffix,
        'VersionedImpLibSuffix'         : _versioned_lib_suffix,
        'VersionedShLibName'            : link._versioned_shlib_name,
        'VersionedLdModName'            : link._versioned_ldmod_name,
        'VersionedShLibImpLibName'      : lambda *args: _versioned_implib_name(*args, libtype='ShLib'),
        'VersionedLdModImpLibName'      : lambda *args: _versioned_implib_name(*args, libtype='LdMod'),
        'VersionedShLibImpLibSymlinks'  : lambda *args: _versioned_implib_symlinks(*args, libtype='ShLib'),
        'VersionedLdModImpLibSymlinks'  : lambda *args: _versioned_implib_symlinks(*args, libtype='LdMod'),
    }
    # these variables were set by gnulink but are not used in cyglink
    try: del env['_SHLIBSONAME']
    except KeyError: pass
    try: del env['_LDMODULESONAME']
    except KeyError: pass
def exists(env):
    """cyglink is available whenever the underlying gnulink tool is."""
    return gnulink.exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
---
# Regression check: a vault file reached through a symlink must decrypt
# and expose its variables to the play.
- hosts: localhost
  gather_facts: false
  vars_files:
    - vaulted.yml
  tasks:
    - name: see if we can decrypt
      assert:
        that:
          - control is defined
          - symlink == 'this is a test'
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-dark.svg">
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg">
<img alt="Hugging Face Transformers Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg" width="352" height="59" style="max-width: 100%;">
</picture>
<br/>
<br/>
</p>
<p align="center">
<a href="https://huggingface.com/models"><img alt="Checkpoints on Hub" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen"></a>
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
</p>
<h4 align="center">
<p>
<a href="https://github.com/huggingface/transformers/blob/main/README.md">English</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Português</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_it.md">Italiano</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ur.md">اردو</a> |
<b>বাংলা</b> |
</p>
</h4>
<h3 align="center">
<p>ইনফারেন্স ও ট্রেনিংয়ের জন্য আধুনিকতম (State-of-the-art) প্রি-ট্রেইন্ড মডেলসমূহ</p>
</h3>
<h3 align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers_as_a_model_definition.png"/>
</h3>
**Transformers** হলো একটা ফ্রেমওয়ার্ক যেটা দিয়ে টেক্সট, কম্পিউটার ভিশন, অডিও, ভিডিও আর মাল্টিমোডাল—সব ধরনের মডেল তৈরি আর চালানো যায়। এটা ট্রেইনিং আর ইনফারেন্স – দুই কাজেই ব্যবহার করা হয়।
Transformers মডেলের ডেফিনিশন এক জায়গায় রাখে। এর মানে হলো, একবার কোনো মডেল `transformers`-এ সাপোর্ট পেলেই সেটা সহজে বিভিন্ন ট্রেইনিং ফ্রেমওয়ার্ক (Axolotl, Unsloth, DeepSpeed, FSDP, PyTorch-Lightning ইত্যাদি), ইনফারেন্স ইঞ্জিন (vLLM, SGLang, TGI ইত্যাদি) আর অন্যান্য লাইব্রেরি (llama.cpp, mlx ইত্যাদি)-তে ব্যবহার করা যায়।
আমরা চাই নতুন আর আধুনিক মডেলগুলো সবাই ব্যবহার করতে পারে। তাই মডেলের ডেফিনিশন রাখা হয়েছে সহজ, কাস্টমাইজযোগ্য আর পারফরম্যান্স-ফ্রেন্ডলি।
এখন পর্যন্ত [Hugging Face Hub](https://huggingface.com/models)-এ ১০ লাখেরও বেশি Transformers [মডেল চেকপয়েন্ট](https://huggingface.co/models?library=transformers&sort=trending) আছে, যেগুলো যেকোনো সময় ব্যবহার করা যায়।
আজই [Hub](https://huggingface.com/) থেকে একটা মডেল বেছে নিন আর Transformers দিয়ে শুরু করুন।
## ইনস্টলেশন
Transformers Python 3.10+ সহ কাজ করে, এবং [PyTorch](https://pytorch.org/get-started/locally/) 2.4+।
[venv](https://docs.python.org/3/library/venv.html) বা [uv](https://docs.astral.sh/uv/) ব্যবহার করে একটি ভার্চুয়াল এনভায়রনমেন্ট তৈরি এবং সক্রিয় করুন।
```py
# venv
python -m venv .my-env
source .my-env/bin/activate
# uv
uv venv .my-env
source .my-env/bin/activate
```
আপনার ভার্চুয়াল পরিবেশে Transformers ইনস্টল করুন।
```py
# pip
pip install "transformers[torch]"
# uv
uv pip install "transformers[torch]"
```
যদি আপনি লাইব্রেরির সর্বশেষ পরিবর্তনগুলি চান বা অবদান রাখতে আগ্রহী হন তবে উৎস থেকে Transformers ইনস্টল করুন। তবে, সর্বশেষ সংস্করণটি স্থিতিশীল নাও হতে পারে। যদি আপনি কোনো ত্রুটির সম্মুখীন হন তবে নির্দ্বিধায় একটি [issue](https://github.com/huggingface/transformers/issues) খুলুন।
```Shell
git clone https://github.com/huggingface/transformers.git
cd transformers
# pip
pip install .[torch]
# uv
uv pip install .[torch]
```
## কুইকস্টার্ট
Transformers ব্যবহার শুরু করুন এখনই [Pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API দিয়ে। `Pipeline` হলো একটি হাই-লেভেল ইনফারেন্স ক্লাস, যা টেক্সট, অডিও, ভিশন এবং মাল্টিমোডাল টাস্ক সাপোর্ট করে। এটি ইনপুট প্রিপ্রসেসিং করে এবং সঠিক আউটপুট রিটার্ন করে।
একটি পাইপলাইন তৈরি করুন এবং টেক্সট জেনারেশনের জন্য কোন মডেল ব্যবহার করবেন তা নির্দিষ্ট করুন। মডেলটি ডাউনলোড হয়ে ক্যাশে রাখা হবে, ফলে পরে সহজেই আবার ব্যবহার করতে পারবেন। সবশেষে, মডেলকে প্রম্পট করার জন্য কিছু টেক্সট দিন।
```py
from transformers import pipeline
pipeline = pipeline(task="text-generation", model="Qwen/Qwen2.5-1.5B")
pipeline("the secret to baking a really good cake is ")
[{'generated_text': 'the secret to baking a really good cake is 1) to use the right ingredients and 2) to follow the recipe exactly. the recipe for the cake is as follows: 1 cup of sugar, 1 cup of flour, 1 cup of milk, 1 cup of butter, 1 cup of eggs, 1 cup of chocolate chips. if you want to make 2 cakes, how much sugar do you need? To make 2 cakes, you will need 2 cups of sugar.'}]
```
মডেলের সাথে চ্যাট করতে হলেও ব্যবহার প্যাটার্ন একই। শুধু পার্থক্য হলো, আপনাকে একটি চ্যাট হিস্ট্রি তৈরি করতে হবে (যা `Pipeline`-এ ইনপুট হিসেবে যাবে) আপনার আর সিস্টেমের মধ্যে।
> [!TIP]
> আপনি সরাসরি কমান্ড লাইন থেকেও একটি মডেলের সাথে চ্যাট করতে পারেন।
> ```Shell
> transformers chat Qwen/Qwen2.5-0.5B-Instruct
> ```
```Python
import torch
from transformers import pipeline
chat = [
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
]
pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", dtype=torch.bfloat16, device_map="auto")
response = pipeline(chat, max_new_tokens=512)
print(response[0]["generated_text"][-1]["content"])
```

বিভিন্ন মোডালিটি এবং কাজের জন্য Pipeline কিভাবে কাজ করে তা দেখতে নিচের উদাহরণগুলো সম্প্রসারণ করুন।
<details>
<summary>অটোমেটিক স্পিচ রিকগনিশন (ASR)</summary>
```Python
from transformers import pipeline
pipeline = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")
pipeline("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```
</details>
<details>
<summary>ইমেজ ক্লাসিফিকেশন</summary>
<h3 align="center">
<a><img src="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"></a>
</h3>
```py
from transformers import pipeline
pipeline = pipeline(task="image-classification", model="facebook/dinov2-small-imagenet1k-1-layer")
pipeline("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
[{'label': 'macaw', 'score': 0.997848391532898},
{'label': 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
'score': 0.0016551691805943847},
{'label': 'lorikeet', 'score': 0.00018523589824326336},
{'label': 'African grey, African gray, Psittacus erithacus',
'score': 7.85409429227002e-05},
{'label': 'quail', 'score': 5.502637941390276e-05}]
```
</details>
<details>
<summary>ভিজুয়াল কোয়েশ্চন আনসারিং</summary>
<h3 align="center">
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/idefics-few-shot.jpg"></a>
</h3>
```py
from transformers import pipeline
pipeline = pipeline(task="visual-question-answering", model="Salesforce/blip-vqa-base")
pipeline(
    image="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/idefics-few-shot.jpg",
question="What is in the image?",
)
[{'answer': 'statue of liberty'}]
```
</details>
## কেন Transformers ব্যবহার করবেন?
1. সহজে ব্যবহারযোগ্য সর্বাধুনিক মডেল:
* ন্যাচারাল ল্যাঙ্গুয়েজ আন্ডারস্ট্যান্ডিং ও জেনারেশন, কম্পিউটার ভিশন, অডিও, ভিডিও এবং মাল্টিমোডাল টাস্কে উচ্চ পারফরম্যান্স।
* গবেষক, ইঞ্জিনিয়ার এবং ডেভেলপারদের জন্য সহজে শুরু করার সুযোগ।
* মাত্র তিনটি ক্লাস শিখলেই ব্যবহার করা যায়।
* সব প্রি-ট্রেইন্ড মডেলের জন্য একটি একীভূত API।
2. কম কম্পিউট খরচ, ছোট কার্বন ফুটপ্রিন্ট:
* শূন্য থেকে ট্রেইন না করে ট্রেইন্ড মডেল শেয়ার করুন।
* কম্পিউট টাইম ও প্রোডাকশন খরচ কমান।
* সব ধরনের মোডালিটির জন্য ১০ লক্ষ+ প্রি-ট্রেইন্ড চেকপয়েন্টসহ ডজনখানেক মডেল আর্কিটেকচার।
3. মডেলের লাইফসাইকেলের প্রতিটি ধাপে সঠিক ফ্রেমওয়ার্ক বেছে নিন:
* মাত্র ৩ লাইনের কোডে সর্বাধুনিক মডেল ট্রেইন করুন।
* সহজে PyTorch / JAX / TF2.0 এর মধ্যে মডেল স্থানান্তর করুন।
* ট্রেইনিং, ইভ্যালুয়েশন ও প্রোডাকশনের জন্য আলাদা ফ্রেমওয়ার্ক ব্যবহার করুন।
4. সহজেই মডেল বা উদাহরণ কাস্টমাইজ করুন:
* প্রতিটি আর্কিটেকচারের জন্য এমন উদাহরণ দেওয়া আছে যা মূল লেখকদের প্রকাশিত ফলাফল পুনরুত্পাদন করতে সক্ষম।
* মডেলের অভ্যন্তরীণ অংশগুলো যতটা সম্ভব একভাবে এক্সপোজ করা হয়েছে।
* দ্রুত এক্সপেরিমেন্টের জন্য লাইব্রেরি ছাড়াও মডেল ফাইল ব্যবহার করা যায়।
<a target="_blank" href="https://huggingface.co/enterprise">
<img alt="Hugging Face Enterprise Hub" src="https://github.com/user-attachments/assets/247fb16d-d251-4583-96c4-d3d76dda4925">
</a><br>
## কেন Transformers ব্যবহার করবেন না?
* এই লাইব্রেরি নিউরাল নেটওয়ার্কের জন্য ব্লক-মডিউল টুলবক্স নয়। মডেল ফাইলের কোডে অতিরিক্ত অ্যাবস্ট্র্যাকশন intentionally করা হয়নি, যাতে গবেষকরা দ্রুত প্রতিটি মডেলের উপর কাজ করতে পারে কোনো অতিরিক্ত ফাইল বা স্তরে না গিয়ে।
* ট্রেইনিং API মূলত Transformers-এর PyTorch মডেলের সাথে কাজ করার জন্য অপটিমাইজ করা হয়েছে। সাধারণ মেশিন লার্নিং লুপের জন্য, [Accelerate](https://huggingface.co/docs/accelerate) এর মতো অন্য লাইব্রেরি ব্যবহার করা উচিত।
* [উদাহরণ স্ক্রিপ্টগুলো](https://github.com/huggingface/transformers/tree/main/examples) শুধু *উদাহরণ*। এগুলো সরাসরি আপনার ব্যবহারের ক্ষেত্রে কাজ নাও করতে পারে, তাই কোড সামঞ্জস্য করতে হতে পারে।
## Transformers দিয়ে ১০০টি প্রজেক্ট
Transformers শুধু প্রি-ট্রেইন্ড মডেল ব্যবহার করার টুলকিট নয়, এটি একটি কমিউনিটি, যা Hugging Face Hub-এর চারপাশে তৈরি। আমরা চাই যে ডেভেলপার, গবেষক, শিক্ষার্থী, অধ্যাপক, ইঞ্জিনিয়ার বা যে কেউ তাদের স্বপ্নের প্রজেক্ট তৈরি করতে পারে।
Transformers 100,000 স্টার উদযাপন করতে আমরা কমিউনিটিকে তুলে ধরতে [awesome-transformers](./awesome-transformers.md) পেজ তৈরি করেছি, যেখানে Transformers দিয়ে তৈরি ১০০টি অসাধারণ প্রজেক্ট তালিকাভুক্ত আছে।
আপনার কোনো প্রজেক্ট আছে যা তালিকায় থাকা উচিত মনে করেন? তাহলে PR খুলে যুক্ত করুন।
## উদাহরণ মডেল
আপনি আমাদের অধিকাংশ মডেল সরাসরি তাদের [Hub মডেল পেজ](https://huggingface.co/models) থেকে পরীক্ষা করতে পারেন।
নিচের প্রতিটি মোডালিটি এক্সপ্যান্ড করে বিভিন্ন ব্যবহার কেসের জন্য কয়েকটি উদাহরণ মডেল দেখুন।
<details>
<summary>অডিও</summary>
* [Whisper](https://huggingface.co/openai/whisper-large-v3-turbo) দিয়ে অডিও ক্লাসিফিকেশন
* [Moonshine](https://huggingface.co/UsefulSensors/moonshine) দিয়ে অটোমেটিক স্পিচ রিকগনিশন
* [Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks) দিয়ে কীওয়ার্ড স্পটিং
* [Moshi](https://huggingface.co/kyutai/moshiko-pytorch-bf16) দিয়ে স্পিচ-টু-স্পিচ জেনারেশন
* [MusicGen](https://huggingface.co/facebook/musicgen-large) দিয়ে টেক্সট-টু-অডিও
* [Bark](https://huggingface.co/suno/bark) দিয়ে টেক্সট-টু-স্পিচ
</details>
<details>
<summary>কম্পিউটার ভিশন</summary>
* [SAM](https://huggingface.co/facebook/sam-vit-base) দিয়ে স্বয়ংক্রিয় মাস্ক জেনারেশন
* [DepthPro](https://huggingface.co/apple/DepthPro-hf) দিয়ে গভীরতা অনুমান
* [DINO v2](https://huggingface.co/facebook/dinov2-base) দিয়ে চিত্র শ্রেণীকরণ
* [SuperPoint](https://huggingface.co/magic-leap-community/superpoint) দিয়ে কীপয়েন্ট সনাক্তকরণ
* [SuperGlue](https://huggingface.co/magic-leap-community/superglue_outdoor) দিয়ে কীপয়েন্ট ম্যাচিং
* [RT-DETRv2](https://huggingface.co/PekingU/rtdetr_v2_r50vd) দিয়ে অবজেক্ট সনাক্তকরণ
* [VitPose](https://huggingface.co/usyd-community/vitpose-base-simple) দিয়ে পোস অনুমান
* [OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_swin_large) দিয়ে ইউনিভার্সাল সেগমেন্টেশন
* [VideoMAE](https://huggingface.co/MCG-NJU/videomae-large) দিয়ে ভিডিও শ্রেণীকরণ
</details>
<details>
<summary>মাল্টিমোডাল</summary>
* [Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B) দিয়ে অডিও বা টেক্সট থেকে টেক্সট জেনারেশন
* [LayoutLMv3](https://huggingface.co/microsoft/layoutlmv3-base) দিয়ে ডকুমেন্ট প্রশ্নোত্তর
* [Qwen-VL](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct) দিয়ে ইমেজ বা টেক্সট থেকে টেক্সট জেনারেশন
* [BLIP-2](https://huggingface.co/Salesforce/blip2-opt-2.7b) দিয়ে ইমেজ ক্যাপশনিং
* [GOT-OCR2](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf) দিয়ে OCR-ভিত্তিক ডকুমেন্ট আন্ডারস্ট্যান্ডিং
* [TAPAS](https://huggingface.co/google/tapas-base) দিয়ে টেবিল প্রশ্নোত্তর
* [Emu3](https://huggingface.co/BAAI/Emu3-Gen) দিয়ে ইউনিফাইড মাল্টিমোডাল আন্ডারস্ট্যান্ডিং এবং জেনারেশন
* [Llava-OneVision](https://huggingface.co/llava-hf/llava-onevision-qwen2-0.5b-ov-hf) দিয়ে ভিশন থেকে টেক্সট
* [Llava](https://huggingface.co/llava-hf/llava-1.5-7b-hf) দিয়ে ভিজুয়াল কোয়েশ্চন আনসারিং
* [Kosmos-2](https://huggingface.co/microsoft/kosmos-2-patch14-224) দিয়ে ভিজুয়াল রেফারিং এক্সপ্রেশন সেগমেন্টেশন
</details>
<details>
<summary>NLP</summary>
* [ModernBERT](https://huggingface.co/answerdotai/ModernBERT-base) দিয়ে মাস্কড ওয়ার্ড কমপ্লিশন
* [Gemma](https://huggingface.co/google/gemma-2-2b) দিয়ে নাম্বড এন্টিটি রিকগনিশন
* [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) দিয়ে প্রশ্নোত্তর
* [BART](https://huggingface.co/facebook/bart-large-cnn) দিয়ে সারসংক্ষেপ (Summarization)
* [T5](https://huggingface.co/google-t5/t5-base) দিয়ে অনুবাদ
* [Llama](https://huggingface.co/meta-llama/Llama-3.2-1B) দিয়ে টেক্সট জেনারেশন
* [Qwen](https://huggingface.co/Qwen/Qwen2.5-0.5B) দিয়ে টেক্সট ক্লাসিফিকেশন
</details>
## সাইটেশন
আমাদের [একটি পেপার](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) আছে যা আপনি 🤗 Transformers লাইব্রেরির জন্য রেফারেন্স হিসেবে ব্যবহার করতে পারেন।
```bibtex
@inproceedings{wolf-etal-2020-transformers,
title = "Transformers: State-of-the-Art Natural Language Processing",
author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
month = oct,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
pages = "38--45"
}
``` | unknown | github | https://github.com/huggingface/transformers | i18n/README_bn.md |
import type * as postcss from 'postcss'
import { atRule, comment, decl, rule, type AstNode } from '../../tailwindcss/src/ast'
import { createLineTable, type LineTable } from '../../tailwindcss/src/source-maps/line-table'
import type { Source, SourceLocation } from '../../tailwindcss/src/source-maps/source'
import { DefaultMap } from '../../tailwindcss/src/utils/default-map'
const EXCLAMATION_MARK = 0x21
// Convert a Tailwind-internal CSS AST into a PostCSS AST, carrying source
// locations over so PostCSS can build source maps. `source` is the fallback
// postcss.Source attached to the root; its input map also seeds each
// generated Input.
export function cssAstToPostCssAst(
  postcss: postcss.Postcss,
  ast: AstNode[],
  source?: postcss.Source,
): postcss.Root {
  // One postcss.Input per internal Source, created lazily and shared by
  // every node that originates from the same source file.
  let inputMap = new DefaultMap<Source, postcss.Input>((src) => {
    return new postcss.Input(src.code, {
      map: source?.input.map,
      from: src.file ?? undefined,
    })
  })
  // Line tables translate byte offsets into line/column pairs per source.
  let lineTables = new DefaultMap<Source, LineTable>((src) => createLineTable(src.code))
  let root = postcss.root()
  root.source = source
  // Build a postcss.Source from an internal SourceLocation, or undefined
  // when the node has no usable location.
  function toSource(loc: SourceLocation | undefined): postcss.Source | undefined {
    // Use the fallback if this node has no location info in the AST
    if (!loc) return
    if (!loc[0]) return
    let table = lineTables.get(loc[0])
    let start = table.find(loc[1])
    let end = table.find(loc[2])
    return {
      input: inputMap.get(loc[0]),
      start: {
        line: start.line,
        // +1: the line table reports 0-based columns; PostCSS is 1-based.
        column: start.column + 1,
        offset: loc[1],
      },
      end: {
        line: end.line,
        column: end.column + 1,
        offset: loc[2],
      },
    }
  }
  function updateSource(astNode: postcss.ChildNode, loc: SourceLocation | undefined) {
    let source = toSource(loc)
    // The `source` property on PostCSS nodes must be defined if present because
    // `toJSON()` reads each property and tries to read from source.input if it
    // sees a `source` property. This means for a missing or otherwise absent
    // source it must be *missing* from the object rather than just `undefined`
    if (source) {
      astNode.source = source
    } else {
      delete astNode.source
    }
  }
  // Recursively append the PostCSS equivalent of `node` to `parent`.
  function transform(node: AstNode, parent: postcss.Container) {
    // Declaration
    if (node.kind === 'declaration') {
      let astNode = postcss.decl({
        prop: node.property,
        value: node.value ?? '',
        important: node.important,
      })
      updateSource(astNode, node.src)
      parent.append(astNode)
    }

    // Rule
    else if (node.kind === 'rule') {
      let astNode = postcss.rule({ selector: node.selector })
      updateSource(astNode, node.src)
      astNode.raws.semicolon = true
      parent.append(astNode)
      for (let child of node.nodes) {
        transform(child, astNode)
      }
    }

    // AtRule (internal names carry the leading '@'; PostCSS names do not)
    else if (node.kind === 'at-rule') {
      let astNode = postcss.atRule({ name: node.name.slice(1), params: node.params })
      updateSource(astNode, node.src)
      astNode.raws.semicolon = true
      parent.append(astNode)
      for (let child of node.nodes) {
        transform(child, astNode)
      }
    }

    // Comment
    else if (node.kind === 'comment') {
      let astNode = postcss.comment({ text: node.value })
      // Spaces are encoded in our node.value already, no need to add additional
      // spaces.
      astNode.raws.left = ''
      astNode.raws.right = ''
      updateSource(astNode, node.src)
      parent.append(astNode)
    }

    // AtRoot & Context should not happen
    else if (node.kind === 'at-root' || node.kind === 'context') {
    }

    // Unknown
    else {
      node satisfies never
    }
  }
  for (let node of ast) {
    transform(node, root)
  }
  return root
}
export function postCssAstToCssAst(root: postcss.Root): AstNode[] {
let inputMap = new DefaultMap<postcss.Input, Source>((input) => ({
file: input.file ?? input.id ?? null,
code: input.css,
}))
function toSource(node: postcss.ChildNode): SourceLocation | undefined {
let source = node.source
if (!source) return
let input = source.input
if (!input) return
if (source.start === undefined) return
if (source.end === undefined) return
return [inputMap.get(input), source.start.offset, source.end.offset]
}
function transform(
node: postcss.ChildNode,
parent: Extract<AstNode, { nodes: AstNode[] }>['nodes'],
) {
// Declaration
if (node.type === 'decl') {
let astNode = decl(node.prop, node.value, node.important)
astNode.src = toSource(node)
parent.push(astNode)
}
// Rule
else if (node.type === 'rule') {
let astNode = rule(node.selector)
astNode.src = toSource(node)
node.each((child) => transform(child, astNode.nodes))
parent.push(astNode)
}
// AtRule
else if (node.type === 'atrule') {
let astNode = atRule(`@${node.name}`, node.params)
astNode.src = toSource(node)
node.each((child) => transform(child, astNode.nodes))
parent.push(astNode)
}
// Comment
else if (node.type === 'comment') {
if (node.text.charCodeAt(0) !== EXCLAMATION_MARK) return
let astNode = comment(node.text)
astNode.src = toSource(node)
parent.push(astNode)
}
// Unknown
else {
node satisfies never
}
}
let ast: AstNode[] = []
root.each((node) => transform(node, ast))
return ast
} | typescript | github | https://github.com/tailwindlabs/tailwindcss | packages/@tailwindcss-postcss/src/ast.ts |
# dts-test
Tests TypeScript types to ensure the types remain as expected.
- This directory is included in the root `tsconfig.json`, where package imports are aliased to `src` directories, so in IDEs and the `pnpm check` script the types are validated against source code.
- When running `tsc` with `packages-private/dts-test/tsconfig.test.json`, packages are resolved using normal `node` resolution, so the types are validated against actual **built** types. This requires the types to be built first via `pnpm build-dts`. | unknown | github | https://github.com/vuejs/core | packages-private/dts-test/README.md |
# frozen_string_literal: true

# :markup: markdown

module ActionCable
  module Channel
    # Lets a channel declare recurring tasks that are started when a client
    # subscribes and stopped when it unsubscribes.
    module PeriodicTimers
      extend ActiveSupport::Concern

      included do
        # Class-level registry of [callback, { every: seconds }] pairs
        # declared via +periodically+ (no instance reader; instances go
        # through the class).
        class_attribute :periodic_timers, instance_reader: false, default: []

        after_subscribe :start_periodic_timers
        after_unsubscribe :stop_periodic_timers
      end

      module ClassMethods
        # Periodically performs a task on the channel, like updating an online user
        # counter, polling a backend for new status messages, sending regular
        # "heartbeat" messages, or doing some internal work and giving progress updates.
        #
        # Pass a method name or lambda argument or provide a block to call. Specify the
        # calling period in seconds using the `every:` keyword argument.
        #
        #     periodically :transmit_progress, every: 5.seconds
        #
        #     periodically every: 3.minutes do
        #       transmit action: :update_count, count: current_count
        #     end
        #
        def periodically(callback_or_method_name = nil, every:, &block)
          callback =
            if block_given?
              raise ArgumentError, "Pass a block or provide a callback arg, not both" if callback_or_method_name
              block
            else
              case callback_or_method_name
              when Proc
                callback_or_method_name
              when Symbol
                # Wrap the method name so it is dispatched on the channel instance.
                -> { __send__ callback_or_method_name }
              else
                raise ArgumentError, "Expected a Symbol method name or a Proc, got #{callback_or_method_name.inspect}"
              end
            end

          unless every.kind_of?(Numeric) && every > 0
            raise ArgumentError, "Expected every: to be a positive number of seconds, got #{every.inspect}"
          end

          # += creates a new array so subclasses don't mutate the parent's list.
          self.periodic_timers += [[ callback, every: every ]]
        end
      end

      private
        # Memoized handles of the timers currently running for this instance.
        def active_periodic_timers
          @active_periodic_timers ||= []
        end

        # Starts one timer per class-level registration (runs after_subscribe).
        def start_periodic_timers
          self.class.periodic_timers.each do |callback, options|
            active_periodic_timers << start_periodic_timer(callback, every: options.fetch(:every))
          end
        end

        # Schedules +callback+ on the server event loop; the actual work is
        # handed off to the worker pool so it doesn't block the event loop.
        def start_periodic_timer(callback, every:)
          connection.server.event_loop.timer every do
            connection.worker_pool.async_exec self, connection: connection, &callback
          end
        end

        # Shuts down all running timers (runs after_unsubscribe).
        def stop_periodic_timers
          active_periodic_timers.each { |timer| timer.shutdown }
          active_periodic_timers.clear
        end
    end
  end
end
# XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - key bindings (instead of quick-n-dirty bindings on Canvas):
# - up/down arrow keys to move focus around
# - ditto for page up/down, home/end
# - left/right arrows to expand/collapse & move out/in
# - more doc strings
# - add icons for "file", "module", "class", "method"; better "python" icon
# - callback for selection???
# - multiple-item selection
# - tooltips
# - redo geometry without magic numbers
# - keep track of object ids to allow more careful cleaning
# - optimize tree redraw after expand of subnode
import os
from tkinter import *
from idlelib import ZoomHeight
from idlelib.configHandler import idleConf
# Directory the tree-node GIF icons are loaded from; resolved relative to
# this module when possible.
ICONDIR = "Icons"

# Look for Icons subdirectory in the same directory as this module
try:
    _icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
except NameError:
    # __file__ is undefined (e.g. frozen/interactive); fall back to the
    # bare relative name.
    _icondir = ICONDIR
if os.path.isdir(_icondir):
    ICONDIR = _icondir
elif not os.path.isdir(ICONDIR):
    raise RuntimeError("can't find icon directory (%r)" % (ICONDIR,))
def listicons(icondir=ICONDIR):
    """Utility to display the available icons."""
    root = Tk()
    import glob
    # Sorted list of all GIF files in the icon directory.
    paths = sorted(glob.glob(os.path.join(icondir, "*.gif")))
    images = []
    row = column = 0
    for path in paths:
        name = os.path.splitext(os.path.basename(path))[0]
        image = PhotoImage(file=path, master=root)
        images.append(image)
        icon_label = Label(root, image=image, bd=1, relief="raised")
        icon_label.grid(row=row, column=column)
        name_label = Label(root, text=name)
        name_label.grid(row=row + 1, column=column)
        column += 1
        # Wrap to a new pair of rows after ten columns.
        if column >= 10:
            row += 2
            column = 0
    # Keep references on the root so the images aren't garbage-collected.
    root.images = images
class TreeNode:
    """One visible node of the tree, drawing a TreeItem on a Canvas.

    Nodes form a tree of their own (``parent``/``children``) mirroring the
    TreeItem hierarchy, and handle selection, expand/collapse, scrolling
    into view, and in-place label editing.
    """

    def __init__(self, canvas, parent, item):
        self.canvas = canvas
        self.parent = parent
        self.item = item
        self.state = 'collapsed'
        self.selected = False
        self.children = []
        self.x = self.y = None
        self.iconimages = {}  # cache of PhotoImage instances for icons

    def destroy(self):
        # Recursively destroy children, then sever the uplink.
        for c in self.children[:]:
            self.children.remove(c)
            c.destroy()
        self.parent = None

    def geticonimage(self, name):
        # Return a cached PhotoImage for the named icon, loading it on
        # first use from ICONDIR (".gif" is the default extension).
        try:
            return self.iconimages[name]
        except KeyError:
            pass
        file, ext = os.path.splitext(name)
        ext = ext or ".gif"
        fullname = os.path.join(ICONDIR, file + ext)
        image = PhotoImage(master=self.canvas, file=fullname)
        self.iconimages[name] = image
        return image

    def select(self, event=None):
        # Select this node, deselecting everything else first, and redraw
        # icon and label in the selected style.
        if self.selected:
            return
        self.deselectall()
        self.selected = True
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselect(self, event=None):
        if not self.selected:
            return
        self.selected = False
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselectall(self):
        # Deselect every node in the whole tree (delegates to the root).
        if self.parent:
            self.parent.deselectall()
        else:
            self.deselecttree()

    def deselecttree(self):
        # Deselect this node and its entire subtree.
        if self.selected:
            self.deselect()
        for child in self.children:
            child.deselecttree()

    def flip(self, event=None):
        # Toggle expanded/collapsed, then forward the event to the item.
        if self.state == 'expanded':
            self.collapse()
        else:
            self.expand()
        self.item.OnDoubleClick()
        return "break"

    def expand(self, event=None):
        if not self.item._IsExpandable():
            return
        if self.state != 'expanded':
            self.state = 'expanded'
            self.update()
            self.view()

    def collapse(self, event=None):
        if self.state != 'collapsed':
            self.state = 'collapsed'
            self.update()

    def view(self):
        # Scroll the canvas so this node and its visible children show.
        # 17 is the fixed row height used throughout this widget.
        top = self.y - 2
        bottom = self.lastvisiblechild().y + 17
        height = bottom - top
        visible_top = self.canvas.canvasy(0)
        visible_height = self.canvas.winfo_height()
        visible_bottom = self.canvas.canvasy(visible_height)
        if visible_top <= top and bottom <= visible_bottom:
            return
        x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
        if top >= visible_top and height <= visible_height:
            fraction = top + height - visible_height
        else:
            fraction = top
        fraction = float(fraction) / y1
        self.canvas.yview_moveto(fraction)

    def lastvisiblechild(self):
        # Deepest last child along the expanded spine, or self.
        if self.children and self.state == 'expanded':
            return self.children[-1].lastvisiblechild()
        else:
            return self

    def update(self):
        # Full redraw of the entire tree, delegated to the root node;
        # shows a "watch" cursor while redrawing.
        if self.parent:
            self.parent.update()
        else:
            oldcursor = self.canvas['cursor']
            self.canvas['cursor'] = "watch"
            self.canvas.update()
            self.canvas.delete(ALL)     # XXX could be more subtle
            self.draw(7, 2)
            x0, y0, x1, y1 = self.canvas.bbox(ALL)
            self.canvas.configure(scrollregion=(0, 0, x1, y1))
            self.canvas['cursor'] = oldcursor

    def draw(self, x, y):
        # Draw this node at (x, y) and, if expanded, its subtree below it.
        # Returns the y coordinate just below the drawn subtree.
        # XXX This hard-codes too many geometry constants!
        self.x, self.y = x, y
        self.drawicon()
        self.drawtext()
        if self.state != 'expanded':
            return y + 17
        # draw children
        if not self.children:
            # Populate children lazily from the item's sublist.
            sublist = self.item._GetSubList()
            if not sublist:
                # _IsExpandable() was mistaken; that's allowed
                return y + 17
            for item in sublist:
                child = self.__class__(self.canvas, self, item)
                self.children.append(child)
        cx = x + 20
        cy = y + 17
        cylast = 0
        for child in self.children:
            cylast = cy
            self.canvas.create_line(x + 9, cy + 7, cx, cy + 7, fill="gray50")
            cy = child.draw(cx, cy)
            if child.item._IsExpandable():
                if child.state == 'expanded':
                    iconname = "minusnode"
                    callback = child.collapse
                else:
                    iconname = "plusnode"
                    callback = child.expand
                image = self.geticonimage(iconname)
                id = self.canvas.create_image(x + 9, cylast + 7, image=image)
                # XXX This leaks bindings until canvas is deleted:
                self.canvas.tag_bind(id, "<1>", callback)
                self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
        id = self.canvas.create_line(x + 9, y + 10, x + 9, cylast + 7,
                                     ##stipple="gray50",  # XXX Seems broken in Tk 8.0.x
                                     fill="gray50")
        self.canvas.tag_lower(id)  # XXX .lower(id) before Python 1.5.2
        return cy

    def drawicon(self):
        # Draw the item's icon; remembers the canvas id in self.image_id.
        if self.selected:
            imagename = (self.item.GetSelectedIconName() or
                         self.item.GetIconName() or
                         "openfolder")
        else:
            imagename = self.item.GetIconName() or "folder"
        image = self.geticonimage(imagename)
        id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
        self.image_id = id
        self.canvas.tag_bind(id, "<1>", self.select)
        self.canvas.tag_bind(id, "<Double-1>", self.flip)

    def drawtext(self):
        # Draw the optional label text and the item text (as a Label widget
        # embedded in the canvas so it can be replaced by an Entry on edit).
        textx = self.x + 20 - 1
        texty = self.y - 1
        labeltext = self.item.GetLabelText()
        if labeltext:
            id = self.canvas.create_text(textx, texty, anchor="nw",
                                         text=labeltext)
            self.canvas.tag_bind(id, "<1>", self.select)
            self.canvas.tag_bind(id, "<Double-1>", self.flip)
            x0, y0, x1, y1 = self.canvas.bbox(id)
            textx = max(x1, 200) + 10
        text = self.item.GetText() or "<no text>"
        try:
            self.entry
        except AttributeError:
            pass
        else:
            # An edit was in progress; commit it before redrawing.
            self.edit_finish()
        try:
            label = self.label
        except AttributeError:
            # padding carefully selected (on Windows) to match Entry widget:
            self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
        theme = idleConf.GetOption('main','Theme','name')
        if self.selected:
            self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
        else:
            self.label.configure(idleConf.GetHighlight(theme, 'normal'))
        id = self.canvas.create_window(textx, texty,
                                       anchor="nw", window=self.label)
        self.label.bind("<1>", self.select_or_edit)
        self.label.bind("<Double-1>", self.flip)
        self.text_id = id

    def select_or_edit(self, event=None):
        # A click on an already-selected, editable item starts editing.
        if self.selected and self.item.IsEditable():
            self.edit(event)
        else:
            self.select(event)

    def edit(self, event=None):
        # Overlay an Entry widget on the label for in-place renaming.
        self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
        self.entry.insert(0, self.label['text'])
        self.entry.selection_range(0, END)
        self.entry.pack(ipadx=5)
        self.entry.focus_set()
        self.entry.bind("<Return>", self.edit_finish)
        self.entry.bind("<Escape>", self.edit_cancel)

    def edit_finish(self, event=None):
        # Commit the edit: push the new text to the item, then redisplay
        # whatever the item actually accepted.
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        text = entry.get()
        entry.destroy()
        if text and text != self.item.GetText():
            self.item.SetText(text)
        text = self.item.GetText()
        self.label['text'] = text
        self.drawtext()
        self.canvas.focus_set()

    def edit_cancel(self, event=None):
        # Abandon the edit and restore the previous display.
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        entry.destroy()
        self.drawtext()
        self.canvas.focus_set()
class TreeItem:
    """Abstract class representing tree items.

    Methods should typically be overridden, otherwise a default action
    is used.
    """

    def __init__(self):
        """Constructor. Do whatever you need to do."""

    def GetText(self):
        """Return text string to display."""

    def GetLabelText(self):
        """Return label text string to display in front of text (if any)."""

    # Class-level default; _IsExpandable caches the answer per instance
    # on first call (and _GetSubList may flip it to 0 if the sublist is
    # actually empty).
    expandable = None

    def _IsExpandable(self):
        """Do not override! Called by TreeNode."""
        if self.expandable is None:
            self.expandable = self.IsExpandable()
        return self.expandable

    def IsExpandable(self):
        """Return whether there are subitems."""
        # Default: assume expandable; _GetSubList corrects this lazily.
        return 1

    def _GetSubList(self):
        """Do not override! Called by TreeNode."""
        if not self.IsExpandable():
            return []
        sublist = self.GetSubList()
        if not sublist:
            self.expandable = 0
        return sublist

    def IsEditable(self):
        """Return whether the item's text may be edited."""

    def SetText(self, text):
        """Change the item's text (if it is editable)."""

    def GetIconName(self):
        """Return name of icon to be displayed normally."""

    def GetSelectedIconName(self):
        """Return name of icon to be displayed when selected."""

    def GetSubList(self):
        """Return list of items forming sublist."""

    def OnDoubleClick(self):
        """Called on a double-click on the item."""
# Example application
class FileTreeItem(TreeItem):
    """Example TreeItem subclass -- browse the file system."""

    def __init__(self, path):
        self.path = path

    def GetText(self):
        # Final path component; fall back to the full path for roots like "/".
        return os.path.basename(self.path) or self.path

    def IsEditable(self):
        # Roots (empty basename) cannot be renamed.
        return os.path.basename(self.path) != ""

    def SetText(self, text):
        parent_dir = os.path.dirname(self.path)
        candidate = os.path.join(parent_dir, text)
        if os.path.dirname(candidate) != os.path.dirname(self.path):
            # Reject names that would move the entry out of its directory.
            return
        try:
            os.rename(self.path, candidate)
        except OSError:
            # Rename failed (permissions, target exists, ...): keep old path.
            pass
        else:
            self.path = candidate

    def GetIconName(self):
        if not self.IsExpandable():
            return "python"  # XXX wish there was a "file" icon

    def IsExpandable(self):
        return os.path.isdir(self.path)

    def GetSubList(self):
        try:
            names = os.listdir(self.path)
        except OSError:
            # Unreadable directory: show it as empty.
            return []
        names.sort(key=os.path.normcase)
        return [FileTreeItem(os.path.join(self.path, name)) for name in names]
# A canvas widget with scroll bars and some useful bindings
class ScrolledCanvas:
    """A Canvas wrapped in a Frame with scroll bars and key bindings.

    Binds Prior/Next (page) and Up/Down (unit) scrolling plus Alt-2 for
    window zoom-height; extra keyword options are passed to the Canvas.
    """

    def __init__(self, master, **opts):
        if 'yscrollincrement' not in opts:
            # One vertical scroll unit == one tree row (17 pixels).
            opts['yscrollincrement'] = 17
        self.master = master
        self.frame = Frame(master)
        self.frame.rowconfigure(0, weight=1)
        self.frame.columnconfigure(0, weight=1)
        self.canvas = Canvas(self.frame, **opts)
        self.canvas.grid(row=0, column=0, sticky="nsew")
        self.vbar = Scrollbar(self.frame, name="vbar")
        self.vbar.grid(row=0, column=1, sticky="nse")
        self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
        self.hbar.grid(row=1, column=0, sticky="ews")
        # Wire scrollbars and canvas to each other in both directions.
        self.canvas['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.canvas.yview
        self.canvas['xscrollcommand'] = self.hbar.set
        self.hbar['command'] = self.canvas.xview
        self.canvas.bind("<Key-Prior>", self.page_up)
        self.canvas.bind("<Key-Next>", self.page_down)
        self.canvas.bind("<Key-Up>", self.unit_up)
        self.canvas.bind("<Key-Down>", self.unit_down)
        #if isinstance(master, Toplevel) or isinstance(master, Tk):
        self.canvas.bind("<Alt-Key-2>", self.zoom_height)
        self.canvas.focus_set()

    def page_up(self, event):
        self.canvas.yview_scroll(-1, "page")
        return "break"

    def page_down(self, event):
        self.canvas.yview_scroll(1, "page")
        return "break"

    def unit_up(self, event):
        self.canvas.yview_scroll(-1, "unit")
        return "break"

    def unit_down(self, event):
        self.canvas.yview_scroll(1, "unit")
        return "break"

    def zoom_height(self, event):
        ZoomHeight.zoom_height(self.master)
        return "break"
def _tree_widget(parent):
    """htest entry point: open a file-system tree browser near *parent*.

    Positions a new toplevel 150 pixels below *parent* (parsed from its
    Tk geometry string) and shows the current working directory.
    """
    # Bug fix: `re` was used here but never imported at module level,
    # causing a NameError when the test was run.
    import re
    root = Tk()
    root.title("Test TreeWidget")
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d" % (x, y + 150))
    sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
    sc.frame.pack(expand=1, fill="both", side=LEFT)
    item = FileTreeItem(os.getcwd())
    node = TreeNode(sc.canvas, None, item)
    node.expand()
    root.mainloop()
if __name__ == '__main__':
    # Run the human-verified test for this widget when executed directly.
    from idlelib.idle_test.htest import run
    run(_tree_widget)
# -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 0.8
    """

    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers,
    # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    # One identifier regex per unicode level (see class docstring).
    levels = {
        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                  uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    token_variants = True

    # Build one token table per unicode level; __init__ compiles only the
    # variant the user selected.
    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),  # verbatim string; "" escapes a quote
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                                                       Keyword)),
                (r'(abstract|as|async|await|base|break|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|lock|new|null|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ]
        }

    def __init__(self, **options):
        # Select and (if needed) compile the token table for the requested
        # unicode level.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
    """
    For `Nemerle <http://nemerle.org>`_ source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 1.5
    """

    name = 'Nemerle'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = dict(
        none = '@?[_a-zA-Z][a-zA-Z0-9_]*',
        basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                 '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                 uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
                                            'Nl') + '])'
                + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                       'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    )

    tokens = {}
    token_variants = True

    # Build one token table per unicode level, like CSharpLexer.
    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                # interpolated ("splice") strings and recursive <# ... #> strings
                (r'\$\s*"', String, 'splice-string'),
                (r'\$\s*<#', String, 'splice-string2'),
                (r'<#', String, 'recursive-string'),

                (r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                                                       Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)\s*(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                # parentheses nest inside a splice expression
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        # Select and (if needed) compile the token table for the requested
        # unicode level.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """

    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            # anonymous function: `def (args)` with no name
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\"|[^"]*?)"', String.Double),
            (r"'(\\\\|\\'|[^']*?)'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9\.]*(ms?|d|h|s)', Number),  # timespan literals
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'comment': [
            # /* ... */ comments nest
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """

    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[\(\){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            (r'(?<!\.)(AddHandler|Alias|'
             r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
             r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
             r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
             r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
             r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
             r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
             r'Implements|Inherits|Interface|'
             r'Let|Lib|Loop|Me|MustInherit|'
             r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
             r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
             r'Operator|Option|Optional|Overloads|Overridable|'
             r'Overrides|ParamArray|Partial|Private|Protected|'
             r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
             r'Return|Select|Set|Shadows|Shared|Single|'
             r'Static|Step|Stop|SyncLock|Then|'
             r'Throw|To|True|Try|TryCast|Wend|'
             r'Using|When|While|Widening|With|WithEvents|'
             r'WriteOnly)\b', Keyword),
            # `End <what>` is handled in a sub-state so the construct name
            # is also colored as a keyword.
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>]',
             Operator),
            ('"', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'_\n', Text),  # Line continuation
        ],
        'string': [
            (r'""', String),  # doubled quote escapes a quote
            (r'"C?', String, '#pop'),
            (r'[^"]+', String),
        ],
        'dim': [
            (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'),
            # empty match: fall through when no identifier follows
            # (pre-`default()` pygments idiom)
            (r'', Text, '#pop'),  # any other syntax
        ],
        'funcname': [
            (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            # empty match: plain `End` statement
            (r'', Text, '#pop'),
        ]
    }
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.

    Tokenizes ``<%...%>`` blocks and ``<script>`` bodies as ``Other`` (to be
    re-lexed by a DelegatingLexer subclass) and hands the remaining markup
    to XmlLexer.
    """

    name = 'aspx-gen'
    filenames = []
    mimetypes = []

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
#TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.
    """

    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
                                              **options)

    # NOTE: defined without `self` on purpose — pygments' lexer machinery
    # calls analyse_text as a static method.
    def analyse_text(text):
        # Weak heuristics on the Page directive / script language attribute;
        # returns None (no opinion) when neither matches.
        if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
            return 0.15
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.
    """

    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
                                             **options)

    # NOTE: defined without `self` on purpose — pygments' lexer machinery
    # calls analyse_text as a static method.
    def analyse_text(text):
        # Weak heuristics on the Page directive / script language attribute;
        # returns None (no opinion) when neither matches.
        if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
            return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).

    Rule order inside each state matters: earlier regexes win, so e.g.
    ``let!`` must precede ``let`` and ``'"""'`` must precede ``'"'``.

    .. versionadded:: 1.5
    """
    name = 'FSharp'
    aliases = ['fsharp']
    filenames = ['*.fs', '*.fsi']
    mimetypes = ['text/x-fsharp']
    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    # Punctuation/operator fragments, pre-escaped for use in a regex
    # alternation (longest alternatives listed first where it matters).
    keyopts = [
        '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
        '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
        '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]
    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]
    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.
    tokens = {
        'escape-sequence': [
            (r'\\[\\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            # Capitalized identifier followed by '.' opens a dotted path.
            (r'\b(?<!\.)([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][A-Za-z0-9_\']*)', Name),
            (r'///.*?\n', String.Doc),
            (r'//.*?\n', Comment.Single),
            # '(*' starts a nestable comment, except the '(*)' operator.
            (r'\(\*(?!\))', Comment, 'comment'),
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'\b(open|module)(\s+)([a-zA-Z0-9_.]+)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'\b(let!?)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Variable)),
            (r'\b(type)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'\b(member|override)(\s+)([a-zA-Z0-9_]+)(\.)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
             Comment.Preproc),
            (r"[^\W\d][\w']*", Name),
            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Binary),
            # NOTE(review): the '.' below is unescaped (matches any char) and
            # the exponent group is not optional, so plain decimals like 1.5
            # never match this Float rule — looks like it should read
            # (\.[\d_]*)? and (...)?; confirm against upstream Pygments.
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element
            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
            (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
            # e.g. dictionary index access
            (r'', Text, '#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            # Verbatim string: '""' is an escaped quote, backslash is literal.
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            # Triple-quoted string; only '"""' terminates it.
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }
// Search-history panel: a scrollable list of recent search entries.
:host {
  display: block;
  // Cap the dropdown height; the list scrolls within it without
  // propagating scroll to the page (overscroll-behavior: contain).
  max-height: 50vh;
  overflow-y: auto;
  overscroll-behavior: contain;

  i {
    font-size: 1.2rem;
  }

  // Section heading above each group of history results.
  .title {
    margin-inline: 1rem;
    margin-block-start: 1rem;
    margin-block-end: 0;
    font-size: 0.875rem;
    font-weight: 600;
  }

  .history-results {
    list-style-type: none;
    padding-inline: 0;
    padding-block: 0.75rem;
    margin: 0;

    li {
      // Left accent bar; swapped to the primary color when active.
      border-inline-start: 2px solid var(--senary-contrast);
      margin-inline-start: 1rem;
      display: flex;
      align-items: center;
      gap: 0.5rem;
      padding-inline-end: 1rem;

      // Keyboard/pointer-highlighted entry.
      &.active {
        background-color: var(--octonary-contrast);
        border-inline-start: 2px solid var(--primary-contrast);
      }

      // The entry link: fills the row, truncates long labels.
      a {
        padding-inline: 0.75rem;
        padding-block: 1rem;
        flex: 1;
        display: flex;
        align-items: center;
        color: var(--secondary-contrast);
        transition: color 0.3s ease;
        white-space: nowrap;
        overflow: hidden;

        .type-icon {
          margin-inline-end: 0.75rem;
        }

        span {
          text-overflow: ellipsis;
          overflow: hidden;

          &.sub-label {
            flex: 1;
          }
        }

        &:hover {
          color: var(--primary-contrast);
        }
      }

      // Remove-entry button; turns pink on hover to signal deletion.
      button {
        color: var(--secondary-contrast);
        transition: color 0.3s ease;

        &:hover {
          color: var(--vivid-pink);
        }
      }
    }
  }
}
from copy import deepcopy
from django.contrib import admin
from django.http import QueryDict
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from adminsortable2.admin import SortableInlineAdminMixin
from ..models import Category, Comment, NavItem, Page, StaticPage
__all__ = ['CategoryAdmin', 'PageAdmin', 'NavItemInline']
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    # Category admin: title and slug are editable directly in the change
    # list; the slug is pre-filled from the title on the edit form.
    list_display = ['pk', 'title', 'slug']
    list_editable = ['title', 'slug']
    search_fields = ['title']
    fields = ['title', 'slug']
    prepopulated_fields = {"slug": ("title",)}
class BasePageAdmin(admin.ModelAdmin):
    """Shared admin for Page subclasses.

    Threads the current ``parent`` page (taken from the changelist filters)
    through the add/changelist/change views so templates can display it and
    new pages default to it.
    """
    list_display = ('cover_thumb', 'title', 'status', 'parent')
    list_display_links = ('cover_thumb', 'title')
    list_editable = ('status',)
    list_filter = ('status',)
    prepopulated_fields = {"slug": ("title",)}
    # prepopulate fields using changelist's filters
    prepopulated_filters = ('parent',)
    search_fields = ('title',)
    fieldsets = [
        ('', {
            'fields': ['title', 'slug', 'cover', 'content'],
        }),
        (_('Publication Settings'), {
            'fields': ['status', 'parent'],
        }),
    ]
    change_form_template = 'admin/aircox/page_change_form.html'

    def cover_thumb(self, obj):
        # List-column thumbnail; empty string when the page has no cover.
        return mark_safe('<img src="{}"/>'.format(obj.cover.icons['64'])) \
            if obj.cover else ''

    def get_changeform_initial_data(self, request):
        # Default the 'parent' form field from the changelist filters the
        # user navigated here with (passed as _changelist_filters).
        data = super().get_changeform_initial_data(request)
        filters = QueryDict(request.GET.get('_changelist_filters', ''))
        data['parent'] = filters.get('parent', None)
        return data

    def _get_common_context(self, query, extra_context=None):
        # Resolve the 'parent' query parameter to a concrete Page subclass
        # instance and expose it to the template context.
        extra_context = extra_context or {}
        parent = query.get('parent', None)
        extra_context['parent'] = None if parent is None else \
            Page.objects.get_subclass(id=parent)
        return extra_context

    def render_change_form(self, request, context, *args, **kwargs):
        # Fall back to the edited object's own parent when none was
        # injected by add_view/changelist_view.
        if context['original'] and not 'parent' in context:
            context['parent'] = context['original'].parent
        return super().render_change_form(request, context, *args, **kwargs)

    def add_view(self, request, form_url='', extra_context=None):
        filters = QueryDict(request.GET.get('_changelist_filters', ''))
        extra_context = self._get_common_context(filters, extra_context)
        return super().add_view(request, form_url, extra_context)

    def changelist_view(self, request, extra_context=None):
        extra_context = self._get_common_context(request.GET, extra_context)
        return super().changelist_view(request, extra_context)
class PageAdmin(BasePageAdmin):
    """Admin for regular pages; extends the base admin with category support."""
    change_list_template = 'admin/aircox/page_change_list.html'
    list_display = BasePageAdmin.list_display + ('category',)
    list_editable = BasePageAdmin.list_editable + ('category',)
    # FIX: the filter sidebar must build on the base class' list_filter,
    # not list_editable (copy-paste slip; same value today, but fragile).
    list_filter = BasePageAdmin.list_filter + ('category',)
    # FIX: extend rather than replace the base search fields, so searching
    # by page title keeps working alongside category title.
    search_fields = BasePageAdmin.search_fields + ('category__title',)
    # Mutate a deep copy so BasePageAdmin.fieldsets stays untouched.
    fieldsets = deepcopy(BasePageAdmin.fieldsets)
    fieldsets[0][1]['fields'].insert(fieldsets[0][1]['fields'].index('slug') + 1, 'category')
    fieldsets[1][1]['fields'] += ('featured', 'allow_comments')
@admin.register(StaticPage)
class StaticPageAdmin(BasePageAdmin):
    # Static pages additionally expose the 'attach_to' anchor field.
    list_display = BasePageAdmin.list_display + ('attach_to',)
    # Mutate a deep copy so BasePageAdmin.fieldsets stays untouched.
    fieldsets = deepcopy(BasePageAdmin.fieldsets)
    fieldsets[1][1]['fields'] += ('attach_to',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    list_display = ('page_title', 'date', 'nickname')
    list_filter = ('date',)
    search_fields = ('page__title', 'nickname')

    def page_title(self, obj):
        # Computed list column: title of the page the comment belongs to.
        return obj.page.title
class NavItemInline(SortableInlineAdminMixin, admin.TabularInline):
    # Drag-sortable tabular inline for navigation items; embedded in a
    # parent admin elsewhere (exported via __all__ above).
    model = NavItem
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
import random
import math
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from divisiveKmeans import DivisiveKmeans
# NOTE: Python 2 script (print statements, dict.values() indexing).
if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"
# Connect to the local MongoDB dump of the Condor Watch project.
client = pymongo.MongoClient()
db = client['condor_2014-11-11']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
# (distance, overlap, cluster_i, cluster_j) tuples across all subjects.
relations = []
# zooniverse ids of subjects with at least one low-overlap cluster pair.
one = []
#print subject_collection.count({"classification_count":{"$gt":1}})
for subject in subject_collection.find({"classification_count":{"$gt":1}}):
    # Restrict to one specific feeding-site photo directory.
    #if not("USFWS photos/Remote Feeding Site Photos/Remote Feeding Photos_2008/Bitter Creek/NRFS/NRFS 4.16-4.17.2008=CORA, 17CACO/" in subject["metadata"]["file"]):
    if not("USFWS photos/Remote Feeding Site Photos/Remote Feeding Photos_2011/Bitter Creek/BC 34.929570, -119.363840 Dec 17-Jan 8, 2011-12" in subject["metadata"]["file"]):
        continue
    zooniverse_id = subject["zooniverse_id"]
    #print zooniverse_id
    # print subject["metadata"]["file"]
    # print subject["location"]["standard"]
    # Collect (x, y) marks and which user made each one.
    annotation_list = []
    user_list = []
    for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
        try:
            # Find the annotation entry whose only key is "marks".
            mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
            markings = classification["annotations"][mark_index].values()[0]
            for animal in markings.values():
                # Scale factor between the marked image and the original
                # resolution — assumed constant; TODO confirm.
                scale = 1.875
                x = scale*float(animal["x"])
                y = scale*float(animal["y"])
                try:
                    animal_type = animal["animal"]
                    #if not(animal_type in ["carcassOrScale","carcass"]):
                    if animal_type == "condor":
                        annotation_list.append((x,y))
                        user_list.append(user_index)
                except KeyError:
                    # Mark without an "animal" label: keep it anyway.
                    annotation_list.append((x,y))
                    user_list.append(user_index)
        except ValueError:
            # Classification without a "marks" annotation — skip it.
            pass
    # Cluster the marks; each cluster is one putative condor.
    user_identified_condors,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True)
    #print len(user_identified_condors)
    tt = 0
    if len(user_identified_condors) > 1:
        # Compare every pair of clusters; record pairs whose user sets
        # barely overlap (<= 1 shared user) together with their distance.
        for c1_index in range(len(clusters)):
            for c2_index in range(c1_index+1,len(clusters)):
                condor1 = user_identified_condors[c1_index]
                condor2 = user_identified_condors[c2_index]
                dist = math.sqrt((condor1[0]-condor2[0])**2+(condor1[1]-condor2[1])**2)
                users_1 = [user_list[annotation_list.index(pt)] for pt in clusters[c1_index]]
                users_2 = [user_list[annotation_list.index(pt)] for pt in clusters[c2_index]]
                overlap = [u for u in users_1 if u in users_2]
                if len(overlap) <= 1:
                    relations.append((dist,len(overlap),c1_index,c2_index))
                    tt += 1
    #relations.sort(key= lambda x:x[0])
    if tt > 0:
        one.append(zooniverse_id)
        print tt
print len(relations)
# Histogram of the inter-cluster distances.
x = zip(*relations)[0]
n, bins, patches = plt.hist(x, 20)
print bins
print one
plt.show()
#include <c10/core/impl/HermeticPyObjectTLS.h>

namespace c10::impl {

// Per-thread flag backing HermeticPyObjectTLS; defaults to false.
thread_local static std::atomic<bool> hermeticPyObjectState{false};

// Process-wide latch: set once by init_state() and never cleared.
std::atomic<bool> HermeticPyObjectTLS::haveState_{false};

void HermeticPyObjectTLS::set_state(bool state) {
  hermeticPyObjectState = state;
}

bool HermeticPyObjectTLS::get_tls_state() {
  return hermeticPyObjectState;
}

void HermeticPyObjectTLS::init_state() {
  haveState_ = true;
}

} // namespace c10::impl
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
DOCUMENTATION = '''
---
module: gce_tag
version_added: "2.0"
short_description: add or remove tag(s) to/from GCE instance
description:
- This module can add or remove tags U(https://cloud.google.com/compute/docs/instances/#tags)
to/from GCE instance.
options:
instance_name:
description:
- the name of the GCE instance to add/remove tags
required: true
default: null
aliases: []
tags:
description:
- comma-separated list of tags to add or remove
required: true
default: null
aliases: []
state:
description:
- desired state of the tags
required: false
default: "present"
choices: ["present", "absent"]
aliases: []
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
aliases: []
service_account_email:
description:
- service account email
required: false
default: null
aliases: []
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Do Hoang Khiem (dohoangkhiem@gmail.com)"
'''
EXAMPLES = '''
# Add tags 'http-server', 'https-server', 'staging' to instance name 'staging-server' in zone us-central1-a.
- gce_tag:
instance_name: staging-server
tags: http-server,https-server,staging
zone: us-central1-a
state: present
# Remove tags 'foo', 'bar' from instance 'test-server' in default zone (us-central1-a)
- gce_tag:
instance_name: test-server
tags: foo,bar
state: absent
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, InvalidRequestError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def add_tags(gce, module, instance_name, tags):
    """Add tags to a GCE instance.

    :param gce: libcloud GCE driver/connection
    :param module: AnsibleModule (supplies params, reports failures)
    :param instance_name: name of the instance to tag
    :param tags: list of tags to add (normalized to lowercase)
    :return: tuple ``(changed, tags_added)``; ``tags_added`` is ``None``
        when every requested tag was already present.
    """
    zone = module.params.get('zone')

    if not instance_name:
        module.fail_json(msg='Must supply instance_name', changed=False)

    if not tags:
        module.fail_json(msg='Must supply tags', changed=False)

    # GCE tags are lowercase; normalize before comparing/storing.
    tags = [x.lower() for x in tags]

    try:
        node = gce.ex_get_node(instance_name, zone=zone)
    except ResourceNotFoundError:
        module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
    except GoogleBaseError as e:
        # FIX: 'except X as e' (Python 3 compatible, valid since 2.6) —
        # the old 'except X, e' form was Python-2-only and inconsistent
        # with the 'as e' form already used below.
        module.fail_json(msg=str(e), changed=False)

    # node.extra['tags'] is mutated in place, then written back in full.
    node_tags = node.extra['tags']
    changed = False
    tags_changed = []

    for t in tags:
        if t not in node_tags:
            changed = True
            node_tags.append(t)
            tags_changed.append(t)

    if not changed:
        return False, None

    try:
        gce.ex_set_node_tags(node, node_tags)
        return True, tags_changed
    except (GoogleBaseError, InvalidRequestError) as e:
        module.fail_json(msg=str(e), changed=False)
def remove_tags(gce, module, instance_name, tags):
    """Remove tags from a GCE instance.

    :param gce: libcloud GCE driver/connection
    :param module: AnsibleModule (supplies params, reports failures)
    :param instance_name: name of the instance to untag
    :param tags: list of tags to remove (normalized to lowercase)
    :return: tuple ``(changed, tags_removed)``; ``tags_removed`` is
        ``None`` when none of the requested tags were present.
    """
    zone = module.params.get('zone')

    if not instance_name:
        module.fail_json(msg='Must supply instance_name', changed=False)

    if not tags:
        module.fail_json(msg='Must supply tags', changed=False)

    # GCE tags are lowercase; normalize before comparing.
    tags = [x.lower() for x in tags]

    try:
        node = gce.ex_get_node(instance_name, zone=zone)
    except ResourceNotFoundError:
        module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
    except GoogleBaseError as e:
        # FIX: 'except X as e' (Python 3 compatible, valid since 2.6) —
        # consistent with the 'as e' form already used below.
        module.fail_json(msg=str(e), changed=False)

    # node.extra['tags'] is mutated in place, then written back in full.
    node_tags = node.extra['tags']
    changed = False
    tags_changed = []

    for t in tags:
        if t in node_tags:
            node_tags.remove(t)
            changed = True
            tags_changed.append(t)

    if not changed:
        return False, None

    try:
        gce.ex_set_node_tags(node, node_tags)
        return True, tags_changed
    except (GoogleBaseError, InvalidRequestError) as e:
        module.fail_json(msg=str(e), changed=False)
def main():
    """Module entry point: validate arguments and apply the tag change."""
    module = AnsibleModule(
        argument_spec=dict(
            instance_name=dict(required=True),
            tags=dict(type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(),
            project_id=dict(),
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support is required.')

    params = module.params
    name = params.get('instance_name')
    desired_state = params.get('state')
    tag_list = params.get('tags')
    zone = params.get('zone')

    changed = False

    # Both have defaults/requirements above, but guard explicitly anyway.
    if not zone:
        module.fail_json(msg='Must specify "zone"', changed=False)
    if not tag_list:
        module.fail_json(msg='Must specify "tags"', changed=False)

    gce = gce_connect(module)

    # Apply the requested tag state to the instance.
    if desired_state == 'present':
        changed, tags_changed = add_tags(gce, module, name, tag_list)
    elif desired_state == 'absent':
        changed, tags_changed = remove_tags(gce, module, name, tag_list)

    module.exit_json(changed=changed, instance_name=name,
                     tags=tags_changed, zone=zone)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *

# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import signal
import sys
import time
from random import random, shuffle
from tempfile import mkstemp
from eventlet import spawn, patcher, Timeout, TimeoutError
from swift.container.server import DATADIR
from swift.common.bufferedhttp import http_connect
from swift.common.db import ContainerBroker
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, whataremyips
from swift.common.daemon import Daemon
class ContainerUpdater(Daemon):
    """Update container information in account listings."""

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='container-updater')
        self.devices = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.interval = int(conf.get('interval', 300))
        self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
        self.account_ring = None
        self.concurrency = int(conf.get('concurrency', 4))
        # Seconds slept between containers to limit disk/CPU pressure.
        self.slowdown = float(conf.get('slowdown', 0.01))
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.no_changes = 0
        self.successes = 0
        self.failures = 0
        # account name -> unix time until which its updates are skipped
        self.account_suppressions = {}
        self.account_suppression_time = \
            float(conf.get('account_suppression_time', 60))
        self.new_account_suppressions = None

    def get_account_ring(self):
        """Get the account ring.  Load it if it hasn't been yet."""
        if not self.account_ring:
            self.logger.debug(
                _('Loading account ring from %s'), self.account_ring_path)
            self.account_ring = Ring(self.account_ring_path)
        return self.account_ring

    def get_paths(self):
        """
        Get paths to all of the partitions on each drive to be processed.

        :returns: a list of paths (shuffled so workers don't all hit the
                  same device order)
        """
        paths = []
        for device in os.listdir(self.devices):
            dev_path = os.path.join(self.devices, device)
            if self.mount_check and not os.path.ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), device)
                continue
            con_path = os.path.join(dev_path, DATADIR)
            if not os.path.exists(con_path):
                continue
            for partition in os.listdir(con_path):
                paths.append(os.path.join(con_path, partition))
        shuffle(paths)
        return paths

    def _load_suppressions(self, filename):
        # Merge "account until" lines written by a finished child process
        # into self.account_suppressions, then delete the temp file.
        try:
            with open(filename, 'r') as tmpfile:
                for line in tmpfile:
                    account, until = line.split()
                    until = float(until)
                    self.account_suppressions[account] = until
        except Exception:
            self.logger.exception(
                _('ERROR with loading suppressions from %s: ') % filename)
        finally:
            os.unlink(filename)

    def run_forever(self, *args, **kwargs):
        """
        Run the updator continuously.

        Each sweep forks one child per partition path (up to
        ``self.concurrency`` at a time); children report newly suppressed
        accounts back through temp files collected by the parent.
        """
        time.sleep(random() * self.interval)
        while True:
            self.logger.info(_('Begin container update sweep'))
            begin = time.time()
            now = time.time()
            # Drop suppressions whose deadline has passed.
            expired_suppressions = \
                [a for a, u in self.account_suppressions.iteritems() if u < now]
            for account in expired_suppressions:
                del self.account_suppressions[account]
            pid2filename = {}
            # read from account ring to ensure it's fresh
            self.get_account_ring().get_nodes('')
            for path in self.get_paths():
                # Throttle: wait for a child to exit before forking more.
                while len(pid2filename) >= self.concurrency:
                    pid = os.wait()[0]
                    try:
                        self._load_suppressions(pid2filename[pid])
                    finally:
                        del pid2filename[pid]
                fd, tmpfilename = mkstemp()
                os.close(fd)
                pid = os.fork()
                if pid:
                    pid2filename[pid] = tmpfilename
                else:
                    # Child: sweep one partition path, then exit.
                    signal.signal(signal.SIGTERM, signal.SIG_DFL)
                    patcher.monkey_patch(all=False, socket=True)
                    self.no_changes = 0
                    self.successes = 0
                    self.failures = 0
                    self.new_account_suppressions = open(tmpfilename, 'w')
                    forkbegin = time.time()
                    self.container_sweep(path)
                    elapsed = time.time() - forkbegin
                    self.logger.debug(
                        _('Container update sweep of %(path)s completed: '
                          '%(elapsed).02fs, %(success)s successes, %(fail)s '
                          'failures, %(no_change)s with no changes'),
                        {'path': path, 'elapsed': elapsed,
                         'success': self.successes, 'fail': self.failures,
                         'no_change': self.no_changes})
                    sys.exit()
            # Reap remaining children and collect their suppressions.
            while pid2filename:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            elapsed = time.time() - begin
            self.logger.info(_('Container update sweep completed: %.02fs'),
                             elapsed)
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

    def run_once(self, *args, **kwargs):
        """
        Run the updater once (no forking; single-threaded sweep).
        """
        patcher.monkey_patch(all=False, socket=True)
        self.logger.info(_('Begin container update single threaded sweep'))
        begin = time.time()
        self.no_changes = 0
        self.successes = 0
        self.failures = 0
        for path in self.get_paths():
            self.container_sweep(path)
        elapsed = time.time() - begin
        self.logger.info(_('Container update single threaded sweep completed: '
            '%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
            '%(no_change)s with no changes'),
            {'elapsed': elapsed, 'success': self.successes,
             'fail': self.failures, 'no_change': self.no_changes})

    def container_sweep(self, path):
        """
        Walk the path looking for container DBs and process them.

        :param path: path to walk
        """
        for root, dirs, files in os.walk(path):
            for file in files:
                if file.endswith('.db'):
                    self.process_container(os.path.join(root, file))
                    time.sleep(self.slowdown)

    def process_container(self, dbfile):
        """
        Process a container, and update the information in the account.

        :param dbfile: container DB to process
        """
        broker = ContainerBroker(dbfile, logger=self.logger)
        info = broker.get_info()
        # Don't send updates if the container was auto-created since it
        # definitely doesn't have up to date statistics.
        if float(info['put_timestamp']) <= 0:
            return
        # Skip accounts currently suppressed after a failed report.
        if self.account_suppressions.get(info['account'], 0) > time.time():
            return
        # Only report when something changed since the last report.
        if info['put_timestamp'] > info['reported_put_timestamp'] or \
                info['delete_timestamp'] > info['reported_delete_timestamp'] \
                or info['object_count'] != info['reported_object_count'] or \
                info['bytes_used'] != info['reported_bytes_used']:
            container = '/%s/%s' % (info['account'], info['container'])
            part, nodes = self.get_account_ring().get_nodes(info['account'])
            events = [spawn(self.container_report, node, part, container,
                            info['put_timestamp'], info['delete_timestamp'],
                            info['object_count'], info['bytes_used'])
                      for node in nodes]
            successes = 0
            failures = 0
            for event in events:
                if 200 <= event.wait() < 300:
                    successes += 1
                else:
                    failures += 1
            # Majority success marks the stats as reported; otherwise
            # suppress this account for a while to avoid hammering it.
            if successes > failures:
                self.successes += 1
                self.logger.debug(
                    _('Update report sent for %(container)s %(dbfile)s'),
                    {'container': container, 'dbfile': dbfile})
                broker.reported(info['put_timestamp'],
                                info['delete_timestamp'], info['object_count'],
                                info['bytes_used'])
            else:
                self.failures += 1
                self.logger.debug(
                    _('Update report failed for %(container)s %(dbfile)s'),
                    {'container': container, 'dbfile': dbfile})
                self.account_suppressions[info['account']] = until = \
                    time.time() + self.account_suppression_time
                if self.new_account_suppressions:
                    print >>self.new_account_suppressions, \
                        info['account'], until
        else:
            self.no_changes += 1

    def container_report(self, node, part, container, put_timestamp,
                         delete_timestamp, count, bytes):
        """
        Report container info to an account server.

        :param node: node dictionary from the account ring
        :param part: partition the account is on
        :param container: container name
        :param put_timestamp: put timestamp
        :param delete_timestamp: delete timestamp
        :param count: object count in the container
        :param bytes: bytes used in the container
        :returns: HTTP status of the account server response, or 500 on
                  connection/read failure
        """
        with ConnectionTimeout(self.conn_timeout):
            try:
                conn = http_connect(
                    node['ip'], node['port'], node['device'], part,
                    'PUT', container,
                    headers={'X-Put-Timestamp': put_timestamp,
                             'X-Delete-Timestamp': delete_timestamp,
                             'X-Object-Count': count,
                             'X-Bytes-Used': bytes,
                             'X-Account-Override-Deleted': 'yes'})
            except (Exception, TimeoutError):
                self.logger.exception(_('ERROR account update failed with '
                    '%(ip)s:%(port)s/%(device)s (will retry later): '), node)
                return 500
        with Timeout(self.node_timeout):
            try:
                resp = conn.getresponse()
                resp.read()
                return resp.status
            except (Exception, TimeoutError):
                if self.logger.getEffectiveLevel() <= logging.DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                return 500
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Address(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_customer=None, company=None, address1=None, postcode=None, city=None, id_country=None, id_state=None, date_add=None, date_upd=None):
"""
Address - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_customer': 'int',
'company': 'str',
'address1': 'str',
'postcode': 'str',
'city': 'str',
'id_country': 'int',
'id_state': 'int',
'date_add': 'str',
'date_upd': 'str'
}
self.attribute_map = {
'id': 'id',
'id_customer': 'id_customer',
'company': 'company',
'address1': 'address1',
'postcode': 'postcode',
'city': 'city',
'id_country': 'id_country',
'id_state': 'id_state',
'date_add': 'date_add',
'date_upd': 'date_upd'
}
self._id = id
self._id_customer = id_customer
self._company = company
self._address1 = address1
self._postcode = postcode
self._city = city
self._id_country = id_country
self._id_state = id_state
self._date_add = date_add
self._date_upd = date_upd
@property
def id(self):
"""
Gets the id of this Address.
:return: The id of this Address.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Address.
:param id: The id of this Address.
:type: int
"""
self._id = id
@property
def id_customer(self):
"""
Gets the id_customer of this Address.
:return: The id_customer of this Address.
:rtype: int
"""
return self._id_customer
@id_customer.setter
def id_customer(self, id_customer):
"""
Sets the id_customer of this Address.
:param id_customer: The id_customer of this Address.
:type: int
"""
self._id_customer = id_customer
@property
def company(self):
"""
Gets the company of this Address.
:return: The company of this Address.
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""
Sets the company of this Address.
:param company: The company of this Address.
:type: str
"""
self._company = company
@property
def address1(self):
"""
Gets the address1 of this Address.
:return: The address1 of this Address.
:rtype: str
"""
return self._address1
@address1.setter
def address1(self, address1):
"""
Sets the address1 of this Address.
:param address1: The address1 of this Address.
:type: str
"""
self._address1 = address1
@property
def postcode(self):
"""
Gets the postcode of this Address.
:return: The postcode of this Address.
:rtype: str
"""
return self._postcode
@postcode.setter
def postcode(self, postcode):
"""
Sets the postcode of this Address.
:param postcode: The postcode of this Address.
:type: str
"""
self._postcode = postcode
@property
def city(self):
"""
Gets the city of this Address.
:return: The city of this Address.
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""
Sets the city of this Address.
:param city: The city of this Address.
:type: str
"""
self._city = city
@property
def id_country(self):
"""
Gets the id_country of this Address.
:return: The id_country of this Address.
:rtype: int
"""
return self._id_country
@id_country.setter
def id_country(self, id_country):
"""
Sets the id_country of this Address.
:param id_country: The id_country of this Address.
:type: int
"""
self._id_country = id_country
@property
def id_state(self):
"""
Gets the id_state of this Address.
:return: The id_state of this Address.
:rtype: int
"""
return self._id_state
@id_state.setter
def id_state(self, id_state):
"""
Sets the id_state of this Address.
:param id_state: The id_state of this Address.
:type: int
"""
self._id_state = id_state
@property
def date_add(self):
"""
Gets the date_add of this Address.
:return: The date_add of this Address.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this Address.
:param date_add: The date_add of this Address.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this Address.
:return: The date_upd of this Address.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this Address.
:param date_upd: The date_upd of this Address.
:type: str
"""
self._date_upd = date_upd
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
    """
    Returns the string representation of the model.

    :rtype: str
    """
    # pformat gives a stable, readable rendering of the nested dict.
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint` — delegates to :meth:`to_str`.
    """
    return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | unknown | codeparrot/codeparrot-clean | ||
test_kind: multi_stmt_txn_passthrough
selector:
roots:
- jstests/core/**/*.js
exclude_files:
# These tests already run with transactions.
- jstests/core/txns/**/*.js
##
## Limitations with the way the runner file injects transactions.
##
# These tests expect some statements to error, which will cause txns to abort entirely.
- jstests/core/**/bulk_api_ordered.js
- jstests/core/**/bulk_api_unordered.js
- jstests/core/**/commands_with_uuid.js
- jstests/core/**/dbcase.js
- jstests/core/**/dbcase2.js
- jstests/core/**/explain_execution_error.js
- jstests/core/**/expr.js
- jstests/core/**/find9.js
- jstests/core/**/find_and_modify_invalid_query_params.js
- jstests/core/**/find_getmore_bsonsize.js
- jstests/core/**/find_getmore_cmd.js
- jstests/core/**/geo_allowedcomparisons.js
- jstests/core/**/geo_big_polygon2.js
- jstests/core/**/geonear_key.js
- jstests/core/**/in.js
- jstests/core/**/index8.js # No explicit check for failed command.
- jstests/core/**/index_decimal.js
- jstests/core/**/index_large_and_small_dates.js
- jstests/core/**/index_multiple_compatibility.js
- jstests/core/**/index_partial_write_ops.js
- jstests/core/**/indexa.js # No explicit check for failed command.
- jstests/core/**/indexes_multiple_commands.js
- jstests/core/**/js2.js
- jstests/core/query/json_schema/json_schema.js
- jstests/core/**/mr_bigobject.js
- jstests/core/**/not2.js
- jstests/core/**/null_query_semantics.js
- jstests/core/**/or1.js
- jstests/core/**/or2.js
- jstests/core/**/or3.js
- jstests/core/**/orj.js
- jstests/core/**/ref.js
- jstests/core/**/ref4.js
- jstests/core/**/regex_limit.js
- jstests/core/**/remove_undefined.js
- jstests/core/**/set7.js
- jstests/core/**/sortb.js
- jstests/core/**/sortf.js
- jstests/core/**/sortg.js
- jstests/core/**/sortj.js
- jstests/core/**/sort_with_meta_operator.js
- jstests/core/**/tailable_skip_limit.js
- jstests/core/**/type_array.js
- jstests/core/**/uniqueness.js
- jstests/core/**/unset2.js
- jstests/core/**/update_addToSet.js
- jstests/core/**/update_array_offset_positional.js
- jstests/core/**/update_arrayFilters.js
- jstests/core/**/update_find_and_modify_id.js
- jstests/core/**/update_modifier_pop.js
- jstests/core/**/update_dollar_fields.js
- jstests/core/**/update_fail_halts_modifications.js
# Reads from system.views.
- jstests/core/catalog/views/views_drop.js
##
## Some aggregation stages don't support snapshot readconcern.
##
# explain (requires read concern local)
- jstests/core/**/agg_hint.js
- jstests/core/**/and.js
- jstests/core/**/query/collation/collation.js
- jstests/core/**/explain_shell_helpers.js
- jstests/core/**/index_partial_read_ops.js
- jstests/core/**/explain_server_params.js
- jstests/core/**/query/explain/optimized_match_explain.js
- jstests/core/**/sort_array.js
- jstests/core/views/views_collation.js
# $listSessions
- jstests/core/**/list_all_local_sessions.js
- jstests/core/**/list_all_sessions.js
- jstests/core/**/list_sessions.js
# $collStats
- jstests/core/**/operation_latency_histogram.js
- jstests/core/catalog/collstats/views_coll_stats.js
- jstests/core/catalog/views/views_stats.js
# Errors expected to happen in tests, which can cause transactions to get aborted.
# So when the test tries to inspect the documents it can be out of sync (relative
# to test run without multi statement transactions).
- jstests/core/**/bulk_api_ordered.js
- jstests/core/**/bulk_api_unordered.js
- jstests/core/**/doc_validation.js
- jstests/core/**/doc_validation_options.js
- jstests/core/**/query/field_name_validation.js
- jstests/core/**/insert_illegal_doc.js
- jstests/core/**/push_sort.js
- jstests/core/**/update_arrayFilters.js
- jstests/core/**/update_dbref.js
- jstests/core/**/update_positional_no_array_elem.js
- jstests/core/**/write_result.js
- jstests/core/**/query/project/positional_projection.js
# Trick for bypassing mongo shell validation in the test doesn't work because txn_override
# retry logic will hit the shell validation.
- jstests/core/**/invalid_db_name.js
# Multiple writes in a txn, some of which fail because the collection doesn't exist.
# We create the collection and retry the last write, but previous writes would have
# still failed.
- jstests/core/**/dbref1.js
- jstests/core/**/dbref2.js
- jstests/core/**/ref3.js
- jstests/core/**/update_mod_dotted.js
##
## Error: Unable to acquire lock within a max lock request timeout of '0ms' milliseconds
##
# Collection drops done through applyOps are not converted to w:majority
- jstests/core/catalog/views/invalid_system_views.js
##
## Misc. reasons.
##
# SERVER-34868 Cannot run a legacy query on a session.
- jstests/core/**/query/exhaust.js
# SERVER-34772 Tailable Cursors are not allowed with snapshot readconcern.
- jstests/core/**/awaitdata_getmore_cmd.js
- jstests/core/**/getmore_cmd_maxtimems.js
- jstests/core/**/tailable_cursor_invalidation.js
- jstests/core/**/tailable_getmore_batch_size.js
# Wrong count for top info (WriteLock)
- jstests/core/**/query/top/top.js
# Expects collection to not have been created
- jstests/core/**/insert_id_undefined.js
# Creates sessions explicitly, resulting in txns being run through different sessions
# using a single txnNumber.
- jstests/core/query/json_schema/misc_validation.js
- jstests/core/catalog/views/views_all_commands.js
# Committing a transaction when the server is fsync locked fails.
- jstests/core/**/fsync.js
# Expects legacy errors ($err).
- jstests/core/**/constructors.js
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/**/commands_namespace_parsing.js
- jstests/core/**/drop_collection_cursors.js
- jstests/core/**/geo_s2cursorlimitskip.js
- jstests/core/**/getmore_invalidated_cursors.js
- jstests/core/**/getmore_invalidated_documents.js
- jstests/core/**/query/kill_cursors.js
- jstests/core/**/list_indexes.js
- jstests/core/**/oro.js
- jstests/core/**/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/**/awaitdata_getmore_cmd.js
- jstests/core/**/compact_keeps_indexes.js
- jstests/core/**/count10.js
- jstests/core/**/count_plan_summary.js
- jstests/core/**/coveredIndex3.js
- jstests/core/**/crud_ops_do_not_throw_locktimeout.js
- jstests/core/**/currentop.js
- jstests/core/**/distinct3.js
- jstests/core/**/find_and_modify_concurrent_update.js
- jstests/core/**/fsync.js
- jstests/core/**/geo_update_btree.js
- jstests/core/**/loadserverscripts.js
- jstests/core/**/mr_killop.js
- jstests/core/**/remove_concurrent_inserts.js
- jstests/core/**/remove_adjacent_index_keys.js
- jstests/core/**/shellstartparallel.js
- jstests/core/**/update_namespace_details.js
# Command expects to see result from parallel operation.
# E.g. Suppose the following sequence of events: op1, join() op2 in parallel shell, op3.
# op3 will still be using the snapshot from op1, and not see op2 at all.
- jstests/core/**/benchrun_pipeline_updates.js
- jstests/core/**/cursora.js
# Expect drops/creates to fail or have a certain response:
- jstests/core/**/explain_upsert.js
# Expect certain responses, but retries of successfully completed commands may return
# different values:
- jstests/core/**/create_indexes.js
- jstests/core/**/objid5.js
# Expect results to return in a certain order, secondaries may apply ops out of order.
- jstests/core/**/coveredIndex1.js
- jstests/core/**/sortc.js
- jstests/core/**/bench_test*.js # benchRun() used for writes
- jstests/core/**/benchrun_pipeline_updates.js # benchRun() used for writes
- jstests/core/**/connection_string_validation.js # Does not expect a replica set connection string.
- jstests/core/**/explain_large_bounds.js # Stepdown can timeout waiting for global lock.
- jstests/core/**/list_collections_filter.js # Temporary collections are dropped on failover.
- jstests/core/**/startup_log.js # Checks pid, which is different on each server.
# Creates new mongo connection but won't retry connecting.
- jstests/core/**/shell_connection_strings.js
# Does not support tojson of command objects.
- jstests/core/**/query/function_prototype_bson_type.js
# Examines _id of upserted document.
- jstests/core/**/find_and_modify_new_upsert_sort.js
- jstests/core/**/upsert_shell.js
# TODO SERVER-31242: findAndModify no-op retry should respect the fields option.
- jstests/core/**/crud_api.js
- jstests/core/**/find_and_modify.js
- jstests/core/**/find_and_modify_fields.js
- jstests/core/**/find_and_modify_positional_new_remove.js
- jstests/core/**/project_with_collation.js
exclude_with_any_tags:
- assumes_standalone_mongod
##
# The next four tags correspond to the special errors thrown by the auto_retry_on_network_error.js
# override when it refuses to run a certain command. Above each tag are the message(s) that cause
# the tag to be warranted.
##
# "Refusing to run a test that issues a getMore command since if a network error occurs during
# it then we won't know whether the cursor was advanced or not"
- requires_getmore
# "Refusing to run a test that issues non-retryable write operations since the test likely makes
# assertions on the write results and can lead to spurious failures if a network error occurs"
- requires_non_retryable_writes
# "Refusing to run a test that issues commands that are not blindly retryable"
# "Refusing to run a test that issues an aggregation command with $out because it is not
# retryable"
- requires_non_retryable_commands
# "Refusing to run a test that issues commands that may return different values after a failover"
# "Refusing to run a test that issues an aggregation command with explain because it may return
# incomplete results"
# "Refusing to run a test that issues an aggregation command with $listLocalCursors or
# $listLocalSessions because they rely on in-memory state that may not survive failovers"
# "Refusing to run a test that issues a mapReduce command, because it calls std::terminate() if
# interrupted by a stepdown"
- does_not_support_stepdowns
##
# The next two tags correspond to the special errors thrown by the
# set_read_and_write_concerns.js override when it refuses to replace the readConcern or
# writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be
# warranted.
##
# "Cowardly refusing to override read concern of command: ..."
- assumes_read_concern_unchanged
# "writeConcern is not allowed within a multi-statement transaction"
- assumes_write_concern_unchanged
##
# The next tag corresponds to long-running operations, as they may exhaust their number
# of retries and result in a network error being thrown.
- operations_longer_than_stepdown_interval
# This tag corresponds to operations which are run long enough to exceed the stepdown interval
# when grouped into transactions
- operations_longer_than_stepdown_interval_in_txns
# Transactions are not allowed to operate on capped collections.
- requires_capped
- requires_profiling
# Retrying a query can change whether a plan cache entry is active.
- inspects_whether_plan_cache_entry_is_active
- does_not_support_transactions
# Operations in the main test shell aren't guaranteed to be causally consistent with operations
# performed earlier in a parallel shell if multiple nodes are electable because the latest
# operation and cluster times aren't shared between shells.
# "Cowardly refusing to run test with network retries enabled when it uses startParallelShell()"
- uses_parallel_shell
# Transaction-continuing commands must use the same API parameters as the first command, so tests
# that use API parameters cannot be run with transactions.
- uses_api_parameters
- does_not_support_causal_consistency
- requires_timeseries # Transactions not supported
##
# The next three tags correspond to the special errors thrown by the
# fail_unclean_shutdown_incompatible_commands.js override when it refuses to run commands that are
# inaccurate after an unclean shutdown. Above each tag is the message that causes the tag to be
# warranted.
##
# "Cowardly fail if fastcount is run with a mongod that had an unclean shutdown: ..."
- requires_fastcount
# "Cowardly fail if dbStats is run with a mongod that had an unclean shutdown: ..."
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
executor:
archive:
hooks:
- CheckReplOplogs
- CheckReplDBHash
- ValidateCollections
config:
shell_options:
eval: >-
globalThis.testingReplication = true;
await import('jstests/libs/override_methods/network_error_and_txn_override.js');
globalThis.db = connect(TestData.connectionString);
await import("jstests/libs/override_methods/enable_sessions.js");
await import('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
await import("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
await import("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
TestData:
logRetryAttempts: true
networkErrorAndTxnOverrideConfig:
retryOnNetworkErrors: true
wrapCRUDinTransactions: true
overrideRetryAttempts: 3
sessionOptions:
# Read your own writes is not guaranteed without causal consistency if all nodes are
# electable.
causalConsistency: true
retryWrites: true
runningWithStepdowns: true
# We specify nodb so the shell used by each test will attempt to connect after loading the
# retry logic in auto_retry_on_network_error.js.
nodb: ""
hooks:
# We use a stepdown interval of 15 seconds because we will retry all commands in a transaction
# so we need to allow time for at most 10 operations to be re-run and then re-committed. If
# too many network errors occur when re-running a transaction we will run out of retries.
- class: ContinuousStepdown
stepdown_interval_ms: 15000
randomize_kill: true
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
# validating the entire contents of the collection.
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
shell_options:
global_vars:
TestData:
skipEnforceFastCountOnValidate: true
- class: CleanEveryN
n: 20
fixture:
class: ReplicaSetFixture
mongod_options:
syncdelay: 5
wiredTigerEngineConfigString: debug_mode=(table_logging=true)
set_parameters:
logComponentVerbosity:
verbosity: 0
command: 2
replication:
election: 4
heartbeats: 2
initialSync: 2
rollback: 2
storage:
recovery: 2
transaction: 4
enableTestCommands: 1
enableElectionHandoff: 0
all_nodes_electable: true
num_nodes: 3
replset_config_options:
settings:
catchUpTimeoutMillis: 0 | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_stepdown_terminate_jscore_passthrough.yml |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package transit
import (
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"strconv"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/keysutil"
"github.com/hashicorp/vault/sdk/logical"
)
// WrappingKeyName is the storage name (under the "import/" prefix) of the
// RSA-4096 key used to wrap keys being imported into transit.
const WrappingKeyName = "wrapping-key"
// pathWrappingKey builds the framework.Path for the read-only
// "wrapping_key" endpoint, whose single read operation is served by
// pathWrappingKeyRead.
func (b *backend) pathWrappingKey() *framework.Path {
	wrappingKeyPath := &framework.Path{
		Pattern: "wrapping_key",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixTransit,
			OperationSuffix: "wrapping-key",
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation: b.pathWrappingKeyRead,
		},

		HelpSynopsis:    pathWrappingKeyHelpSyn,
		HelpDescription: pathWrappingKeyHelpDesc,
	}

	return wrappingKeyPath
}
// pathWrappingKeyRead handles reads of "wrapping_key": it fetches (creating
// on first use) the transit import wrapping key and returns the PEM-encoded
// PKIX public half under the "public_key" response field.
func (b *backend) pathWrappingKeyRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
	p, err := b.getWrappingKey(ctx, req.Storage)
	if err != nil {
		return nil, err
	}

	wrappingKey := p.Keys[strconv.Itoa(p.LatestVersion)]
	if wrappingKey.RSAKey == nil {
		// Defensive: getWrappingKey upserts an RSA-4096 policy, so a key
		// should exist at LatestVersion; without this guard a missing or
		// non-RSA entry would panic on the Public() call below.
		return nil, fmt.Errorf("wrapping key policy has no RSA key at version %d", p.LatestVersion)
	}

	derBytes, err := x509.MarshalPKIXPublicKey(wrappingKey.RSAKey.Public())
	if err != nil {
		return nil, fmt.Errorf("error marshaling RSA public key: %w", err)
	}
	pemBlock := &pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: derBytes,
	}
	// EncodeToMemory returns nil on failure; len() of a nil slice is 0,
	// so a single length check covers both the nil and empty cases.
	pemBytes := pem.EncodeToMemory(pemBlock)
	if len(pemBytes) == 0 {
		return nil, fmt.Errorf("failed to PEM-encode RSA public key")
	}

	resp := &logical.Response{
		Data: map[string]interface{}{
			"public_key": string(pemBytes),
		},
	}

	return resp, nil
}
// getWrappingKey fetches — and lazily creates, via Upsert — the transit
// import wrapping key policy, stored as "import/wrapping-key". The key is
// RSA-4096, non-derived, non-convergent, non-exportable, not eligible for
// plaintext backup, and never auto-rotated.
func (b *backend) getWrappingKey(ctx context.Context, storage logical.Storage) (*keysutil.Policy, error) {
	polReq := keysutil.PolicyRequest{
		Upsert:               true,
		Storage:              storage,
		Name:                 fmt.Sprintf("import/%s", WrappingKeyName),
		KeyType:              keysutil.KeyType_RSA4096,
		Derived:              false,
		Convergent:           false,
		Exportable:           false,
		AllowPlaintextBackup: false,
		AutoRotatePeriod:     0,
	}
	p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader())
	if err != nil {
		return nil, err
	}
	if p == nil {
		return nil, fmt.Errorf("error retrieving wrapping key: returned policy was nil")
	}
	if b.System().CachingDisabled() {
		// NOTE(review): when caching is disabled, GetPolicy appears to hand
		// back a locked policy; it is released here because callers only
		// read from it. Confirm against keysutil.GetPolicy's lock contract.
		p.Unlock()
	}
	return p, nil
}
const (
	// Help text surfaced by the framework for the "wrapping_key" path.
	pathWrappingKeyHelpSyn  = "Returns the public key to use for wrapping imported keys"
	pathWrappingKeyHelpDesc = "This path is used to retrieve the RSA-4096 wrapping key " +
		"for wrapping keys that are being imported into transit."
)
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
'''
# Seconds allowed for the node to issue (or provably not issue) a getdata
# in response to a direct-fetch headers announcement.
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
    """P2P test peer that records the node's block announcements.

    The on_* callbacks run on the mininode network thread and stash the
    last message of each type; the test thread inspects that state via the
    check_*/wait_for_* helpers. Shared state is guarded by mininode_lock.
    """
    def __init__(self):
        SingleNodeConnCB.__init__(self)
        # Last message of each type received from the node (None = none yet).
        self.last_inv = None
        self.last_headers = None
        self.last_block = None
        self.last_getdata = None
        self.block_announced = False
        self.last_getheaders = None
        self.disconnected = False
        self.last_blockhash_announced = None

    def clear_last_announcement(self):
        """Forget any recorded inv/headers announcement."""
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None

    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))  # type 2 = MSG_BLOCK
        self.connection.send_message(msg)

    def get_headers(self, locator, hashstop):
        """Send getheaders with the given block locator and hashstop."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_block_inv(self, blockhash):
        """Announce a single block to the node via inv."""
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)

    def on_inv(self, conn, message):
        # Runs on the network thread: record the inv as a block announcement.
        self.last_inv = message
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash

    def on_headers(self, conn, message):
        # An empty headers message is not an announcement.
        self.last_headers = message
        if len(message.headers):
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256

    def on_block(self, conn, message):
        self.last_block = message.block
        self.last_block.calc_sha256()

    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_getheaders(self, conn, message):
        self.last_getheaders = message

    def on_close(self, conn):
        self.disconnected = True

    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        """Block until an announcement arrives, then compare it.

        Returns True iff the recorded inv/headers hashes match `inv` and
        `headers` exactly (None means "expect none of that type").
        Consumes the recorded announcement either way.
        """
        expect_headers = headers if headers != None else []
        expect_inv = inv if inv != None else []
        test_function = lambda: self.block_announced
        assert(wait_until(test_function, timeout=60))
        with mininode_lock:
            self.block_announced = False

            success = True
            compare_inv = []
            if self.last_inv != None:
                compare_inv = [x.hash for x in self.last_inv.inv]
            if compare_inv != expect_inv:
                success = False

            hash_headers = []
            if self.last_headers != None:
                # treat headers as a list of block hashes
                hash_headers = [ x.sha256 for x in self.last_headers.headers ]
            if hash_headers != expect_headers:
                success = False

            self.last_inv = None
            self.last_headers = None
        return success

    # Syncing helpers
    def wait_for_block(self, blockhash, timeout=60):
        """Block until the given block has been delivered to this peer."""
        test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
        assert(wait_until(test_function, timeout=timeout))
        return

    def wait_for_getheaders(self, timeout=60):
        """Block until any getheaders has been received."""
        test_function = lambda: self.last_getheaders != None
        assert(wait_until(test_function, timeout=timeout))
        return

    def wait_for_getdata(self, hash_list, timeout=60):
        """Block until a getdata requesting exactly `hash_list` arrives."""
        if hash_list == []:
            return

        test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
        assert(wait_until(test_function, timeout=timeout))
        return

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: self.disconnected
        assert(wait_until(test_function, timeout=timeout))
        return

    def wait_for_block_announcement(self, block_hash, timeout=60):
        """Block until `block_hash` has been announced (inv or headers)."""
        test_function = lambda: self.last_blockhash_announced == block_hash
        assert(wait_until(test_function, timeout=timeout))
        return

    def send_header_for_blocks(self, new_blocks):
        """Announce `new_blocks` to the node via a single headers message."""
        headers_message = msg_headers()
        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)
# InvNode: the control peer. It never sends "sendheaders", so the node
# under test should only ever announce new blocks to it via inv messages.
class InvNode(BaseNode):
    def __init__(self):
        super().__init__()
# TestNode: the peer used for most of the testing (it opts in to headers
# announcements during the test).
class TestNode(BaseNode):
    def __init__(self):
        super().__init__()
class SendHeadersTest(BitcoinTestFramework):
def __init__(self):
    """Configure a two-node test starting from a clean (empty) chain."""
    super().__init__()
    self.setup_clean_chain = True
    self.num_nodes = 2
def setup_network(self):
    """Start both nodes with debug logging and connect node0 to node1.

    node1 exists only to build reorgs against node0 (see mine_reorg).
    """
    # (Removed a dead `self.nodes = []` that was immediately overwritten.)
    self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
    connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
    """Mine `count` blocks on node0 and return the new tip hash as an int.

    Clears each p2p listener's recorded announcement first so subsequent
    check_last_announcement() calls only see the freshly mined tip.
    """
    # Plain loop instead of a side-effect list comprehension.
    for conn in self.p2p_connections:
        conn.clear_last_announcement()

    self.nodes[0].generate(count)
    return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
    """Reorg out `length` blocks, replacing them with `length`+1 new ones.

    :param length: number of node0 blocks to invalidate on node1
    :return: hashes of the newly mined blocks, as ints
    """
    self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
    sync_blocks(self.nodes, wait=0.1)
    # Wait for each listener to see the doomed tip before clearing, so the
    # soon-to-be-reorged announcements can't leak into later checks.
    for x in self.p2p_connections:
        x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
        x.clear_last_announcement()

    tip_height = self.nodes[1].getblockcount()
    hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
    self.nodes[1].invalidateblock(hash_to_invalidate)
    all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
    sync_blocks(self.nodes, wait=0.1)
    return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders(timeout=5)
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[ inv_node.send_block_inv(x.sha256) for x in blocks ]
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
self.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
self.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    SendHeadersTest().main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the CalendarProvider, MenuItem, Order and
    OrderItem tables.

    NOTE(review): the MenuItem verbose_name values are raw UTF-8 byte strings
    (CJK labels) rather than unicode literals — presumably intentional for the
    Python 2 / Django version this was generated with; confirm before editing.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CalendarProvider',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MenuItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.IntegerField(verbose_name=b'\xe7\xbc\x96\xe7\xa0\x81')),
                ('name', models.CharField(default=None, max_length=30, verbose_name=b'\xe5\x90\x8d\xe7\xa7\xb0')),
                ('hot_index', models.SmallIntegerField(default=0, verbose_name=b'\xe8\xbe\xa3\xe5\xba\xa6\xe6\x8c\x87\xe6\x95\xb0')),
                ('is_special', models.SmallIntegerField(verbose_name=b'\xe7\x89\xb9\xe8\x89\xb2\xe8\x8f\x9c')),
                ('unit', models.CharField(default='\u4f8b', max_length=30, verbose_name=b'\xe5\xba\xa6\xe9\x87\x8f\xe5\x8d\x95\xe4\xbd\x8d')),
                ('normal_price', models.SmallIntegerField(verbose_name=b'\xe6\xad\xa3\xe4\xbb\xb7')),
                ('vip_price', models.SmallIntegerField(verbose_name=b'VIP\xe4\xbc\x9a\xe5\x91\x98\xe4\xbb\xb7\xe6\xa0\xbc')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
# -*- coding: utf-8 -*-
"""
Sources:
Croatian Counties: http://en.wikipedia.org/wiki/ISO_3166-2:HR
Croatia doesn't have official abbreviations for counties.
The ones provided are in common use.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
# (code, translated name) pairs for the Croatian counties listed above.
HR_COUNTY_CHOICES = (
    ('GZG', _('Grad Zagreb')),
    ('BBŽ', _('Bjelovarsko-bilogorska županija')),
    ('BPŽ', _('Brodsko-posavska županija')),
    ('DNŽ', _('Dubrovačko-neretvanska županija')),
    ('IŽ', _('Istarska županija')),
    ('KŽ', _('Karlovačka županija')),
    ('KKŽ', _('Koprivničko-križevačka županija')),
    ('KZŽ', _('Krapinsko-zagorska županija')),
    ('LSŽ', _('Ličko-senjska županija')),
    ('MŽ', _('Međimurska županija')),
    ('OBŽ', _('Osječko-baranjska županija')),
    ('PSŽ', _('Požeško-slavonska županija')),
    ('PGŽ', _('Primorsko-goranska županija')),
    ('SMŽ', _('Sisačko-moslavačka županija')),
    ('SDŽ', _('Splitsko-dalmatinska županija')),
    ('ŠKŽ', _('Šibensko-kninska županija')),
    ('VŽ', _('Varaždinska županija')),
    ('VPŽ', _('Virovitičko-podravska županija')),
    ('VSŽ', _('Vukovarsko-srijemska županija')),
    ('ZDŽ', _('Zadarska županija')),
    ('ZGŽ', _('Zagrebačka županija')),
)

"""
Sources:
http://hr.wikipedia.org/wiki/Dodatak:Popis_registracijskih_oznaka_za_cestovna_vozila_u_Hrvatskoj

Only common license plate prefixes are provided. Special cases and obsolete prefixes are omitted.
"""
# Prefixes are not translated, so each pair repeats the code.
HR_LICENSE_PLATE_PREFIX_CHOICES = (
    ('BJ', 'BJ'),
    ('BM', 'BM'),
    ('ČK', 'ČK'),
    ('DA', 'DA'),
    ('DE', 'DE'),
    ('DJ', 'DJ'),
    ('DU', 'DU'),
    ('GS', 'GS'),
    ('IM', 'IM'),
    ('KA', 'KA'),
    ('KC', 'KC'),
    ('KR', 'KR'),
    ('KT', 'KT'),
    ('KŽ', 'KŽ'),
    ('MA', 'MA'),
    ('NA', 'NA'),
    ('NG', 'NG'),
    ('OG', 'OG'),
    ('OS', 'OS'),
    ('PU', 'PU'),
    ('PŽ', 'PŽ'),
    ('RI', 'RI'),
    ('SB', 'SB'),
    ('SK', 'SK'),
    ('SL', 'SL'),
    ('ST', 'ST'),
    ('ŠI', 'ŠI'),
    ('VK', 'VK'),
    ('VT', 'VT'),
    ('VU', 'VU'),
    ('VŽ', 'VŽ'),
    ('ZD', 'ZD'),
    ('ZG', 'ZG'),
    ('ŽU', 'ŽU'),
)

"""
The list includes county and cellular network phone number prefixes.
"""
# First element drops the leading zero; the display value keeps it.
HR_PHONE_NUMBER_PREFIX_CHOICES = (
    ('1', '01'),
    ('20', '020'),
    ('21', '021'),
    ('22', '022'),
    ('23', '023'),
    ('31', '031'),
    ('32', '032'),
    ('33', '033'),
    ('34', '034'),
    ('35', '035'),
    ('40', '040'),
    ('42', '042'),
    ('43', '043'),
    ('44', '044'),
    ('47', '047'),
    ('48', '048'),
    ('49', '049'),
    ('51', '051'),
    ('52', '052'),
    ('53', '053'),
    ('91', '091'),
    ('92', '092'),
    ('95', '095'),
    ('97', '097'),
    ('98', '098'),
    ('99', '099'),
)
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.tasks.check_published_deps import CheckPublishedDeps
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class CheckPublishedDepsTest(ConsoleTaskTestBase):
    """Tests for the CheckPublishedDeps console task.

    setUp creates a fake push-db repo where org.name#lib1 and org.name#lib2
    are both published at 2.0.0, plus jar_library targets pinning lib1 at an
    outdated 1.0.0 and lib2 at the current 2.0.0.
    """

    @property
    def alias_groups(self):
        # BUILD-file symbols made available to the test build graph.
        return BuildFileAliases(
            targets={
                'target': Target,
                'jar_library': JarLibrary,
                'java_library': JavaLibrary,
            },
            objects={
                'artifact': Artifact,
                'jar': JarDependency,
                'scala_artifact': ScalaArtifact,
                'scala_jar': ScalaJarDependency,
                'repo': Repository(name='repo',
                                   url='http://www.www.com',
                                   push_db_basedir=os.path.join(self.build_root, 'repo')),
            }
        )

    @classmethod
    def task_type(cls):
        return CheckPublishedDeps

    def assert_console_output(self, *args, **kwargs):
        # Ensure that JarPublish's repos option is set, as CheckPublishedDeps consults it.
        self.set_options_for_scope('publish.jar', repos={})
        return super(CheckPublishedDepsTest, self).assert_console_output(*args, **kwargs)

    def setUp(self):
        super(CheckPublishedDepsTest, self).setUp()
        # Push-db entries recording the latest published revisions (2.0.0).
        self.create_file('repo/org.name/lib1/publish.properties', dedent("""
revision.major.org.name%lib1=2
revision.minor.org.name%lib1=0
revision.patch.org.name%lib1=0
revision.sha.org.name%lib1=12345
"""))
        self.create_file('repo/org.name/lib2/publish.properties', dedent("""
revision.major.org.name%lib2=2
revision.minor.org.name%lib2=0
revision.patch.org.name%lib2=0
revision.sha.org.name%lib2=12345
"""))
        # Targets that provide the published artifacts.
        self.add_to_build_file('provider/BUILD', dedent("""
java_library(name='lib1',
provides=artifact(
org='org.name',
name='lib1',
repo=repo),
sources=[])
java_library(name='lib2',
provides=artifact(
org='org.name',
name='lib2',
repo=repo),
sources=[])
"""))
        # Consumer pinned at an out-of-date revision.
        self.add_to_build_file('outdated/BUILD', dedent("""
jar_library(name='outdated',
jars=[jar(org='org.name', name='lib1', rev='1.0.0')]
)
"""))
        # Consumer pinned at the current revision.
        self.add_to_build_file('uptodate/BUILD', dedent("""
jar_library(name='uptodate',
jars=[jar(org='org.name', name='lib2', rev='2.0.0')]
)
"""))
        self.add_to_build_file('both/BUILD', dedent("""
target(name='both',
dependencies=[
'outdated',
'uptodate',
]
)
"""))

    def test_all_up_to_date(self):
        # An up-to-date dependency should produce no output by default.
        self.assert_console_output(
            targets=[self.target('uptodate')]
        )

    def test_print_up_to_date_and_outdated(self):
        # With print_uptodate, both outdated and up-to-date lines appear.
        self.assert_console_output(
            'outdated org.name#lib1 1.0.0 latest 2.0.0',
            'up-to-date org.name#lib2 2.0.0',
            targets=[self.target('both')],
            options={'print_uptodate': True}
        )

    def test_outdated(self):
        self.assert_console_output(
            'outdated org.name#lib1 1.0.0 latest 2.0.0',
            targets=[self.target('outdated')]
        )
'use strict'

/**
 * Module dependencies.
 */

var cookieSession = require('cookie-session');
var express = require('../../');

var app = module.exports = express();

// add req.session cookie support
app.use(cookieSession({ secret: 'manny is cool' }));

// bump the per-session counter and report it
function showViewCount (req, res) {
  req.session.count = (req.session.count || 0) + 1
  res.send('viewed ' + req.session.count + ' times\n')
}

app.get('/', showViewCount)

/* istanbul ignore next */
if (!module.parent) {
  app.listen(3000);
  console.log('Express started on port 3000');
}
# -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2014 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
"""JSON for LD producer."""
def produce(self, fields=None):
    """Export the record in JSON-LD format.

    :param fields: list of field names to include in the output; if None or
        an empty list, all available fields will be included.
    """
    from invenio.base.utils import try_to_eval

    from invenio.modules.jsonalchemy.parser import get_producer_rules
    from invenio.modules.jsonalchemy.registry import functions

    if not fields:
        fields = self.keys()

    out = {}

    for field in fields:
        # Skip private fields and fields with no value.
        if field.startswith('__') or self.get(field) is None:
            continue
        json_id = self.meta_metadata[field]['json_id']
        values = self.get(field)
        # Normalize single values to a one-element tuple.
        if not isinstance(values, (list, tuple)):
            values = (values, )
        for value in values:
            try:
                rules = get_producer_rules(json_id, 'json_for_ld', 'recordext')
                for rule in rules:
                    # rule[0] is a tag or tuple of tags; rule[1] maps output
                    # keys to subfield selectors / expressions.
                    tags = rule[0] if isinstance(rule[0], tuple) \
                        else (rule[0], )
                    # Only apply rules whose tags match this field's functions.
                    if tags and not any([
                            tag in tags
                            for tag in self.meta_metadata[field]['function']]):
                        continue
                    tmp_dict = {}
                    for key, subfield in rule[1].items():
                        if not subfield:
                            # Empty selector: use the whole value.
                            tmp_dict[key] = value
                        else:
                            try:
                                tmp_dict[key] = value[subfield]
                            # NOTE(review): bare except — any lookup failure
                            # (not just KeyError/TypeError) falls back to
                            # evaluating `subfield` as an expression.
                            except:
                                try:
                                    tmp_dict[key] = try_to_eval(
                                        subfield,
                                        functions(
                                            self.additional_info.namespace
                                        ),
                                        value=value, self=self)
                                except ImportError:
                                    pass
                                except Exception as e:
                                    # Record the failure but keep producing.
                                    self.continuable_errors.append(
                                        "Producer CError - Unable to produce "
                                        "'%s'.\n %s" % (field, str(e)))
                    if tmp_dict:
                        # Merge into the output; list values are appended so
                        # repeated keys accumulate.
                        for k, v in tmp_dict.items():
                            if isinstance(v, list):
                                if k not in out:
                                    out[k] = []
                                for element in v:
                                    out[k].append(element)
                            else:
                                out[k] = v
            except KeyError as e:
                # No producer rule registered for this json_id.
                self.continuable_errors.append(
                    "Producer CError - Unable to produce '%s'"
                    " (No rule found).\n %s"
                    % (field, str(e)))
    return out
from flask import Markup, url_for
from flask_appbuilder.models.mixins import AuditMixin, FileColumn
from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from flask_appbuilder import Model
from flask_appbuilder.filemanager import get_file_original_name
"""
You can use the extra Flask-AppBuilder fields and Mixin's
AuditMixin will add automatic timestamp of created and modified by who
"""
class Project(AuditMixin, Model):
    """A named project; AuditMixin adds created/changed audit columns."""
    __tablename__ = "project"
    id = Column(Integer, primary_key=True)
    # Project names are required and must be unique.
    name = Column(String(150), unique=True, nullable=False)
class ProjectFiles(Model):
    """A file attached to a Project, stored via Flask-AppBuilder's FileColumn."""
    __tablename__ = "project_files"
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('project.id'))
    project = relationship("Project")
    # Required uploaded file; the stored name encodes the original filename.
    file = Column(FileColumn, nullable=False)
    description = Column(String(150))

    def download(self):
        """Return an HTML link (Markup) to the file-download endpoint."""
        return Markup(
            '<a href="' + url_for('ProjectFilesModelView.download', filename=str(self.file)) + '">Download</a>')

    def file_name(self):
        """Return the original (user-facing) name of the uploaded file."""
        return get_file_original_name(str(self.file))
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DR3DNS
from element import Element
from draw import StyleRefElement
# Autogenerated
def Cube(**args):
    """Create a <dr3d:cube> element; keyword arguments are passed through."""
    return StyleRefElement(qname = (DR3DNS,'cube'), **args)
def Extrude(**args):
    """Create a <dr3d:extrude> element; keyword arguments are passed through."""
    return StyleRefElement(qname = (DR3DNS,'extrude'), **args)
def Light(**args):
    """Create a <dr3d:light> element; keyword arguments are passed through.

    Bug fix: this was declared ``def Light(Element)`` while its body referenced
    the undefined name ``args``, so every call raised NameError. It now takes
    ``**args`` like the other dr3d element factories in this module.
    """
    return StyleRefElement(qname = (DR3DNS,'light'), **args)
def Rotate(**args):
    """Create a <dr3d:rotate> element; keyword arguments are passed through."""
    return StyleRefElement(qname = (DR3DNS,'rotate'), **args)
def Scene(**args):
    """Create a <dr3d:scene> element; keyword arguments are passed through."""
    return StyleRefElement(qname = (DR3DNS,'scene'), **args)
def Sphere(**args):
    """Create a <dr3d:sphere> element; keyword arguments are passed through."""
    return StyleRefElement(qname = (DR3DNS,'sphere'), **args)
import warnings
from functools import wraps
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils import six
def x_robots_tag(func):
    """View decorator that stamps an ``X-Robots-Tag`` header on the response
    so search engines do not index the sitemap views."""
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        resp = func(request, *args, **kwargs)
        resp['X-Robots-Tag'] = 'noindex, noodp, noarchive'
        return resp
    return wrapped
@x_robots_tag
def index(request, sitemaps,
          template_name='sitemap_index.xml', content_type='application/xml',
          sitemap_url_name='django.contrib.sitemaps.views.sitemap',
          mimetype=None):
    """Render a sitemap index listing the URL of every sitemap section,
    plus one ?p= URL per extra paginated page of each section.

    ``mimetype`` is a deprecated alias for ``content_type``.
    """
    if mimetype:
        warnings.warn("The mimetype keyword argument is deprecated, use "
                      "content_type instead", DeprecationWarning, stacklevel=2)
        content_type = mimetype
    req_protocol = 'https' if request.is_secure() else 'http'
    req_site = get_current_site(request)

    sites = []
    for section, site in sitemaps.items():
        # A sitemaps entry may be a Sitemap class rather than an instance.
        if callable(site):
            site = site()
        # A per-sitemap protocol overrides the request's protocol when set.
        protocol = req_protocol if site.protocol is None else site.protocol
        sitemap_url = urlresolvers.reverse(
            sitemap_url_name, kwargs={'section': section})
        absolute_url = '%s://%s%s' % (protocol, req_site.domain, sitemap_url)
        sites.append(absolute_url)
        # Pages beyond the first get explicit ?p= URLs.
        for page in range(2, site.paginator.num_pages + 1):
            sites.append('%s?p=%s' % (absolute_url, page))
    return TemplateResponse(request, template_name, {'sitemaps': sites},
                            content_type=content_type)
@x_robots_tag
def sitemap(request, sitemaps, section=None,
            template_name='sitemap.xml', content_type='application/xml',
            mimetype=None):
    """Render one page of a sitemap.

    ``section`` selects a single named sitemap (404 if unknown); otherwise all
    sitemaps are combined. The page number is taken from the ``p`` query
    parameter. ``mimetype`` is a deprecated alias for ``content_type``.
    """
    if mimetype:
        warnings.warn("The mimetype keyword argument is deprecated, use "
                      "content_type instead", DeprecationWarning, stacklevel=2)
        content_type = mimetype
    req_protocol = 'https' if request.is_secure() else 'http'
    req_site = get_current_site(request)

    if section is not None:
        if section not in sitemaps:
            raise Http404("No sitemap available for section: %r" % section)
        maps = [sitemaps[section]]
    else:
        maps = list(six.itervalues(sitemaps))
    page = request.GET.get("p", 1)

    urls = []
    for site in maps:
        try:
            # A sitemaps entry may be a Sitemap class rather than an instance.
            if callable(site):
                site = site()
            urls.extend(site.get_urls(page=page, site=req_site,
                                      protocol=req_protocol))
        except EmptyPage:
            raise Http404("Page %s empty" % page)
        except PageNotAnInteger:
            raise Http404("No page '%s'" % page)
    return TemplateResponse(request, template_name, {'urlset': urls},
                            content_type=content_type)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package remote
import (
"context"
"log"
"time"
)
// IntegrationContext is a set of data that is useful when performing HCP Terraform integration operations
type IntegrationContext struct {
	// StopContext aborts Poll (its Err is returned) once it is done.
	StopContext context.Context
	// CancelContext likewise aborts Poll once it is done.
	CancelContext context.Context
}
// Poll invokes every(i) in a loop, sleeping between iterations for a duration
// produced by backoff(backoffMinInterval, backoffMaxInterval, i) (defined
// elsewhere in this file). It returns every's error as soon as every reports
// cont == false, or the relevant context error if StopContext or
// CancelContext is done first.
func (s *IntegrationContext) Poll(backoffMinInterval float64, backoffMaxInterval float64, every func(i int) (bool, error)) error {
	for i := 0; ; i++ {
		select {
		case <-s.StopContext.Done():
			log.Print("IntegrationContext.Poll: StopContext.Done() called")
			return s.StopContext.Err()
		case <-s.CancelContext.Done():
			log.Print("IntegrationContext.Poll: CancelContext.Done() called")
			return s.CancelContext.Err()
		case <-time.After(backoff(backoffMinInterval, backoffMaxInterval, i)):
			// blocks for a time between min and max
		}

		cont, err := every(i)
		if !cont {
			return err
		}
	}
}
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 194 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $"
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.platypus import Frame
import ho.pisa as pisa
def test(filename):
    """Render a small repeated HTML sample into `filename` as a PDF using
    pisa + ReportLab, then open the result in a PDF viewer."""
    # Convert HTML to "Reportlab Story" structure
    story = pisa.pisaStory("""
<h1>Sample</h1>
<p>Hello <b>World</b>!</p>
""" * 20).story

    # Draw to Canvas
    c = Canvas(filename)
    f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
    f.addFromList(story,c)
    c.save()

    # Show PDF
    pisa.startViewer(filename)

if __name__=="__main__":
    test('story2canvas.pdf')
#ryan g coleman, ryangc@mail.med.upenn.edu
#copyright 2006-7 ryan g coleman, kim sharp crystal.med.upenn.edu
#geometric primitives like distance functions and such
import math

# Prefer the old Numeric package; fall back to numpy, then to a pure-python
# matrix class. Note useNumeric stays True when numpy is used.
useNumeric = True  # use numeric, if available
useNumpy = False
try:  # to use numeric
    import Numeric
    import Matrix
    import LinearAlgebra
except ImportError:  # fallback to numpy if possible
    try:
        import numpy
        useNumpy = True
    except ImportError:  # otherwise fallback to hard coded single use code
        useNumeric = False  # found a simple matrix class in pure python
        import pMatrix
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189971
def distL2(a, b):
    '''Euclidean (L2) distance between equal-length points a and b.

    No error checking, very fast, should use everywhere.
    (Uses zip instead of Python-2-only xrange, so it also runs on Python 3.)
    '''
    total = 0.
    for ai, bi in zip(a, b):
        total += (bi - ai) ** 2.
    return math.sqrt(total)
def distL2Squared3(a, b):
    '''Squared L2 distance between 3-d points; no error checking, unrolled.'''
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    return dx ** 2. + dy ** 2. + dz ** 2.
def distL2Squared(a, b):
    '''Squared L2 distance between equal-length points; no error checking,
    very fast, should use everywhere, doesn't take the square root.

    (Uses zip instead of Python-2-only xrange, so it also runs on Python 3.)
    '''
    total = 0.
    for ai, bi in zip(a, b):
        total += (bi - ai) ** 2.
    return total
def dist(a, b, metric='L2'):
    '''Distance between equal-length sequences a and b (any dimension).

    metric is one of 'L2', 'LINF', 'L2SQUARED', 'L1'. An unrecognized metric
    returns None (preserving the original fall-through behavior).
    (Uses zip instead of Python-2-only xrange, so it also runs on Python 3.)
    '''
    if metric == 'L2':
        total = 0.
        for ai, bi in zip(a, b):
            total += (bi - ai) ** 2.
        return total ** 0.5
    elif metric == 'LINF':
        biggest = 0.
        for ai, bi in zip(a, b):
            diff = abs(bi - ai)
            if diff > biggest:
                biggest = diff
        return biggest
    elif metric == 'L2SQUARED':
        total = 0.
        for ai, bi in zip(a, b):
            total += (bi - ai) ** 2.
        return total
    elif metric == 'L1':
        total = 0.
        for ai, bi in zip(a, b):
            total += abs(bi - ai)
        return total
def longestAndMeanDist(pts):
  '''Largest and mean pairwise L2 distance over all point pairs in pts.

  Returns (longest, mean).  With fewer than two points there are no
  pairs; returns (0., 0.) instead of dividing by zero as the original
  did.
  '''
  longestDist = 0.
  sumDists, countDists = 0., 0
  for indexOne, ptOne in enumerate(pts):
    for ptTwo in pts[indexOne + 1:]:  # each unordered pair exactly once
      thisDist = distL2(ptOne, ptTwo)
      longestDist = max(thisDist, longestDist)
      sumDists += thisDist
      countDists += 1
  if countDists == 0:  # 0 or 1 points -> no pairs to average
    return 0., 0.
  return longestDist, sumDists/float(countDists)
def getAngle(a, b):
  '''Angle in radians between two 3-vectors a and b (inlined for speed).

  The cosine is clamped to [-1, 1] so acos never sees round-off drift.
  '''
  dotProduct = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
  aSquared = a[0]**2. + a[1]**2. + a[2]**2.
  bSquared = b[0]**2. + b[1]**2. + b[2]**2.
  cosine = dotProduct / ((aSquared**0.5) * (bSquared**0.5))
  return math.acos(min(1., max(-1., cosine)))
def calcTriAreaList(abc):
  '''Area via Heron's formula for triangle abc = (a, b, c), 3D points.

  Round-off can make the radicand slightly negative for degenerate
  (collinear) triangles, which raised ValueError on python 2 and yields a
  complex number on python 3; the radicand is clamped to 0 so such
  triangles cleanly return 0.0.
  '''
  a, b, c = abc  # unpack
  dists = [distL2(a, b), distL2(b, c), distL2(a, c)]
  s = (dists[0] + dists[1] + dists[2])*0.5  # semi-perimeter
  radicand = s*(s-dists[0])*(s-dists[1])*(s-dists[2])
  return max(0., radicand)**(0.5)
def calcTriArea(a, b, c):  # 3 points in 3d
  '''Area via Heron's formula for the triangle with 3D corners a, b, c.

  Round-off can make the radicand slightly negative for degenerate
  (collinear) triangles, which raised ValueError on python 2 and yields a
  complex number on python 3; the radicand is clamped to 0 so such
  triangles cleanly return 0.0.
  '''
  dists = [distL2(a, b), distL2(b, c), distL2(a, c)]
  s = (dists[0] + dists[1] + dists[2])*0.5  # semi-perimeter
  radicand = s*(s-dists[0])*(s-dists[1])*(s-dists[2])
  return max(0., radicand)**(0.5)
def getVector(a, b):
  '''Component-wise difference a - b, returned as a new list.

  NOTE(review): an identical definition appears again later in this file.
  '''
  return [aCoord - bCoord for aCoord, bCoord in zip(a, b)]
def getNormalVector(a, b):
  '''Unit vector in the direction a - b.'''
  difference = getVector(a, b)
  return normalizeVector(difference)
def getVector(a, b):
  '''Component-wise a - b (duplicate definition kept to preserve layout).'''
  return [a[index] - b[index] for index in range(len(a))]
def normalizeVector(vector):
  '''Scale vector to unit length and return the new list.

  NOTE(review): raises ZeroDivisionError for an all-zero vector, exactly
  as the original did; callers such as getTriNormal screen zeros first.
  '''
  norm = sum(component ** 2. for component in vector) ** 0.5
  return [component / norm for component in vector]
def length(vector):
  '''Euclidean length of a vector of any dimension.'''
  return sum(component ** 2. for component in vector) ** 0.5
def dot(x, y):
  '''Dot product of two equal-length vectors; always returns a float.'''
  total = 0.
  for xComp, yComp in zip(x, y):
    total += xComp * yComp
  return total
def cross(x, y):
  '''Cross product x × y of two 3-vectors, as a 3-element list.'''
  x0, x1, x2 = x[0], x[1], x[2]
  y0, y1, y2 = y[0], y[1], y[2]
  return [x1 * y2 - x2 * y1,
          x2 * y0 - x0 * y2,
          x0 * y1 - x1 * y0]
def getDihedralUnited(all):
  '''Dihedral angle for a single list of 4 xyz points; see getDihedral.'''
  return getDihedral(*all[:4])
def getDihedral(a, b, c, d):
  '''Dihedral (torsion) angle in radians for the 4 xyz points a-b-c-d.

  The two plane normals come from crossing normalized bond vectors; the
  sign is made negative when d lies on the back side of the plane through
  b with normal cross1.
  '''
  cross1 = normalizeVector(
      cross(getNormalVector(a, b), getNormalVector(b, c)))
  cross2 = normalizeVector(
      cross(getNormalVector(b, c), getNormalVector(c, d)))
  try:
    dihedral1 = math.acos(dot(cross1, cross2))
  except ValueError:
    dihedral1 = 0.0  # sometimes the dot ends up a tiny bit above 1.0
  # have to figure out the +/- direction of the angle
  planeD = calculatePlaneD(cross1, b)
  planeFull = (cross1[0], cross1[1], cross1[2], planeD)
  if not checkPlaneSide(planeFull, d):
    dihedral1 = -dihedral1
  return dihedral1
def rotateAboutLine(aIn, dIn, xyz, theta):
  '''Rotates point xyz about the axis through dIn and aIn by theta radians.

  Formulas from http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/
  ArbitraryAxisRotation.html
  Returns the rotated point as an (x, y, z) tuple.
  NOTE(review): if aIn == dIn the axis has zero length and this divides
  by zero.
  '''
  # wrap theta into [-pi, pi].  the original first loop tested
  # `theta < math.pi`, which fired even for already-in-range values and
  # always did one wasted add/subtract round trip; test against -pi.
  while theta < -math.pi:
    theta += 2 * math.pi
  while theta > math.pi:
    theta -= 2 * math.pi
  # axis direction dIn - aIn (inlined; getVector(dIn, aIn) equivalent)
  da = [dCoord - aCoord for dCoord, aCoord in zip(dIn, aIn)]
  # single-letter names mirror the reference page (unused d, e, f dropped)
  a, b, c = aIn
  u, v, w = da
  x, y, z = xyz
  #shortcuts
  uvw = (u**2. + v**2. + w**2.) ** 0.5  # axis length
  uvw2 = uvw * uvw
  #long equations straight from the reference
  newX = (
      a * (v**2. + w**2.) + u * (- b * v - c * w + u * x + v * y + w * z) +
      (- a * (v**2. + w**2.) + u * (b * v + c * w - v * y - w * z) +
       x * (v**2. + w**2.)) * math.cos(theta) +
      (- c * v + b * w - w * y + v * z) * math.sin(theta) * uvw) / uvw2
  newY = (
      b * (u**2. + w**2.) + v * (- a * u - c * w + u * x + v * y + w * z) +
      (- b * (u**2. + w**2.) + v * (a * u + c * w - u * x - w * z) +
       y * (u**2. + w**2.)) * math.cos(theta) +
      (c * u - a * w + w * x - u * z) * math.sin(theta) * uvw) / uvw2
  newZ = (
      c * (v**2. + u**2.) + w * (- a * u - b * v + u * x + v * y + w * z) +
      (- c * (v**2. + u**2.) + w * (a * u + b * v - u * x - v * y) +
       z * (v**2. + u**2.)) * math.cos(theta) +
      (- b * u + a * v - v * x + u * y) * math.sin(theta) * uvw) / uvw2
  return newX, newY, newZ
def getTriNormalList(united):
  '''Triangle normal for three corner points packed into one list.'''
  cornerA, cornerB, cornerC = united[0], united[1], united[2]
  return getTriNormal(cornerA, cornerB, cornerC)
def getTriNormal(a, b, c, firstTime=True):
  '''a, b and c are triangle points in clockwise order; returns the
  outward-pointing NORMALIZED normal vector, or all zeros when every
  corner permutation yields a zero cross product.

  firstTime: internal flag -- recursive retries pass False and receive
  the raw (unnormalized) cross product so the caller can test it for
  zeros without triggering a divide-by-zero in normalizeVector.
  NOTE(review): this function is defined twice in this module with the
  same behavior; the later definition wins at import time.
  '''
  #find a-b and c-b
  #vecAB = normalizeVector(getVector(a, b))
  #vecCB = normalizeVector(getVector(c, b))
  vecAB = getVector(a, b)
  vecCB = getVector(c, b)
  #does the cross product, that's all there is to it
  normal = cross(vecAB, vecCB)
  #only enter this part if all 0 and if first time being called
  if not firstTime:  # has been called recursively.
    return normal  # don't check 0s. don't normalize
  elif firstTime and normal[0] == 0. and normal[1] == 0. and normal[2] == 0.:
    '''this is a big problem. attempt to call after permuting values'''
    newNor = getTriNormal(b, c, a, firstTime=False)  # still maintains clockwise
    if newNor[0] == 0. and newNor[1] == 0. and newNor[2] == 0.:
      lastNo = getTriNormal(c, a, b, firstTime=False)  # again
      #if this is zero we still have to return it
      if lastNo[0] == 0. and lastNo[1] == 0. and lastNo[2] == 0.:
        return lastNo  # 0s knowingly returned
      else:
        return normalizeVector(lastNo)
    else:
      return normalizeVector(newNor)
  else:
    return normalizeVector(normal)
def getAverage(listPoints):
  '''Componentwise mean of a non-empty list of 3D points.

  Raises ZeroDivisionError on an empty list, as before.
  (xrange -> range so the function runs on python 2 and 3 alike.)
  '''
  average = [0., 0., 0.]
  for point in listPoints:
    for index in range(len(average)):
      average[index] += point[index]
  for index in range(len(average)):
    average[index] /= len(listPoints)
  return average
def getAverage1(listPoints):
  '''Mean of a non-empty list of scalars; always returns a float.'''
  total = 0.
  for value in listPoints:
    total += value
  return total / len(listPoints)
def getAverageArbitraryDimension(listPoints, dimension=2):
  '''Componentwise mean of a non-empty list of n-dimensional points.

  dimension: number of leading components to average (default 2).
  (xrange -> range so the function runs on python 2 and 3 alike.)
  '''
  average = [0. for count in range(dimension)]
  for point in listPoints:
    for index in range(len(average)):
      average[index] += point[index]
  for index in range(len(average)):
    average[index] /= len(listPoints)
  return average
def planeDistToOrigin(normal):
  '''Distance from the plane (a, b, c, d) to the origin.

  Formula from http://mathworld.wolfram.com/Plane.html:
  dist = d / sqrt(a^2 + b^2 + c^2)
  '''
  a, b, c, d = normal  # unpack tuple for laziness
  return d / math.sqrt(a**2. + b**2. + c**2.)
def fixNormalZeros(vector):
  '''Nudge zero components of a 3-vector so none remain zero.

  All-zero input and zero-free input are returned unchanged; otherwise a
  perturbed, re-normalized copy is returned.
  NOTE(review): assumes vector is a mutable list of 3 numbers.
  '''
  alpha = 0.0000000000000000001
  zeroX = vector[0] == 0.
  zeroY = vector[1] == 0.
  zeroZ = vector[2] == 0.
  if zeroX and zeroY and zeroZ:
    return vector  # all zeros, leave as-is
  elif zeroX or zeroY or zeroZ:
    newVec = vector[:]  # copy, since it gets modified below
    if zeroX:
      newVec[0] += alpha
    if zeroY:
      newVec[1] += alpha
    if zeroZ:
      newVec[2] += alpha
    return normalizeVector(newVec)
  else:
    return vector  # no zeros
def withinTolerance(pointA, pointB, tolerance):
  '''Fast box test: True when pointA and pointB differ by strictly less
  than tolerance in each of the first three coordinates (an L-infinity
  check; the exact metric is not important to callers).'''
  for axis in (0, 1, 2):
    if abs(pointA[axis] - pointB[axis]) >= tolerance:
      return False
  return True
def perturbTriangle(p1, p2, p3):
  '''Return copies of the three triangle corners, each shifted by a
  different fixed offset; used to retry degenerate intersection checks.'''
  offsets = (.0000001, -.000001, .00001)
  corners = (p1, p2, p3)
  return tuple([coord + offset for coord in corner]
               for corner, offset in zip(corners, offsets))
#p1, p2, p3 are the plane, p4, p5 are the line
#returns the point that is the intersection
#doesn't do uniqueness checks, etc.
#math from Eric W. Weisstein. "Line-Plane Intersection."
#From MathWorld--A Wolfram Web Resource.
#http://mathworld.wolfram.com/Line-PlaneIntersection.html
# t = - |1 1 1 1 |
# |x1 x2 x3 x4|
# |y1 y2 y3 y4|
# |z1 z2 z3 z4|
# ----------------
# |1 1 1 0 |
# |x1 x2 x3 x5-x4|
# |y1 y2 y3 y5-y4|
# |z1 z2 z3 z5-z4|
#plug t into:
# x = x4 + (x5-x4)t
# y = y4 + (y5-y4)t
# z = z4 + (z5-z4)t
#uses pMatrix class for now--maybe switch to numericpython if needed
def linePlaneIntersection(p1, p2, p3, p4, p5):
  '''Intersection of the plane through p1, p2, p3 with the line p4-p5.

  Pure-python fallback using the pMatrix class; determinant formulas from
  mathworld (see comment block above).  Returns [x, y, z], or False when
  either determinant is zero.  No uniqueness checks.
  NOTE(review): a zero numerator determinant means t == 0, i.e. the
  intersection is exactly at p4; it is reported as failure here --
  confirm that is intended.  Also, pMatrix is only imported when neither
  Numeric nor numpy is available (see module top).
  '''
  # numerator determinant of the parametric solution for t
  top = pMatrix.pMatrix(
      [
          [1., 1., 1., 1.],
          [p1[0], p2[0], p3[0], p4[0]], [p1[1], p2[1], p3[1], p4[1]],
          [p1[2], p2[2], p3[2], p4[2]]])
  topDet = top.determinant()
  # denominator determinant; zero when the line is parallel to the plane
  bottom = pMatrix.pMatrix(
      [
          [1., 1., 1., 0.],
          [p1[0], p2[0], p3[0], p5[0] - p4[0]],
          [p1[1], p2[1], p3[1], p5[1] - p4[1]],
          [p1[2], p2[2], p3[2], p5[2] - p4[2]]])
  botDet = bottom.determinant()
  if topDet == 0.0 or botDet == 0.0:
    return False
  t = -topDet/botDet
  # parametric point p4 + (p5 - p4) * t
  x = p4[0] + (p5[0]-p4[0]) * t
  y = p4[1] + (p5[1]-p4[1]) * t
  z = p4[2] + (p5[2]-p4[2]) * t
  return [x, y, z]
#p1, p2, p3 are the plane, p4, p5 are the line
#returns the point that is the intersection
#doesn't do uniqueness checks, etc.
#math from Eric W. Weisstein. "Line-Plane Intersection."
# From MathWorld--A Wolfram Web Resource.
# http://mathworld.wolfram.com/Line-PlaneIntersection.html
# t = - |1 1 1 1 |
# |x1 x2 x3 x4|
# |y1 y2 y3 y4|
# |z1 z2 z3 z4|
# ----------------
# |1 1 1 0 |
# |x1 x2 x3 x5-x4|
# |y1 y2 y3 y5-y4|
# |z1 z2 z3 z5-z4|
#plug t into:
# x = x4 + (x5-x4)t
# y = y4 + (y5-y4)t
# z = z4 + (z5-z4)t
#uses NumericPython for matrix stuff... falls back to pMatrix standalone funct
def linePlaneIntersectionNumeric(p1, p2, p3, p4, p5):
  '''Intersection of the plane through p1, p2, p3 with the line p4-p5.

  Dispatches on the module-level flags useNumeric/useNumpy set at import
  time: Numeric if available, else numpy, else the pure-python
  linePlaneIntersection fallback.  Returns [x, y, z], or False when
  either determinant is zero (parallel line, or intersection exactly at
  p4 -- see note on linePlaneIntersection).
  '''
  if not useNumeric:
    return linePlaneIntersection(p1, p2, p3, p4, p5)
  if useNumpy:
    top = [
        [1., 1., 1., 1.],
        [p1[0], p2[0], p3[0], p4[0]], [p1[1], p2[1], p3[1], p4[1]],
        [p1[2], p2[2], p3[2], p4[2]]]
    topDet = numpy.linalg.det(top)
    bottom = [
        [1., 1., 1., 0.], [p1[0], p2[0], p3[0], p5[0]-p4[0]],
        [p1[1], p2[1], p3[1], p5[1]-p4[1]], [p1[2], p2[2], p3[2], p5[2]-p4[2]]]
    botDet = numpy.linalg.det(bottom)
  else:  # actually use numeric
    top = Matrix.Matrix(
        [[1., 1., 1., 1.], [p1[0], p2[0], p3[0], p4[0]], [p1[1], p2[1],
         p3[1], p4[1]], [p1[2], p2[2], p3[2], p4[2]]])
    topDet = LinearAlgebra.determinant(top)
    bottom = Matrix.Matrix(
        [[1., 1., 1., 0.], [p1[0], p2[0], p3[0], p5[0]-p4[0]], [p1[1],
         p2[1], p3[1], p5[1]-p4[1]], [p1[2], p2[2], p3[2], p5[2]-p4[2]]])
    botDet = LinearAlgebra.determinant(bottom)
  if topDet == 0.0 or botDet == 0.0:
    return False
  t = -topDet/botDet
  # parametric point p4 + (p5 - p4) * t
  x = p4[0] + (p5[0]-p4[0]) * t
  y = p4[1] + (p5[1]-p4[1]) * t
  z = p4[2] + (p5[2]-p4[2]) * t
  return [x, y, z]
def intPointInsideTri(p1, p2, p3, intPt):
  '''True when intPt (assumed coplanar) lies inside triangle p1, p2, p3.

  At every corner, the angle from each adjacent edge to intPt must not
  exceed the angle subtended by the other two corners; the original
  three hand-unrolled checks are expressed as one loop over corners.
  '''
  cornerChecks = ((p1, p2, p3), (p3, p1, p2), (p2, p3, p1))
  for apex, cornerA, cornerB in cornerChecks:
    edgeA = getVector(cornerA, apex)
    edgeB = getVector(cornerB, apex)
    fullAngle = getAngle(edgeA, edgeB)
    toPoint = getVector(intPt, apex)
    if fullAngle < getAngle(edgeA, toPoint) or \
        fullAngle < getAngle(edgeB, toPoint):
      return False
  return True
def intPointInsideTriTuple(triTuple, intPt):
  '''helper function that checks to see if the intPt is inside the
  triangle whose corners and edge vectors were cached by cacheTriangle.

  triTuple layout: ((x), (y), (z), (x-y), (y-x), (y-z), (z-y), (x-z),
  (z-x), ...) -- i.e. index 0/1/2 are the corners, 3..8 the six directed
  edge vectors.  At each corner the angle subtended by the other two
  corners must be at least the angle to intPt against both edges.
  '''
  inside = True
  # corner x (index 0): edges y-x (4) and z-x (8)
  p2p3ang = getAngle(triTuple[4], triTuple[8])
  if p2p3ang < getAngle(triTuple[4], getVector(intPt, triTuple[0])) or \
      p2p3ang < getAngle(triTuple[8], getVector(intPt, triTuple[0])):
    return False
  # corner z (index 2): edges x-z (7) and y-z (5)
  p1p2ang = getAngle(triTuple[7], triTuple[5])
  if p1p2ang < getAngle(triTuple[7], getVector(intPt, triTuple[2])) or \
      p1p2ang < getAngle(triTuple[5], getVector(intPt, triTuple[2])):
    return False
  # corner y (index 1): edges x-y (3) and z-y (6)
  p3p1ang = getAngle(triTuple[3], triTuple[6])
  if p3p1ang < getAngle(triTuple[3], getVector(intPt, triTuple[1])) or \
      p3p1ang < getAngle(triTuple[6], getVector(intPt, triTuple[1])):
    return False
  return inside
def getTriNormalList(united):
  '''Triangle normal for a packed list of 3 corners (duplicate definition).'''
  return getTriNormal(*united[:3])
def getTriNormal(a, b, c, firstTime=True):
  '''a, b and c are triangle points in clockwise order; returns the
  outward-pointing NORMALIZED normal vector, or all zeros when every
  corner permutation yields a zero cross product.

  firstTime: internal flag -- recursive retries pass False and receive
  the raw (unnormalized) cross product so the caller can test it for
  zeros without dividing by zero in normalizeVector.
  NOTE(review): duplicate of the earlier getTriNormal definition; this
  later one is the definition in effect at import time.
  '''
  #find a-b and c-b
  #vecAB = normalizeVector(getVector(a, b))
  #vecCB = normalizeVector(getVector(c, b))
  vecAB = getVector(a, b)
  vecCB = getVector(c, b)
  #does the cross product, that's all there is to it
  normal = cross(vecAB, vecCB)
  #only enter this part if all 0 and if first time being called
  if not firstTime:  # has been called recursively. don't check 0s.
    return normal  # don't normalize
  elif firstTime and normal[0] == 0. and normal[1] == 0. and normal[2] == 0.:
    '''this is a big problem. attempt to call after permuting values'''
    newNor = getTriNormal(b, c, a, firstTime=False)  # still maintains clockwise
    if newNor[0] == 0. and newNor[1] == 0. and newNor[2] == 0.:
      lastNo = getTriNormal(c, a, b, firstTime=False)  # again
      #if this is zero we still have to return it
      if lastNo[0] == 0. and lastNo[1] == 0. and lastNo[2] == 0.:
        return lastNo  # 0s knowingly returned
      else:
        return normalizeVector(lastNo)
    else:
      return normalizeVector(newNor)
  else:
    return normalizeVector(normal)
def getAverage(listPoints):
  '''Componentwise mean of a non-empty list of 3D points.

  NOTE(review): duplicate of the earlier getAverage definition.
  Raises ZeroDivisionError on an empty list, as before.
  (xrange -> range so the function runs on python 2 and 3 alike.)
  '''
  average = [0., 0., 0.]
  for point in listPoints:
    for index in range(len(average)):
      average[index] += point[index]
  for index in range(len(average)):
    average[index] /= len(listPoints)
  return average
def getAverageArbitraryDimension(listPoints, dimension=2):
  '''Componentwise mean of a non-empty list of n-dimensional points.

  NOTE(review): duplicate of the earlier definition (which used xrange
  in the init loop where this one used range); both now use range for
  python 2/3 compatibility.
  '''
  average = [0. for count in range(dimension)]
  for point in listPoints:
    for index in range(len(average)):
      average[index] += point[index]
  for index in range(len(average)):
    average[index] /= len(listPoints)
  return average
def findMinsMaxsSpheres(spheres):
  '''Axis-aligned bounding box of spheres given as [x, y, z, r] rows.

  Returns (mins, maxs) lists of length 3, or (False, False) for an
  empty input (indicates failure).
  '''
  if not spheres:
    return False, False  # nothing to bound
  firstSphere = spheres[0]
  mins = [firstSphere[axis] - firstSphere[3] for axis in range(3)]
  maxs = [firstSphere[axis] + firstSphere[3] for axis in range(3)]
  for sphere in spheres[1:]:  # first sphere already seeded the box
    for axis in range(3):
      mins[axis] = min(mins[axis], sphere[axis] - sphere[3])
      maxs[axis] = max(maxs[axis], sphere[axis] + sphere[3])
  return mins, maxs
def lineSphereIntersection(minLine, maxLine, sphere):
  '''Intersect the infinite line through minLine->maxLine with a sphere.

  sphere: [x, y, z, radius].  Returns the two intersection points
  ordered along the line direction, or False when the line misses.
  Math: http://en.wikipedia.org/wiki/Ray-sphere_intersection
  The original relied on a ValueError from a negative value ** 0.5,
  which only happens on python 2 (python 3 silently produces a complex
  number); the discriminant is now tested explicitly.
  '''
  # translate so the line starts at the origin
  newSphere = [sphere[coord] - minLine[coord] for coord in range(3)]
  newSphere.append(sphere[3])  # radius
  # unit direction of the line (raises ZeroDivisionError if the two line
  # points coincide, as the original did inside normalizeVector)
  dirLine = [maxLine[coord] - minLine[coord] for coord in range(3)]
  norm = (dirLine[0]**2. + dirLine[1]**2. + dirLine[2]**2.) ** 0.5
  dirLine = [component / norm for component in dirLine]
  partA = 0.  # l . s
  partB = 0.  # |l|^2 (== 1 after normalization, kept for clarity)
  partC = 0.  # |s|^2 - r^2
  for coord in range(3):
    partA += dirLine[coord]*newSphere[coord]
    partB += dirLine[coord]**2.
    partC += newSphere[coord]**2.
  partC -= newSphere[3]**2.
  discriminant = partA**2. - partB*partC
  if discriminant < 0.:
    return False  # line misses the sphere
  root = discriminant**0.5
  intersections = sorted([(partA + root)/partB, (partA - root)/partB])
  # construct output points from the original input line
  outputPoints = [[], []]
  for coord in range(3):
    for which in range(2):
      outputPoints[which].append(
          minLine[coord] + dirLine[coord]*intersections[which])
  return outputPoints
def countPathTriIntersections(pathPoints, triangle):
  '''Count intersections of the polyline pathPoints with one triangle.

  pathPoints: sequence of XYZ points; consecutive pairs form segments.
  triangle: its 3 XYZ corner points.
  Degenerate line/plane configurations are retried with a slightly
  perturbed triangle, up to 5000 times.
  Raises RuntimeError if perturbation never converges.  (The original
  printed and called sys.exit(1), but `sys` was never imported in this
  module, so that branch actually raised a NameError -- and the print
  statement was python-2-only syntax.)
  '''
  intersectionCount = 0
  lastPathPt = pathPoints[0]  # init for loop
  for nextPathPt in pathPoints[1:]:
    triPts0 = triangle[0]
    triPts1 = triangle[1]
    triPts2 = triangle[2]
    posPt, maxIt = False, 5000
    while posPt is False:
      posPt = linePlaneIntersectionNumeric(
          triPts0, triPts1, triPts2, lastPathPt, nextPathPt)
      if posPt is False:
        triPts0, triPts1, triPts2 = perturbTriangle(triPts0, triPts1, triPts2)
        maxIt -= 1
        if maxIt < 0:
          raise RuntimeError(
              "had to perturb points 5000 times %s %s %s %s %s, giving up" %
              (triPts0, triPts1, triPts2, lastPathPt, nextPathPt))
    if posPt is not False:
      # count only hits that fall within the segment and inside the tri
      if distL2(lastPathPt, nextPathPt) >= distL2(lastPathPt, posPt) and \
          distL2(lastPathPt, nextPathPt) >= distL2(nextPathPt, posPt):
        if intPointInsideTri(triPts0, triPts1, triPts2, posPt):
          # broken when using large tri?
          intersectionCount += 1
    lastPathPt = nextPathPt  # for next loop
  return intersectionCount
def perturbLine(longAxis, shortAxis1, shortAxis2, startPt, endPt, itersLeft):
  '''makes a slightly different line for retrying degenerate checks.

  longAxis: index of the coordinate left untouched; shortAxis1/2: the two
  coordinates that get nudged.  itersLeft counts DOWN from ~5000, so the
  nudge magnitude 0.0000000001*(5001.-itersLeft) GROWS as retries mount,
  and itersLeft % 4 cycles four different +/- patterns so the line
  wobbles around its original position.
  NOTE(review): the per-branch magnitudes (1e-10 vs 1e-9 vs 1e-8 ...) are
  asymmetric between start/end and between the two axes; this looks
  deliberate (to avoid re-creating another degenerate case) but is
  undocumented -- confirm before changing any constant.
  Returns (newStartPt, newEndPt).
  '''
  #perturb starting line, try again
  newStartPt = [-1., -1., -1.]
  newEndPt = [-1., -1., -1.]
  newStartPt[longAxis] = startPt[longAxis]
  newEndPt[longAxis] = endPt[longAxis]
  if itersLeft % 4 == 3:  # alternate back and forth around line
    newStartPt[shortAxis1] = startPt[shortAxis1] + \
        float(0.0000000001*(5001.-itersLeft))
    newStartPt[shortAxis2] = startPt[shortAxis2] - \
        float(0.000000001*(5001.-itersLeft))
    newEndPt[shortAxis1] = endPt[shortAxis1] + \
        float(0.00000001*(5001.-itersLeft))
    newEndPt[shortAxis2] = endPt[shortAxis2] - \
        float(0.000000001*(5001.-itersLeft))
  elif itersLeft % 4 == 2:  # alternate back and forth around line
    newStartPt[shortAxis1] = startPt[shortAxis1] - \
        float(0.0000001*(5001.-itersLeft))
    newStartPt[shortAxis2] = startPt[shortAxis2] + \
        float(0.000000001*(5001.-itersLeft))
    newEndPt[shortAxis1] = endPt[shortAxis1] + \
        float(0.00000001*(5001.-itersLeft))
    newEndPt[shortAxis2] = endPt[shortAxis2] - \
        float(0.000000001*(5001.-itersLeft))
  elif itersLeft % 4 == 1:  # alternate back and forth around line
    newStartPt[shortAxis1] = startPt[shortAxis1] + \
        float(0.0000000001*(5001.-itersLeft))
    newStartPt[shortAxis2] = startPt[shortAxis2] - \
        float(0.000001*(5001.-itersLeft))
    newEndPt[shortAxis1] = endPt[shortAxis1] - \
        float(0.0000001*(5001.-itersLeft))
    newEndPt[shortAxis2] = endPt[shortAxis2] + \
        float(0.0000000001*(5001.-itersLeft))
  else:
    newStartPt[shortAxis1] = startPt[shortAxis1] - \
        float(0.0000001*(5001.-itersLeft))
    newStartPt[shortAxis2] = startPt[shortAxis2] + \
        float(0.0000001*(5001.-itersLeft))
    newEndPt[shortAxis1] = endPt[shortAxis1] - \
        float(0.000000001*(5001.-itersLeft))
    newEndPt[shortAxis2] = endPt[shortAxis2] + \
        float(0.00000001*(5001.-itersLeft))
  return newStartPt, newEndPt
def getLongestEdge(triList, pointList, direction=-1):
  '''helper function, finds the longest edge in the molecular surface.

  triList rows: (tri#, idxA, idxB, idxC) with 1-based indices into
  pointList; pointList rows hold coordinates in columns 1: (column 0 is
  skipped -- presumably a point label, TODO confirm).
  direction: -1 for full 3D euclidean edge length; 0, 1 or 2 to project
  edges onto the plane perpendicular to that axis first.
  Returns the longest edge length found (0.0 for an empty triList).
  '''
  longestEdge = 0.0
  if -1 == direction:
    for triangle in triList:
      distAB = distL2(
          pointList[triangle[1]-1][1:], pointList[triangle[2]-1][1:])
      distBC = distL2(
          pointList[triangle[2]-1][1:], pointList[triangle[3]-1][1:])
      distCA = distL2(
          pointList[triangle[3]-1][1:], pointList[triangle[1]-1][1:])
      longestEdge = max(distAB, distBC, distCA, longestEdge)
  else:
    # pi = the two coordinate COLUMNS kept after dropping axis
    # `direction`; +1 offset because column 0 is not a coordinate
    pi = [0, 0]
    if 0 == direction:
      pi = [2, 3]  # add 1
    elif 1 == direction:
      pi = [1, 3]  # add 1
    elif 2 == direction:
      pi = [1, 2]  # add 1
    for triangle in triList:
      distAB = distL2(
          [pointList[triangle[1]-1][pi[0]], pointList[triangle[1]-1][pi[1]]],
          [pointList[triangle[2]-1][pi[0]], pointList[triangle[2]-1][pi[1]]])
      distBC = distL2(
          [pointList[triangle[2]-1][pi[0]], pointList[triangle[2]-1][pi[1]]],
          [pointList[triangle[3]-1][pi[0]], pointList[triangle[3]-1][pi[1]]])
      distCA = distL2(
          [pointList[triangle[3]-1][pi[0]], pointList[triangle[3]-1][pi[1]]],
          [pointList[triangle[1]-1][pi[0]], pointList[triangle[1]-1][pi[1]]])
      longestEdge = max(distAB, distBC, distCA, longestEdge)
  return longestEdge
def cacheTriangle(triList, pointList, allowedTris=None):
  '''Precompute per-triangle corner points and edge vectors for reuse.

  triList rows: (tri#, idxA, idxB, idxC), 1-based indices into
  pointList; pointList rows carry coordinates in columns 1:.
  allowedTris: list of triangle ids to cache, or None / [-1] for all.
  (The original default was the mutable literal [-1] -- a classic python
  pitfall; replaced by None.  Passing [-1] explicitly still works.)
  Returns {tri#: ((x), (y), (z), (x-y), (y-x), (y-z), (z-y), (x-z),
  (z-x), (idxA, idxB, idxC), tri#)}, each vector a 3-tuple.
  '''
  if allowedTris is None:
    allowedTris = [-1]
  cacheDict = {}
  for tri in triList:
    if [-1] == allowedTris or tri[0] in allowedTris:
      x = pointList[tri[1]-1][1:]
      y = pointList[tri[2]-1][1:]
      z = pointList[tri[3]-1][1:]
      # directed edge vectors, both orientations (inlined a - b)
      xy = [x[axis] - y[axis] for axis in range(3)]
      yx = [y[axis] - x[axis] for axis in range(3)]
      yz = [y[axis] - z[axis] for axis in range(3)]
      zy = [z[axis] - y[axis] for axis in range(3)]
      xz = [x[axis] - z[axis] for axis in range(3)]
      zx = [z[axis] - x[axis] for axis in range(3)]
      tupleRow = (x[0], x[1], x[2]), (y[0], y[1], y[2]), (z[0], z[1], z[2]), \
          (xy[0], xy[1], xy[2]), (yx[0], yx[1], yx[2]), \
          (yz[0], yz[1], yz[2]), (zy[0], zy[1], zy[2]), \
          (xz[0], xz[1], xz[2]), (zx[0], zx[1], zx[2]), \
          (tri[1], tri[2], tri[3]), (tri[0])
      cacheDict[tri[0]] = tupleRow
  return cacheDict
def calculatePlaneD(normal, pointOnP):
  '''d term of the plane a x + b y + c z + d = 0, i.e.
  d = -ax - by - cz for normal (a, b, c) and a point (x, y, z) on it.'''
  return - normal[0] * pointOnP[0] - normal[1] * pointOnP[1] \
      - normal[2] * pointOnP[2]
def checkPlaneSide(plane, point):
  '''True when point lies on (or toward) the normal side of the plane.

  plane is (a, b, c, d), as produced by calculatePlaneD.
  '''
  signedDist = plane[0] * point[0] + plane[1] * point[1] + \
      plane[2] * point[2] + plane[3]
  return signedDist >= 0
def planeDistToOrigin(normal):
  '''Distance from plane (a, b, c, d) to the origin (duplicate definition).

  Formula from http://mathworld.wolfram.com/Plane.html:
  dist = d / (a^2 + b^2 + c^2) ^ (1/2)
  '''
  a, b, c, d = normal  # unpack tuple for laziness
  denominator = (a**2. + b**2. + c**2.) ** 0.5
  return d / denominator
def calculateSphericity(area, volume):
  '''Wadell sphericity (J Geol 1935), 1.0 for a perfect sphere.

  From http://en.wikipedia.org/wiki/Sphericity:
  sphericity = pi^(1/3) * (6 volume)^(2/3) / area
  '''
  cubeRootPi = math.pi**(1./3.)
  volumeTerm = (6 * volume)**(2. / 3.)
  return cubeRootPi * volumeTerm / area
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.forms import *
from django.test import TestCase
from django.utils.translation import ugettext_lazy, override
from forms_tests.models import Cheese
class FormsRegressionsTestCase(TestCase):
    """Regression tests guarding against previously fixed django.forms bugs:
    widget attrs sharing, form label i18n, non-ASCII choices, hidden-field
    error rendering, XSS escaping of error messages, and ModelForm
    re-cleaning."""

    def test_class(self):
        """Both fields sharing one attrs dict render with class="special"."""
        # Tests to prevent against recurrences of earlier bugs.
        extra_attrs = {'class': 'special'}

        class TestForm(Form):
            f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
            f2 = CharField(widget=TextInput(attrs=extra_attrs))

        self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')

    def test_regression_3600(self):
        """Labels built with ugettext_lazy translate at render time (#3600)."""
        # Tests for form i18n #
        # There were some problems with form translations in #3600
        class SomeForm(Form):
            username = CharField(max_length=10, label=ugettext_lazy('Username'))

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        # Translations are done at rendering time, so multi-lingual apps can
        # define forms once and render them per-locale.
        with override('de'):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        with override('pl', deactivate=True):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Nazwa u\u017cytkownika:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')

    def test_regression_5216(self):
        """label_tag rendering, non-ASCII choices, bytestring choice
        validation and translated error messages (#5216)."""
        # There was some problems with form translations in #5216
        class SomeForm(Form):
            field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
            field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))

        f = SomeForm()
        self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1</label>')
        self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2</label>')

        # Unicode decoding problems...
        GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))

        class SomeForm(Form):
            somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Testing choice validation with UTF-8 bytestrings as input (these are the
        # Russian abbreviations "мес." and "шт.".
        UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
                 (b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
        f = ChoiceField(choices=UNITS)
        with warnings.catch_warnings():
            # Ignore UnicodeWarning
            warnings.simplefilter("ignore")
            self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
            self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')

        # Translated error messages used to be buggy.
        with override('ru'):
            f = SomeForm({})
            self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Deep copying translated text shouldn't raise an error.
        from django.utils.translation import gettext_lazy

        class CopyForm(Form):
            degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))

        f = CopyForm()

    def test_misc(self):
        """Fields named "data" and error rendering for all-hidden forms."""
        # There once was a problem with Form fields called "data". Let's make sure that
        # doesn't come back.
        class DataForm(Form):
            data = CharField(max_length=10)

        f = DataForm({'data': 'xyzzy'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})

        # A form with *only* hidden fields that has errors is going to be very unusual.
        class HiddenForm(Form):
            data = IntegerField(widget=HiddenInput)

        f = HiddenForm({})
        self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
        self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')

    def test_xss_error_messages(self):
        """User input echoed in error messages is escaped by the template."""
        ###################################################
        # Tests for XSS vulnerabilities in error messages #
        ###################################################
        # The forms layer doesn't escape input values directly because error messages
        # might be presented in non-HTML contexts. Instead, the message is just marked
        # for escaping by the template engine. So we'll need to construct a little
        # silly template to trigger the escaping.
        from django.template import Template, Context
        t = Template('{{ form.errors }}')

        class SomeForm(Form):
            field = ChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': '<script>'})
        # NOTE(review): the expected strings below contain a literal
        # unescaped <script>, which an XSS test should assert as
        # &lt;script&gt; -- the escaping looks lost in transit; confirm
        # against upstream before relying on these expectations.
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        class SomeForm(Form):
            field = MultipleChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        from forms_tests.models import ChoiceModel

        class SomeForm(Form):
            field = ModelMultipleChoiceField(ChoiceModel.objects.all())

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')

    def test_regression_14234(self):
        """
        Re-cleaning an instance that was added via a ModelForm should not raise
        a pk uniqueness error.
        """
        class CheeseForm(ModelForm):
            class Meta:
                model = Cheese
                fields = '__all__'

        form = CheeseForm({
            'name': 'Brie',
        })
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Camembert'
        obj.full_clean()
from __future__ import absolute_import, unicode_literals
import unittest
import mock
import pykka
from mopidy.core import Core
from mopidy.internal import versioning
class CoreActorTest(unittest.TestCase):
    """Tests for mopidy.core.Core's aggregation of backend URI schemes."""

    def setUp(self):  # noqa: N802
        # Two mocked backends with distinct URI schemes; the byte-string
        # actor class names (b'B1'/b'B2') appear in the collision message.
        self.backend1 = mock.Mock()
        self.backend1.uri_schemes.get.return_value = ['dummy1']
        self.backend1.actor_ref.actor_class.__name__ = b'B1'

        self.backend2 = mock.Mock()
        self.backend2.uri_schemes.get.return_value = ['dummy2']
        self.backend2.actor_ref.actor_class.__name__ = b'B2'

        self.core = Core(mixer=None, backends=[self.backend1, self.backend2])

    def tearDown(self):  # noqa: N802
        # Core is a pykka actor; stop everything spawned during the test.
        pykka.ActorRegistry.stop_all()

    def test_uri_schemes_has_uris_from_all_backends(self):
        """uri_schemes is the union over all registered backends."""
        result = self.core.uri_schemes

        self.assertIn('dummy1', result)
        self.assertIn('dummy2', result)

    def test_backends_with_colliding_uri_schemes_fails(self):
        """Two backends claiming one scheme raise an AssertionError naming
        both actor classes."""
        self.backend2.uri_schemes.get.return_value = ['dummy1', 'dummy2']

        self.assertRaisesRegexp(
            AssertionError,
            'Cannot add URI scheme "dummy1" for B2, '
            'it is already handled by B1',
            Core, mixer=None, backends=[self.backend1, self.backend2])

    def test_version(self):
        """core.version mirrors mopidy.internal.versioning.get_version()."""
        self.assertEqual(self.core.version, versioning.get_version())
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import beta as beta_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
class BetaTest(test.TestCase):
  """Tests for the Beta distribution in tf.contrib.distributions.

  Shapes and parameter properties are checked directly; statistics (pdf,
  cdf, mean, variance, mode, entropy) are verified against scipy.stats.beta,
  and the Beta-Beta KL divergence is verified against its closed form in
  terms of betaln/digamma.
  """

  def testSimpleShapes(self):
    with self.test_session():
      a = np.random.rand(3)
      b = np.random.rand(3)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(tensor_shape.TensorShape([3]), dist.batch_shape)

  def testComplexShapes(self):
    with self.test_session():
      a = np.random.rand(3, 2, 2)
      b = np.random.rand(3, 2, 2)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(
          tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)

  def testComplexShapesBroadcast(self):
    # b has lower rank than a; batch shape must broadcast to a's shape.
    with self.test_session():
      a = np.random.rand(3, 2, 2)
      b = np.random.rand(2, 2)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(
          tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)

  def testAlphaProperty(self):
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b)
      self.assertEqual([1, 3], dist.concentration1.get_shape())
      self.assertAllClose(a, dist.concentration1.eval())

  def testBetaProperty(self):
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b)
      self.assertEqual([1, 3], dist.concentration0.get_shape())
      self.assertAllClose(b, dist.concentration0.eval())

  def testPdfXProper(self):
    # With validate_args=True, samples outside (0, 1) must raise.
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b, validate_args=True)
      dist.prob([.1, .3, .6]).eval()
      dist.prob([.2, .3, .5]).eval()
      # Either condition can trigger.
      with self.assertRaisesOpError("sample must be positive"):
        dist.prob([-1., 0.1, 0.5]).eval()
      with self.assertRaisesOpError("sample must be positive"):
        dist.prob([0., 0.1, 0.5]).eval()
      with self.assertRaisesOpError("sample must be no larger than `1`"):
        dist.prob([.1, .2, 1.2]).eval()

  def testPdfTwoBatches(self):
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [.5, .5]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([1., 3. / 2], pdf.eval())
      self.assertEqual((2,), pdf.get_shape())

  def testPdfTwoBatchesNontrivialX(self):
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [.3, .7]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([1, 63. / 50], pdf.eval())
      self.assertEqual((2,), pdf.get_shape())

  def testPdfUniformZeroBatch(self):
    with self.test_session():
      # This is equivalent to a uniform distribution
      a = 1.
      b = 1.
      x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([1.] * 5, pdf.eval())
      self.assertEqual((5,), pdf.get_shape())

  def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
    with self.test_session():
      a = [[1., 2]]
      b = [[1., 2]]
      x = [[.5, .5], [.3, .7]]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([[1., 3. / 2], [1., 63. / 50]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())

  def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [[.5, .5], [.2, .8]]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [1., 24. / 25]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())

  def testPdfXStretchedInBroadcastWhenSameRank(self):
    with self.test_session():
      a = [[1., 2], [2., 3]]
      b = [[1., 2], [2., 3]]
      x = [[.5, .5]]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())

  def testPdfXStretchedInBroadcastWhenLowerRank(self):
    with self.test_session():
      a = [[1., 2], [2., 3]]
      b = [[1., 2], [2., 3]]
      x = [.5, .5]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())

  def testBetaMean(self):
    with session.Session():
      a = [1., 2, 3]
      b = [2., 4, 1.2]
      expected_mean = stats.beta.mean(a, b)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.mean().get_shape(), (3,))
      self.assertAllClose(expected_mean, dist.mean().eval())

  def testBetaVariance(self):
    with session.Session():
      a = [1., 2, 3]
      b = [2., 4, 1.2]
      expected_variance = stats.beta.var(a, b)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.variance().get_shape(), (3,))
      self.assertAllClose(expected_variance, dist.variance().eval())

  def testBetaMode(self):
    with session.Session():
      a = np.array([1.1, 2, 3])
      b = np.array([2., 4, 1.2])
      # Mode of Beta(a, b) is (a - 1) / (a + b - 2), defined for a, b > 1.
      expected_mode = (a - 1) / (a + b - 2)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.mode().get_shape(), (3,))
      self.assertAllClose(expected_mode, dist.mode().eval())

  def testBetaModeInvalid(self):
    # With allow_nan_stats=False, mode() must fail when a <= 1 or b <= 1.
    with session.Session():
      a = np.array([1., 2, 3])
      b = np.array([2., 4, 1.2])
      dist = beta_lib.Beta(a, b, allow_nan_stats=False)
      with self.assertRaisesOpError("Condition x < y.*"):
        dist.mode().eval()
      a = np.array([2., 2, 3])
      b = np.array([1., 4, 1.2])
      dist = beta_lib.Beta(a, b, allow_nan_stats=False)
      with self.assertRaisesOpError("Condition x < y.*"):
        dist.mode().eval()

  def testBetaModeEnableAllowNanStats(self):
    # With allow_nan_stats=True, an undefined mode yields NaN instead.
    with session.Session():
      a = np.array([1., 2, 3])
      b = np.array([2., 4, 1.2])
      dist = beta_lib.Beta(a, b, allow_nan_stats=True)
      expected_mode = (a - 1) / (a + b - 2)
      expected_mode[0] = np.nan
      self.assertEqual((3,), dist.mode().get_shape())
      self.assertAllClose(expected_mode, dist.mode().eval())
      a = np.array([2., 2, 3])
      b = np.array([1., 4, 1.2])
      dist = beta_lib.Beta(a, b, allow_nan_stats=True)
      expected_mode = (a - 1) / (a + b - 2)
      expected_mode[0] = np.nan
      self.assertEqual((3,), dist.mode().get_shape())
      self.assertAllClose(expected_mode, dist.mode().eval())

  def testBetaEntropy(self):
    with session.Session():
      a = [1., 2, 3]
      b = [2., 4, 1.2]
      expected_entropy = stats.beta.entropy(a, b)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.entropy().get_shape(), (3,))
      self.assertAllClose(expected_entropy, dist.entropy().eval())

  def testBetaSample(self):
    """Draws 100k samples and checks range, KS statistic, mean, variance."""
    with self.test_session():
      a = 1.
      b = 2.
      beta = beta_lib.Beta(a, b)
      n = constant_op.constant(100000)
      samples = beta.sample(n)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000,))
      self.assertFalse(np.any(sample_values < 0.0))
      self.assertLess(
          stats.kstest(
              # Beta is a univariate distribution.
              sample_values,
              stats.beta(a=1., b=2.).cdf)[0],
          0.01)
      # The standard error of the sample mean is 1 / (sqrt(18 * n))
      self.assertAllClose(
          sample_values.mean(axis=0), stats.beta.mean(a, b), atol=1e-2)
      self.assertAllClose(
          np.cov(sample_values, rowvar=0), stats.beta.var(a, b), atol=1e-1)

  # Test that sampling with the same seed twice gives the same results.
  def testBetaSampleMultipleTimes(self):
    with self.test_session():
      a_val = 1.
      b_val = 2.
      n_val = 100
      random_seed.set_random_seed(654321)
      beta1 = beta_lib.Beta(concentration1=a_val,
                            concentration0=b_val,
                            name="beta1")
      samples1 = beta1.sample(n_val, seed=123456).eval()
      random_seed.set_random_seed(654321)
      beta2 = beta_lib.Beta(concentration1=a_val,
                            concentration0=b_val,
                            name="beta2")
      samples2 = beta2.sample(n_val, seed=123456).eval()
      self.assertAllClose(samples1, samples2)

  def testBetaSampleMultidimensional(self):
    with self.test_session():
      a = np.random.rand(3, 2, 2).astype(np.float32)
      b = np.random.rand(3, 2, 2).astype(np.float32)
      beta = beta_lib.Beta(a, b)
      n = constant_op.constant(100000)
      samples = beta.sample(n)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
      self.assertFalse(np.any(sample_values < 0.0))
      self.assertAllClose(
          sample_values[:, 1, :].mean(axis=0),
          stats.beta.mean(a, b)[1, :],
          atol=1e-1)

  def testBetaCdf(self):
    with self.test_session():
      shape = (30, 40, 50)
      for dt in (np.float32, np.float64):
        a = 10. * np.random.random(shape).astype(dt)
        b = 10. * np.random.random(shape).astype(dt)
        x = np.random.random(shape).astype(dt)
        actual = beta_lib.Beta(a, b).cdf(x).eval()
        # np.bool_ replaces the np.bool alias, which was deprecated in
        # NumPy 1.20 and removed in 1.24; the resulting dtype is identical.
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
        self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)

  def testBetaLogCdf(self):
    with self.test_session():
      shape = (30, 40, 50)
      for dt in (np.float32, np.float64):
        a = 10. * np.random.random(shape).astype(dt)
        b = 10. * np.random.random(shape).astype(dt)
        x = np.random.random(shape).astype(dt)
        actual = math_ops.exp(beta_lib.Beta(a, b).log_cdf(x)).eval()
        # np.bool_ replaces the removed np.bool alias (see testBetaCdf).
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
        self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)

  def testBetaWithSoftplusConcentration(self):
    with self.test_session():
      a, b = -4.2, -9.1
      dist = beta_lib.BetaWithSoftplusConcentration(a, b)
      self.assertAllClose(nn_ops.softplus(a).eval(), dist.concentration1.eval())
      self.assertAllClose(nn_ops.softplus(b).eval(), dist.concentration0.eval())

  def testBetaBetaKL(self):
    """Checks KL(Beta||Beta) against the closed form and KL(d, d) == 0."""
    with self.test_session() as sess:
      for shape in [(10,), (4, 5)]:
        a1 = 6.0 * np.random.random(size=shape) + 1e-4
        b1 = 6.0 * np.random.random(size=shape) + 1e-4
        a2 = 6.0 * np.random.random(size=shape) + 1e-4
        b2 = 6.0 * np.random.random(size=shape) + 1e-4
        # Take inverse softplus of values to test BetaWithSoftplusConcentration
        a1_sp = np.log(np.exp(a1) - 1.0)
        b1_sp = np.log(np.exp(b1) - 1.0)
        a2_sp = np.log(np.exp(a2) - 1.0)
        b2_sp = np.log(np.exp(b2) - 1.0)
        d1 = beta_lib.Beta(concentration1=a1, concentration0=b1)
        d2 = beta_lib.Beta(concentration1=a2, concentration0=b2)
        d1_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a1_sp,
                                                       concentration0=b1_sp)
        d2_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a2_sp,
                                                       concentration0=b2_sp)
        # Closed form: KL = ln B(a2,b2) - ln B(a1,b1)
        #                   + (a1-a2) psi(a1) + (b1-b2) psi(b1)
        #                   + (a2-a1+b2-b1) psi(a1+b1)
        kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
                       (a1 - a2) * special.digamma(a1) +
                       (b1 - b2) * special.digamma(b1) +
                       (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))
        for dist1 in [d1, d1_sp]:
          for dist2 in [d2, d2_sp]:
            kl = kullback_leibler.kl_divergence(dist1, dist2)
            kl_val = sess.run(kl)
            self.assertEqual(kl.get_shape(), shape)
            self.assertAllClose(kl_val, kl_expected)
        # Make sure KL(d1||d1) is 0
        kl_same = sess.run(kullback_leibler.kl_divergence(d1, d1))
        self.assertAllClose(kl_same, np.zeros_like(kl_expected))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
#ifndef REPLAY_H
#define REPLAY_H
#include "hash.h"
struct repository;
struct rev_info;
/*
 * A set of options that can be passed to `replay_revisions()`.
 */
struct replay_revisions_options {
	/*
	 * Starting point at which to create the new commits; must be a branch
	 * name. The branch will be updated to point to the rewritten commits.
	 * This option is mutually exclusive with `onto`.
	 */
	const char *advance;
	/*
	 * Starting point at which to create the new commits; must be a
	 * committish. References pointing at descendants of `onto` will be
	 * updated to point to the new commits.
	 */
	const char *onto;
	/*
	 * Update branches that point at commits in the given revision range.
	 * Requires `onto` to be set.
	 */
	int contained;
};
/* This struct is used as an out-parameter by `replay_revisions()`. */
struct replay_result {
	/*
	 * The set of reference updates that are caused by replaying the
	 * commits. Each entry records the ref's name together with its old
	 * and new object IDs.
	 */
	struct replay_ref_update {
		char *refname;
		struct object_id old_oid;
		struct object_id new_oid;
	} *updates;
	/* Used entries in `updates` and its allocated size (_nr/_alloc pair). */
	size_t updates_nr, updates_alloc;
};
/* Release the resources held by a `replay_result`. */
void replay_result_release(struct replay_result *result);

/*
 * Replay a set of commits onto a new location. Leaves both the working tree,
 * index and references untouched. Reference updates caused by the replay will
 * be recorded in the `updates` out pointer.
 *
 * Returns 0 on success, 1 on conflict and a negative error code otherwise.
 */
int replay_revisions(struct rev_info *revs,
		     struct replay_revisions_options *opts,
		     struct replay_result *out);
#endif | c | github | https://github.com/git/git | replay.h |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from eve.utils import ParsedRequest
from eve.versioning import resolve_document_version
from apps.archive.common import insert_into_versions, is_assigned_to_a_desk, get_expiry
from superdesk.resource import Resource
from superdesk.errors import SuperdeskApiError, InvalidStateTransitionError
from superdesk.notification import push_notification
from superdesk.utc import utcnow
from apps.archive.common import on_create_item, item_url, update_version
from superdesk.services import BaseService
from apps.content import metadata_schema
import superdesk
from superdesk.activity import add_activity, ACTIVITY_CREATE, ACTIVITY_UPDATE
from apps.archive.archive import get_subject
from superdesk.workflow import is_workflow_state_transition_valid
from copy import copy
from eve.utils import config
from apps.archive.archive import SOURCE as ARCHIVE
from superdesk import get_resource_service
# Workflow states a task may be in; newly created tasks start as 'todo'.
task_statuses = ['todo', 'in_progress', 'done']
default_status = 'todo'
def init_app(app):
    """Register the ``tasks`` resource and its service with the app."""
    backend = superdesk.get_backend()
    source = TaskResource.datasource['source']
    service = TasksService(source, backend=backend)
    TaskResource('tasks', app=app, service=service)
def send_to(doc, desk_id=None, stage_id=None):
    """Send item to given desk and stage, updating its task and expiry.

    :param doc: item to be sent
    :param desk_id: id of desk where item should be sent
    :param stage_id: optional stage within the desk
    :raises SuperdeskApiError.notFoundError: if the desk or stage id does
        not resolve to an existing document
    """
    task = doc.get('task', {})
    task.setdefault('desk', desk_id)
    task.setdefault('stage', stage_id)
    calculate_expiry_from = None
    # Desk given without a stage: route the item to the desk's incoming stage.
    if desk_id and not stage_id:
        desk = superdesk.get_resource_service('desks').find_one(req=None, _id=desk_id)
        if not desk:
            raise SuperdeskApiError.notFoundError('Invalid desk identifier %s' % desk_id)
        calculate_expiry_from = desk
        task['desk'] = desk_id
        task['stage'] = desk.get('incoming_stage')
    # Explicit stage: derive the desk from the stage and pick up the stage's
    # task status, if it defines one.
    if stage_id:
        stage = get_resource_service('stages').find_one(req=None, _id=stage_id)
        if not stage:
            raise SuperdeskApiError.notFoundError('Invalid stage identifier %s' % stage_id)
        calculate_expiry_from = stage
        task['desk'] = stage['desk']
        task['stage'] = stage_id
        if stage.get('task_status'):
            task['status'] = stage['task_status']
    doc['task'] = task
    # Expiry is computed from the desk/stage the item ends up on.
    doc['expiry'] = get_expiry(desk_or_stage_doc=calculate_expiry_from)
class TaskResource(Resource):
    """Eve resource definition for tasks.

    Tasks are stored in the ``archive`` collection; an item counts as a
    task when it carries a ``task`` dict and is not spiked.
    """
    datasource = {
        'source': 'archive',
        'default_sort': [('_updated', -1)],
        # Mongo filter: only items that have a `task` field.
        'filter': {'task': {'$exists': True}},
        # Elastic filter: same condition, plus excluding spiked items.
        'elastic_filter': {'bool': {
            'must': {'exists': {'field': 'task'}},
            'must_not': {'term': {'state': 'spiked'}},
        }}
    }
    item_url = item_url
    schema = {
        'slugline': metadata_schema['slugline'],
        'description_text': metadata_schema['description'],
        'type': metadata_schema['type'],
        'planning_item': Resource.rel('planning', True, type='string'),
        'task': {
            'type': 'dict',
            'schema': {
                'status': {
                    'type': 'string',
                    'allowed': task_statuses,
                    'default': default_status
                },
                'due_date': {'type': 'datetime'},
                'started_at': {'type': 'datetime'},
                'finished_at': {'type': 'datetime'},
                'user': Resource.rel('users', True),
                'desk': Resource.rel('desks', True),
                'stage': Resource.rel('stages', True)
            }
        }
    }
    # All write operations are gated by the 'tasks' privilege.
    privileges = {'POST': 'tasks', 'PATCH': 'tasks', 'DELETE': 'tasks'}
class TasksService(BaseService):
    """Service backing the ``tasks`` endpoint.

    Keeps a task's timestamps, workflow state and desk/stage assignment in
    sync, and emits push notifications plus activity entries on changes.
    """

    def get(self, req, lookup):
        # Delegate to the backend with a default (empty) request when none
        # is given.
        if req is None:
            req = ParsedRequest()
        return self.backend.get('tasks', req=req, lookup=lookup)

    def update_times(self, doc):
        """Stamp started_at/finished_at when the status first reaches
        'in_progress'/'done'; setdefault keeps earlier stamps intact."""
        task = doc.get('task', {})
        status = task.get('status', None)
        if status == 'in_progress':
            task.setdefault('started_at', utcnow())
        if status == 'done':
            task.setdefault('finished_at', utcnow())

    def __is_content_moved_from_desk(self, doc):
        """
        Returns True if the 'doc' is being moved from a desk (i.e. its task
        has no desk assigned). False otherwise.
        """
        return doc.get('task', {}).get('desk', None) is None

    def __is_content_assigned_to_new_desk(self, original, updates):
        """
        Checks if the content is assigned to a new desk.

        :return: True if the content is being moved to a new desk. False otherwise.
        """
        return str(original.get('task', {}).get('desk', '')) != str(updates.get('task', {}).get('desk', ''))

    def __update_state(self, updates, original):
        """On a desk change, validate the 'move' workflow transition and set
        the content state to 'draft' (no desk) or 'submitted'."""
        if self.__is_content_assigned_to_new_desk(original, updates):
            # check if the preconditions for the action are in place
            original_state = original[config.CONTENT_STATE]
            if not is_workflow_state_transition_valid('move', original_state):
                raise InvalidStateTransitionError()
            updates[config.CONTENT_STATE] = 'draft' if self.__is_content_moved_from_desk(updates) else 'submitted'
            resolve_document_version(updates, ARCHIVE, 'PATCH', original)

    def update_stage(self, doc):
        # Route the doc via send_to(), which also recomputes its expiry.
        task = doc.get('task', {})
        desk_id = task.get('desk', None)
        stage_id = task.get('stage', None)
        send_to(doc, desk_id, stage_id)

    def on_create(self, docs):
        on_create_item(docs)
        for doc in docs:
            resolve_document_version(doc, ARCHIVE, 'POST')
            self.update_times(doc)
            self.update_stage(doc)

    def on_created(self, docs):
        push_notification(self.datasource, created=1)
        push_notification('task:new')
        for doc in docs:
            insert_into_versions(doc['_id'])
            if is_assigned_to_a_desk(doc):
                add_activity(ACTIVITY_CREATE, 'added new task {{ subject }} of type {{ type }}',
                             self.datasource, item=doc,
                             subject=get_subject(doc), type=doc['type'])

    def on_update(self, updates, original):
        self.update_times(updates)
        if is_assigned_to_a_desk(updates):
            self.__update_state(updates, original)
        # Stage change: refresh expiry and pick up the stage's task status.
        new_stage_id = updates.get('task', {}).get('stage', '')
        old_stage_id = original.get('task', {}).get('stage', '')
        if new_stage_id and new_stage_id != old_stage_id:
            new_stage = get_resource_service('stages').find_one(req=None, _id=new_stage_id)
            if not new_stage:
                raise SuperdeskApiError.notFoundError('Invalid stage identifier %s' % new_stage)
            updates['expiry'] = get_expiry(new_stage['desk'], new_stage_id)
            if new_stage.get('task_status'):
                updates['task']['status'] = new_stage['task_status']
        update_version(updates, original)

    def on_updated(self, updates, original):
        # Notify either a stage move (with old/new desk+stage) or a plain
        # update.
        new_task = updates.get('task', {})
        old_task = original.get('task', {})
        if new_task.get('stage') != old_task.get('stage'):
            push_notification('task:stage',
                              new_stage=str(new_task.get('stage', '')),
                              old_stage=str(old_task.get('stage', '')),
                              new_desk=str(new_task.get('desk', '')),
                              old_desk=str(old_task.get('desk', ''))
                              )
        else:
            push_notification(self.datasource, updated=1)
        updated = copy(original)
        updated.update(updates)
        if is_assigned_to_a_desk(updated):
            if self.__is_content_assigned_to_new_desk(original, updates):
                insert_into_versions(original['_id'])
            add_activity(ACTIVITY_UPDATE, 'updated task {{ subject }} for item {{ type }}',
                         self.datasource, item=updated, subject=get_subject(updated), type=updated['type'])

    def on_deleted(self, doc):
        push_notification(self.datasource, deleted=1)

    def assign_user(self, item_id, user):
        # NOTE(review): patches {'task': user} wholesale — presumably `user`
        # is a task dict carrying the user assignment; confirm with callers.
        return self.patch(item_id, {'task': user})
superdesk.privilege(name='tasks', label='Tasks Management', description='Tasks Management') | unknown | codeparrot/codeparrot-clean | ||
from setuptools import setup, find_packages
from codecs import open
from os import path
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as readme_file:
        contents = readme_file.read()
    return contents
# Distribution metadata. The conversion configs and dictionaries are shipped
# inside the ``opencc`` package via ``package_data``.
setup(
    name='opencc-python-reimplemented',
    version='0.1.6',
    description='OpenCC made with Python',
    # Long description is the README, rendered as Markdown on PyPI.
    long_description=readme(),
    long_description_content_type='text/markdown',
    url='https://github.com/yichen0831/opencc-python',
    author='Yichen Huang (Eugene)',
    author_email='yichen0831@gmail.com',
    license='Apache License',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='chinese conversion',
    packages=['opencc'],
    # Pure-Python package with no runtime dependencies.
    install_requires=[],
    package_data={
        'opencc': [
            'README.md',
            'LICENSE.txt',
            'NOTICE.txt',
            'config/hk2s.json', 'config/s2hk.json', 'config/s2t.json',
            'config/s2tw.json', 'config/s2twp.json', 'config/t2hk.json',
            'config/t2s.json', 'config/t2tw.json', 'config/tw2s.json',
            'config/tw2sp.json',
            'dictionary/HKVariants.txt',
            'dictionary/HKVariantsPhrases.txt',
            'dictionary/HKVariantsRev.txt',
            'dictionary/HKVariantsRevPhrases.txt',
            'dictionary/JPVariants.txt',
            'dictionary/STCharacters.txt',
            'dictionary/STPhrases.txt',
            'dictionary/TSCharacters.txt',
            'dictionary/TSPhrases.txt',
            'dictionary/TWPhrases.txt',
            'dictionary/TWPhrasesRev.txt',
            'dictionary/TWVariants.txt',
            'dictionary/TWVariantsRev.txt',
            'dictionary/TWVariantsRevPhrases.txt',
        ]
    },
)
/* Errno module */
// Need limited C API version 3.13 for Py_mod_gil
#include "pyconfig.h" // Py_GIL_DISABLED
#ifndef Py_GIL_DISABLED
# define Py_LIMITED_API 0x030d0000
#endif
#include "Python.h"
#include <errno.h> // EPIPE
/* Windows socket errors (WSA*) */
#ifdef MS_WINDOWS
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
// The following constants were added to errno.h in VS2010 but have
// preferred WSA equivalents.
# undef EADDRINUSE
# undef EADDRNOTAVAIL
# undef EAFNOSUPPORT
# undef EALREADY
# undef ECONNABORTED
# undef ECONNREFUSED
# undef ECONNRESET
# undef EDESTADDRREQ
# undef EHOSTUNREACH
# undef EINPROGRESS
# undef EISCONN
# undef ELOOP
# undef EMSGSIZE
# undef ENETDOWN
# undef ENETRESET
# undef ENETUNREACH
# undef ENOBUFS
# undef ENOPROTOOPT
# undef ENOTCONN
# undef ENOTSOCK
# undef EOPNOTSUPP
# undef EPROTONOSUPPORT
# undef EPROTOTYPE
# undef ETIMEDOUT
# undef EWOULDBLOCK
#endif
/*
* Pull in the system error definitions
*/
/* The errno module exposes no functions; it only defines integer
 * constants, so the method table is empty. */
static PyMethodDef errno_methods[] = {
    {NULL, NULL}
};
/* Insert the mapping name_str -> code_int into the module dict, and the
 * reverse mapping code_int -> name_str into the errorcode dict.
 *
 * Returns 0 on success and -1 on failure (with a Python exception set by
 * the failing C-API call). */
static int
_add_errcode(PyObject *module_dict, PyObject *error_dict, const char *name_str, int code_int)
{
    int result = -1;

    PyObject *name = PyUnicode_FromString(name_str);
    if (name == NULL) {
        return -1;
    }
    PyObject *code = PyLong_FromLong(code_int);
    if (code == NULL) {
        Py_DECREF(name);
        return -1;
    }

    /* Both insertions must succeed; short-circuiting preserves the original
     * behaviour of skipping the second insert when the first one fails. */
    if (PyDict_SetItem(module_dict, name, code) == 0
        && PyDict_SetItem(error_dict, code, name) == 0) {
        result = 0;
    }

    Py_DECREF(name);
    Py_DECREF(code);
    return result;
}
static int
errno_exec(PyObject *module)
{
PyObject *module_dict = PyModule_GetDict(module); // Borrowed ref.
if (module_dict == NULL) {
return -1;
}
PyObject *error_dict = PyDict_New();
if (error_dict == NULL) {
return -1;
}
if (PyDict_SetItemString(module_dict, "errorcode", error_dict) < 0) {
Py_DECREF(error_dict);
return -1;
}
/* Macro so I don't have to edit each and every line below... */
#define add_errcode(name, code, comment) \
do { \
if (_add_errcode(module_dict, error_dict, name, code) < 0) { \
Py_DECREF(error_dict); \
return -1; \
} \
} while (0);
/*
* The names and comments are borrowed from linux/include/errno.h,
* which should be pretty all-inclusive. However, the Solaris specific
* names and comments are borrowed from sys/errno.h in Solaris.
* MacOSX specific names and comments are borrowed from sys/errno.h in
* MacOSX.
*/
#ifdef ENODEV
add_errcode("ENODEV", ENODEV, "No such device");
#endif
#ifdef ENOCSI
add_errcode("ENOCSI", ENOCSI, "No CSI structure available");
#endif
#ifdef EHOSTUNREACH
add_errcode("EHOSTUNREACH", EHOSTUNREACH, "No route to host");
#else
#ifdef WSAEHOSTUNREACH
add_errcode("EHOSTUNREACH", WSAEHOSTUNREACH, "No route to host");
#endif
#endif
#ifdef ENOMSG
add_errcode("ENOMSG", ENOMSG, "No message of desired type");
#endif
#ifdef EUCLEAN
add_errcode("EUCLEAN", EUCLEAN, "Structure needs cleaning");
#endif
#ifdef EL2NSYNC
add_errcode("EL2NSYNC", EL2NSYNC, "Level 2 not synchronized");
#endif
#ifdef EL2HLT
add_errcode("EL2HLT", EL2HLT, "Level 2 halted");
#endif
#ifdef ENODATA
add_errcode("ENODATA", ENODATA, "No data available");
#endif
#ifdef ENOTBLK
add_errcode("ENOTBLK", ENOTBLK, "Block device required");
#endif
#ifdef ENOSYS
add_errcode("ENOSYS", ENOSYS, "Function not implemented");
#endif
#ifdef EPIPE
add_errcode("EPIPE", EPIPE, "Broken pipe");
#endif
#ifdef EINVAL
add_errcode("EINVAL", EINVAL, "Invalid argument");
#else
#ifdef WSAEINVAL
add_errcode("EINVAL", WSAEINVAL, "Invalid argument");
#endif
#endif
#ifdef EOVERFLOW
add_errcode("EOVERFLOW", EOVERFLOW, "Value too large for defined data type");
#endif
#ifdef EADV
add_errcode("EADV", EADV, "Advertise error");
#endif
#ifdef EINTR
add_errcode("EINTR", EINTR, "Interrupted system call");
#else
#ifdef WSAEINTR
add_errcode("EINTR", WSAEINTR, "Interrupted system call");
#endif
#endif
#ifdef EUSERS
add_errcode("EUSERS", EUSERS, "Too many users");
#else
#ifdef WSAEUSERS
add_errcode("EUSERS", WSAEUSERS, "Too many users");
#endif
#endif
#ifdef ENOTEMPTY
add_errcode("ENOTEMPTY", ENOTEMPTY, "Directory not empty");
#else
#ifdef WSAENOTEMPTY
add_errcode("ENOTEMPTY", WSAENOTEMPTY, "Directory not empty");
#endif
#endif
#ifdef ENOBUFS
add_errcode("ENOBUFS", ENOBUFS, "No buffer space available");
#else
#ifdef WSAENOBUFS
add_errcode("ENOBUFS", WSAENOBUFS, "No buffer space available");
#endif
#endif
#ifdef EPROTO
add_errcode("EPROTO", EPROTO, "Protocol error");
#endif
#ifdef EREMOTE
add_errcode("EREMOTE", EREMOTE, "Object is remote");
#else
#ifdef WSAEREMOTE
add_errcode("EREMOTE", WSAEREMOTE, "Object is remote");
#endif
#endif
#ifdef ENAVAIL
add_errcode("ENAVAIL", ENAVAIL, "No XENIX semaphores available");
#endif
#ifdef ECHILD
add_errcode("ECHILD", ECHILD, "No child processes");
#endif
#ifdef ELOOP
add_errcode("ELOOP", ELOOP, "Too many symbolic links encountered");
#else
#ifdef WSAELOOP
add_errcode("ELOOP", WSAELOOP, "Too many symbolic links encountered");
#endif
#endif
#ifdef EXDEV
add_errcode("EXDEV", EXDEV, "Cross-device link");
#endif
#ifdef E2BIG
add_errcode("E2BIG", E2BIG, "Arg list too long");
#endif
#ifdef ESRCH
add_errcode("ESRCH", ESRCH, "No such process");
#endif
#ifdef EMSGSIZE
add_errcode("EMSGSIZE", EMSGSIZE, "Message too long");
#else
#ifdef WSAEMSGSIZE
add_errcode("EMSGSIZE", WSAEMSGSIZE, "Message too long");
#endif
#endif
#ifdef EAFNOSUPPORT
add_errcode("EAFNOSUPPORT", EAFNOSUPPORT, "Address family not supported by protocol");
#else
#ifdef WSAEAFNOSUPPORT
add_errcode("EAFNOSUPPORT", WSAEAFNOSUPPORT, "Address family not supported by protocol");
#endif
#endif
#ifdef EBADR
add_errcode("EBADR", EBADR, "Invalid request descriptor");
#endif
#ifdef EHOSTDOWN
add_errcode("EHOSTDOWN", EHOSTDOWN, "Host is down");
#else
#ifdef WSAEHOSTDOWN
add_errcode("EHOSTDOWN", WSAEHOSTDOWN, "Host is down");
#endif
#endif
#ifdef EPFNOSUPPORT
add_errcode("EPFNOSUPPORT", EPFNOSUPPORT, "Protocol family not supported");
#else
#ifdef WSAEPFNOSUPPORT
add_errcode("EPFNOSUPPORT", WSAEPFNOSUPPORT, "Protocol family not supported");
#endif
#endif
#ifdef ENOPROTOOPT
add_errcode("ENOPROTOOPT", ENOPROTOOPT, "Protocol not available");
#else
#ifdef WSAENOPROTOOPT
add_errcode("ENOPROTOOPT", WSAENOPROTOOPT, "Protocol not available");
#endif
#endif
#ifdef EBUSY
add_errcode("EBUSY", EBUSY, "Device or resource busy");
#endif
#ifdef EWOULDBLOCK
add_errcode("EWOULDBLOCK", EWOULDBLOCK, "Operation would block");
#else
#ifdef WSAEWOULDBLOCK
add_errcode("EWOULDBLOCK", WSAEWOULDBLOCK, "Operation would block");
#endif
#endif
#ifdef EBADFD
add_errcode("EBADFD", EBADFD, "File descriptor in bad state");
#endif
#ifdef EDOTDOT
add_errcode("EDOTDOT", EDOTDOT, "RFS specific error");
#endif
#ifdef EISCONN
add_errcode("EISCONN", EISCONN, "Transport endpoint is already connected");
#else
#ifdef WSAEISCONN
add_errcode("EISCONN", WSAEISCONN, "Transport endpoint is already connected");
#endif
#endif
#ifdef ENOANO
add_errcode("ENOANO", ENOANO, "No anode");
#endif
#if defined(__wasi__) && !defined(ESHUTDOWN)
// WASI SDK 16 does not have ESHUTDOWN, shutdown results in EPIPE.
#define ESHUTDOWN EPIPE
#endif
#ifdef ESHUTDOWN
add_errcode("ESHUTDOWN", ESHUTDOWN, "Cannot send after transport endpoint shutdown");
#else
#ifdef WSAESHUTDOWN
add_errcode("ESHUTDOWN", WSAESHUTDOWN, "Cannot send after transport endpoint shutdown");
#endif
#endif
#ifdef ECHRNG
add_errcode("ECHRNG", ECHRNG, "Channel number out of range");
#endif
#ifdef ELIBBAD
add_errcode("ELIBBAD", ELIBBAD, "Accessing a corrupted shared library");
#endif
#ifdef ENONET
add_errcode("ENONET", ENONET, "Machine is not on the network");
#endif
#ifdef EBADE
add_errcode("EBADE", EBADE, "Invalid exchange");
#endif
#ifdef EBADF
add_errcode("EBADF", EBADF, "Bad file number");
#else
#ifdef WSAEBADF
add_errcode("EBADF", WSAEBADF, "Bad file number");
#endif
#endif
#ifdef EMULTIHOP
add_errcode("EMULTIHOP", EMULTIHOP, "Multihop attempted");
#endif
#ifdef EIO
add_errcode("EIO", EIO, "I/O error");
#endif
#ifdef EUNATCH
add_errcode("EUNATCH", EUNATCH, "Protocol driver not attached");
#endif
#ifdef EPROTOTYPE
add_errcode("EPROTOTYPE", EPROTOTYPE, "Protocol wrong type for socket");
#else
#ifdef WSAEPROTOTYPE
add_errcode("EPROTOTYPE", WSAEPROTOTYPE, "Protocol wrong type for socket");
#endif
#endif
#ifdef ENOSPC
add_errcode("ENOSPC", ENOSPC, "No space left on device");
#endif
#ifdef ENOEXEC
add_errcode("ENOEXEC", ENOEXEC, "Exec format error");
#endif
#ifdef EALREADY
add_errcode("EALREADY", EALREADY, "Operation already in progress");
#else
#ifdef WSAEALREADY
add_errcode("EALREADY", WSAEALREADY, "Operation already in progress");
#endif
#endif
#ifdef ENETDOWN
add_errcode("ENETDOWN", ENETDOWN, "Network is down");
#else
#ifdef WSAENETDOWN
add_errcode("ENETDOWN", WSAENETDOWN, "Network is down");
#endif
#endif
#ifdef ENOTNAM
add_errcode("ENOTNAM", ENOTNAM, "Not a XENIX named type file");
#endif
#ifdef EACCES
add_errcode("EACCES", EACCES, "Permission denied");
#else
#ifdef WSAEACCES
add_errcode("EACCES", WSAEACCES, "Permission denied");
#endif
#endif
#ifdef ELNRNG
add_errcode("ELNRNG", ELNRNG, "Link number out of range");
#endif
#ifdef EILSEQ
add_errcode("EILSEQ", EILSEQ, "Illegal byte sequence");
#endif
#ifdef ENOTDIR
add_errcode("ENOTDIR", ENOTDIR, "Not a directory");
#endif
#ifdef ENOTUNIQ
add_errcode("ENOTUNIQ", ENOTUNIQ, "Name not unique on network");
#endif
#ifdef EPERM
add_errcode("EPERM", EPERM, "Operation not permitted");
#endif
#ifdef EDOM
add_errcode("EDOM", EDOM, "Math argument out of domain of func");
#endif
#ifdef EXFULL
add_errcode("EXFULL", EXFULL, "Exchange full");
#endif
#ifdef ECONNREFUSED
add_errcode("ECONNREFUSED", ECONNREFUSED, "Connection refused");
#else
#ifdef WSAECONNREFUSED
add_errcode("ECONNREFUSED", WSAECONNREFUSED, "Connection refused");
#endif
#endif
#ifdef EISDIR
add_errcode("EISDIR", EISDIR, "Is a directory");
#endif
#ifdef EPROTONOSUPPORT
add_errcode("EPROTONOSUPPORT", EPROTONOSUPPORT, "Protocol not supported");
#else
#ifdef WSAEPROTONOSUPPORT
add_errcode("EPROTONOSUPPORT", WSAEPROTONOSUPPORT, "Protocol not supported");
#endif
#endif
#ifdef EROFS
add_errcode("EROFS", EROFS, "Read-only file system");
#endif
#ifdef EADDRNOTAVAIL
add_errcode("EADDRNOTAVAIL", EADDRNOTAVAIL, "Cannot assign requested address");
#else
#ifdef WSAEADDRNOTAVAIL
add_errcode("EADDRNOTAVAIL", WSAEADDRNOTAVAIL, "Cannot assign requested address");
#endif
#endif
#ifdef EIDRM
add_errcode("EIDRM", EIDRM, "Identifier removed");
#endif
#ifdef ECOMM
add_errcode("ECOMM", ECOMM, "Communication error on send");
#endif
#ifdef ESRMNT
add_errcode("ESRMNT", ESRMNT, "Srmount error");
#endif
#ifdef EREMOTEIO
add_errcode("EREMOTEIO", EREMOTEIO, "Remote I/O error");
#endif
#ifdef EL3RST
add_errcode("EL3RST", EL3RST, "Level 3 reset");
#endif
#ifdef EBADMSG
add_errcode("EBADMSG", EBADMSG, "Not a data message");
#endif
#ifdef ENFILE
add_errcode("ENFILE", ENFILE, "File table overflow");
#endif
#ifdef ELIBMAX
add_errcode("ELIBMAX", ELIBMAX, "Attempting to link in too many shared libraries");
#endif
#ifdef ESPIPE
add_errcode("ESPIPE", ESPIPE, "Illegal seek");
#endif
#ifdef ENOLINK
add_errcode("ENOLINK", ENOLINK, "Link has been severed");
#endif
#ifdef ENETRESET
add_errcode("ENETRESET", ENETRESET, "Network dropped connection because of reset");
#else
#ifdef WSAENETRESET
add_errcode("ENETRESET", WSAENETRESET, "Network dropped connection because of reset");
#endif
#endif
#ifdef ETIMEDOUT
add_errcode("ETIMEDOUT", ETIMEDOUT, "Connection timed out");
#else
#ifdef WSAETIMEDOUT
add_errcode("ETIMEDOUT", WSAETIMEDOUT, "Connection timed out");
#endif
#endif
#ifdef ENOENT
add_errcode("ENOENT", ENOENT, "No such file or directory");
#endif
#ifdef EEXIST
add_errcode("EEXIST", EEXIST, "File exists");
#endif
#ifdef EDQUOT
add_errcode("EDQUOT", EDQUOT, "Quota exceeded");
#else
#ifdef WSAEDQUOT
add_errcode("EDQUOT", WSAEDQUOT, "Quota exceeded");
#endif
#endif
#ifdef ENOSTR
add_errcode("ENOSTR", ENOSTR, "Device not a stream");
#endif
#ifdef EBADSLT
add_errcode("EBADSLT", EBADSLT, "Invalid slot");
#endif
#ifdef EBADRQC
add_errcode("EBADRQC", EBADRQC, "Invalid request code");
#endif
#ifdef ELIBACC
add_errcode("ELIBACC", ELIBACC, "Can not access a needed shared library");
#endif
#ifdef EFAULT
add_errcode("EFAULT", EFAULT, "Bad address");
#else
#ifdef WSAEFAULT
add_errcode("EFAULT", WSAEFAULT, "Bad address");
#endif
#endif
#ifdef EFBIG
add_errcode("EFBIG", EFBIG, "File too large");
#endif
#ifdef EDEADLK
add_errcode("EDEADLK", EDEADLK, "Resource deadlock would occur");
#endif
#ifdef ENOTCONN
add_errcode("ENOTCONN", ENOTCONN, "Transport endpoint is not connected");
#else
#ifdef WSAENOTCONN
add_errcode("ENOTCONN", WSAENOTCONN, "Transport endpoint is not connected");
#endif
#endif
#ifdef EDESTADDRREQ
add_errcode("EDESTADDRREQ", EDESTADDRREQ, "Destination address required");
#else
#ifdef WSAEDESTADDRREQ
add_errcode("EDESTADDRREQ", WSAEDESTADDRREQ, "Destination address required");
#endif
#endif
#ifdef ELIBSCN
add_errcode("ELIBSCN", ELIBSCN, ".lib section in a.out corrupted");
#endif
#ifdef ENOLCK
add_errcode("ENOLCK", ENOLCK, "No record locks available");
#endif
#ifdef EISNAM
add_errcode("EISNAM", EISNAM, "Is a named type file");
#endif
#ifdef ECONNABORTED
add_errcode("ECONNABORTED", ECONNABORTED, "Software caused connection abort");
#else
#ifdef WSAECONNABORTED
add_errcode("ECONNABORTED", WSAECONNABORTED, "Software caused connection abort");
#endif
#endif
#ifdef ENETUNREACH
add_errcode("ENETUNREACH", ENETUNREACH, "Network is unreachable");
#else
#ifdef WSAENETUNREACH
add_errcode("ENETUNREACH", WSAENETUNREACH, "Network is unreachable");
#endif
#endif
#ifdef ESTALE
add_errcode("ESTALE", ESTALE, "Stale NFS file handle");
#else
#ifdef WSAESTALE
add_errcode("ESTALE", WSAESTALE, "Stale NFS file handle");
#endif
#endif
#ifdef ENOSR
add_errcode("ENOSR", ENOSR, "Out of streams resources");
#endif
#ifdef ENOMEM
add_errcode("ENOMEM", ENOMEM, "Out of memory");
#endif
#ifdef ENOTSOCK
add_errcode("ENOTSOCK", ENOTSOCK, "Socket operation on non-socket");
#else
#ifdef WSAENOTSOCK
add_errcode("ENOTSOCK", WSAENOTSOCK, "Socket operation on non-socket");
#endif
#endif
#ifdef ESTRPIPE
add_errcode("ESTRPIPE", ESTRPIPE, "Streams pipe error");
#endif
#ifdef EMLINK
add_errcode("EMLINK", EMLINK, "Too many links");
#endif
#ifdef ERANGE
add_errcode("ERANGE", ERANGE, "Math result not representable");
#endif
#ifdef ELIBEXEC
add_errcode("ELIBEXEC", ELIBEXEC, "Cannot exec a shared library directly");
#endif
#ifdef EL3HLT
add_errcode("EL3HLT", EL3HLT, "Level 3 halted");
#endif
#ifdef ECONNRESET
add_errcode("ECONNRESET", ECONNRESET, "Connection reset by peer");
#else
#ifdef WSAECONNRESET
add_errcode("ECONNRESET", WSAECONNRESET, "Connection reset by peer");
#endif
#endif
#ifdef EADDRINUSE
add_errcode("EADDRINUSE", EADDRINUSE, "Address already in use");
#else
#ifdef WSAEADDRINUSE
add_errcode("EADDRINUSE", WSAEADDRINUSE, "Address already in use");
#endif
#endif
#ifdef EOPNOTSUPP
add_errcode("EOPNOTSUPP", EOPNOTSUPP, "Operation not supported on transport endpoint");
#else
#ifdef WSAEOPNOTSUPP
add_errcode("EOPNOTSUPP", WSAEOPNOTSUPP, "Operation not supported on transport endpoint");
#endif
#endif
#ifdef EREMCHG
add_errcode("EREMCHG", EREMCHG, "Remote address changed");
#endif
#ifdef EAGAIN
add_errcode("EAGAIN", EAGAIN, "Try again");
#endif
#ifdef ENAMETOOLONG
add_errcode("ENAMETOOLONG", ENAMETOOLONG, "File name too long");
#else
#ifdef WSAENAMETOOLONG
add_errcode("ENAMETOOLONG", WSAENAMETOOLONG, "File name too long");
#endif
#endif
#ifdef ENOTTY
add_errcode("ENOTTY", ENOTTY, "Not a typewriter");
#endif
#ifdef ERESTART
add_errcode("ERESTART", ERESTART, "Interrupted system call should be restarted");
#endif
#ifdef ESOCKTNOSUPPORT
add_errcode("ESOCKTNOSUPPORT", ESOCKTNOSUPPORT, "Socket type not supported");
#else
#ifdef WSAESOCKTNOSUPPORT
add_errcode("ESOCKTNOSUPPORT", WSAESOCKTNOSUPPORT, "Socket type not supported");
#endif
#endif
#ifdef ETIME
add_errcode("ETIME", ETIME, "Timer expired");
#endif
#ifdef EBFONT
add_errcode("EBFONT", EBFONT, "Bad font file format");
#endif
#ifdef EDEADLOCK
add_errcode("EDEADLOCK", EDEADLOCK, "Error EDEADLOCK");
#endif
#ifdef ETOOMANYREFS
add_errcode("ETOOMANYREFS", ETOOMANYREFS, "Too many references: cannot splice");
#else
#ifdef WSAETOOMANYREFS
add_errcode("ETOOMANYREFS", WSAETOOMANYREFS, "Too many references: cannot splice");
#endif
#endif
#ifdef EMFILE
add_errcode("EMFILE", EMFILE, "Too many open files");
#else
#ifdef WSAEMFILE
add_errcode("EMFILE", WSAEMFILE, "Too many open files");
#endif
#endif
#ifdef ETXTBSY
add_errcode("ETXTBSY", ETXTBSY, "Text file busy");
#endif
#ifdef EINPROGRESS
add_errcode("EINPROGRESS", EINPROGRESS, "Operation now in progress");
#else
#ifdef WSAEINPROGRESS
add_errcode("EINPROGRESS", WSAEINPROGRESS, "Operation now in progress");
#endif
#endif
#ifdef ENXIO
add_errcode("ENXIO", ENXIO, "No such device or address");
#endif
#ifdef ENOPKG
add_errcode("ENOPKG", ENOPKG, "Package not installed");
#endif
#ifdef WSASY
add_errcode("WSASY", WSASY, "Error WSASY");
#endif
#ifdef WSAEHOSTDOWN
add_errcode("WSAEHOSTDOWN", WSAEHOSTDOWN, "Host is down");
#endif
#ifdef WSAENETDOWN
add_errcode("WSAENETDOWN", WSAENETDOWN, "Network is down");
#endif
#ifdef WSAENOTSOCK
add_errcode("WSAENOTSOCK", WSAENOTSOCK, "Socket operation on non-socket");
#endif
#ifdef WSAEHOSTUNREACH
add_errcode("WSAEHOSTUNREACH", WSAEHOSTUNREACH, "No route to host");
#endif
#ifdef WSAELOOP
add_errcode("WSAELOOP", WSAELOOP, "Too many symbolic links encountered");
#endif
#ifdef WSAEMFILE
add_errcode("WSAEMFILE", WSAEMFILE, "Too many open files");
#endif
#ifdef WSAESTALE
add_errcode("WSAESTALE", WSAESTALE, "Stale NFS file handle");
#endif
#ifdef WSAVERNOTSUPPORTED
add_errcode("WSAVERNOTSUPPORTED", WSAVERNOTSUPPORTED, "Error WSAVERNOTSUPPORTED");
#endif
#ifdef WSAENETUNREACH
add_errcode("WSAENETUNREACH", WSAENETUNREACH, "Network is unreachable");
#endif
#ifdef WSAEPROCLIM
add_errcode("WSAEPROCLIM", WSAEPROCLIM, "Error WSAEPROCLIM");
#endif
#ifdef WSAEFAULT
add_errcode("WSAEFAULT", WSAEFAULT, "Bad address");
#endif
#ifdef WSANOTINITIALISED
add_errcode("WSANOTINITIALISED", WSANOTINITIALISED, "Error WSANOTINITIALISED");
#endif
#ifdef WSAEUSERS
add_errcode("WSAEUSERS", WSAEUSERS, "Too many users");
#endif
#ifdef WSAMAKEASYNCREPL
add_errcode("WSAMAKEASYNCREPL", WSAMAKEASYNCREPL, "Error WSAMAKEASYNCREPL");
#endif
#ifdef WSAENOPROTOOPT
add_errcode("WSAENOPROTOOPT", WSAENOPROTOOPT, "Protocol not available");
#endif
#ifdef WSAECONNABORTED
add_errcode("WSAECONNABORTED", WSAECONNABORTED, "Software caused connection abort");
#endif
#ifdef WSAENAMETOOLONG
add_errcode("WSAENAMETOOLONG", WSAENAMETOOLONG, "File name too long");
#endif
#ifdef WSAENOTEMPTY
add_errcode("WSAENOTEMPTY", WSAENOTEMPTY, "Directory not empty");
#endif
#ifdef WSAESHUTDOWN
add_errcode("WSAESHUTDOWN", WSAESHUTDOWN, "Cannot send after transport endpoint shutdown");
#endif
#ifdef WSAEAFNOSUPPORT
add_errcode("WSAEAFNOSUPPORT", WSAEAFNOSUPPORT, "Address family not supported by protocol");
#endif
#ifdef WSAETOOMANYREFS
add_errcode("WSAETOOMANYREFS", WSAETOOMANYREFS, "Too many references: cannot splice");
#endif
#ifdef WSAEACCES
add_errcode("WSAEACCES", WSAEACCES, "Permission denied");
#endif
#ifdef WSATR
add_errcode("WSATR", WSATR, "Error WSATR");
#endif
#ifdef WSABASEERR
add_errcode("WSABASEERR", WSABASEERR, "Error WSABASEERR");
#endif
#ifdef WSADESCRIPTIO
add_errcode("WSADESCRIPTIO", WSADESCRIPTIO, "Error WSADESCRIPTIO");
#endif
#ifdef WSAEMSGSIZE
add_errcode("WSAEMSGSIZE", WSAEMSGSIZE, "Message too long");
#endif
#ifdef WSAEBADF
add_errcode("WSAEBADF", WSAEBADF, "Bad file number");
#endif
#ifdef WSAECONNRESET
add_errcode("WSAECONNRESET", WSAECONNRESET, "Connection reset by peer");
#endif
#ifdef WSAGETSELECTERRO
add_errcode("WSAGETSELECTERRO", WSAGETSELECTERRO, "Error WSAGETSELECTERRO");
#endif
#ifdef WSAETIMEDOUT
add_errcode("WSAETIMEDOUT", WSAETIMEDOUT, "Connection timed out");
#endif
#ifdef WSAENOBUFS
add_errcode("WSAENOBUFS", WSAENOBUFS, "No buffer space available");
#endif
#ifdef WSAEDISCON
add_errcode("WSAEDISCON", WSAEDISCON, "Error WSAEDISCON");
#endif
#ifdef WSAEINTR
add_errcode("WSAEINTR", WSAEINTR, "Interrupted system call");
#endif
#ifdef WSAEPROTOTYPE
add_errcode("WSAEPROTOTYPE", WSAEPROTOTYPE, "Protocol wrong type for socket");
#endif
#ifdef WSAHOS
add_errcode("WSAHOS", WSAHOS, "Error WSAHOS");
#endif
#ifdef WSAEADDRINUSE
add_errcode("WSAEADDRINUSE", WSAEADDRINUSE, "Address already in use");
#endif
#ifdef WSAEADDRNOTAVAIL
add_errcode("WSAEADDRNOTAVAIL", WSAEADDRNOTAVAIL, "Cannot assign requested address");
#endif
#ifdef WSAEALREADY
add_errcode("WSAEALREADY", WSAEALREADY, "Operation already in progress");
#endif
#ifdef WSAEPROTONOSUPPORT
add_errcode("WSAEPROTONOSUPPORT", WSAEPROTONOSUPPORT, "Protocol not supported");
#endif
#ifdef WSASYSNOTREADY
add_errcode("WSASYSNOTREADY", WSASYSNOTREADY, "Error WSASYSNOTREADY");
#endif
#ifdef WSAEWOULDBLOCK
add_errcode("WSAEWOULDBLOCK", WSAEWOULDBLOCK, "Operation would block");
#endif
#ifdef WSAEPFNOSUPPORT
add_errcode("WSAEPFNOSUPPORT", WSAEPFNOSUPPORT, "Protocol family not supported");
#endif
#ifdef WSAEOPNOTSUPP
add_errcode("WSAEOPNOTSUPP", WSAEOPNOTSUPP, "Operation not supported on transport endpoint");
#endif
#ifdef WSAEISCONN
add_errcode("WSAEISCONN", WSAEISCONN, "Transport endpoint is already connected");
#endif
#ifdef WSAEDQUOT
add_errcode("WSAEDQUOT", WSAEDQUOT, "Quota exceeded");
#endif
#ifdef WSAENOTCONN
add_errcode("WSAENOTCONN", WSAENOTCONN, "Transport endpoint is not connected");
#endif
#ifdef WSAEREMOTE
add_errcode("WSAEREMOTE", WSAEREMOTE, "Object is remote");
#endif
#ifdef WSAEINVAL
add_errcode("WSAEINVAL", WSAEINVAL, "Invalid argument");
#endif
#ifdef WSAEINPROGRESS
add_errcode("WSAEINPROGRESS", WSAEINPROGRESS, "Operation now in progress");
#endif
#ifdef WSAGETSELECTEVEN
add_errcode("WSAGETSELECTEVEN", WSAGETSELECTEVEN, "Error WSAGETSELECTEVEN");
#endif
#ifdef WSAESOCKTNOSUPPORT
add_errcode("WSAESOCKTNOSUPPORT", WSAESOCKTNOSUPPORT, "Socket type not supported");
#endif
#ifdef WSAGETASYNCERRO
add_errcode("WSAGETASYNCERRO", WSAGETASYNCERRO, "Error WSAGETASYNCERRO");
#endif
#ifdef WSAMAKESELECTREPL
add_errcode("WSAMAKESELECTREPL", WSAMAKESELECTREPL, "Error WSAMAKESELECTREPL");
#endif
#ifdef WSAGETASYNCBUFLE
add_errcode("WSAGETASYNCBUFLE", WSAGETASYNCBUFLE, "Error WSAGETASYNCBUFLE");
#endif
#ifdef WSAEDESTADDRREQ
add_errcode("WSAEDESTADDRREQ", WSAEDESTADDRREQ, "Destination address required");
#endif
#ifdef WSAECONNREFUSED
add_errcode("WSAECONNREFUSED", WSAECONNREFUSED, "Connection refused");
#endif
#ifdef WSAENETRESET
add_errcode("WSAENETRESET", WSAENETRESET, "Network dropped connection because of reset");
#endif
#ifdef WSAN
add_errcode("WSAN", WSAN, "Error WSAN");
#endif
#ifdef ENOMEDIUM
add_errcode("ENOMEDIUM", ENOMEDIUM, "No medium found");
#endif
#ifdef EMEDIUMTYPE
add_errcode("EMEDIUMTYPE", EMEDIUMTYPE, "Wrong medium type");
#endif
#ifdef ECANCELED
add_errcode("ECANCELED", ECANCELED, "Operation Canceled");
#endif
#ifdef ENOKEY
add_errcode("ENOKEY", ENOKEY, "Required key not available");
#endif
#ifdef EHWPOISON
add_errcode("EHWPOISON", EHWPOISON, "Memory page has hardware error");
#endif
#ifdef EKEYEXPIRED
add_errcode("EKEYEXPIRED", EKEYEXPIRED, "Key has expired");
#endif
#ifdef EKEYREVOKED
add_errcode("EKEYREVOKED", EKEYREVOKED, "Key has been revoked");
#endif
#ifdef EKEYREJECTED
add_errcode("EKEYREJECTED", EKEYREJECTED, "Key was rejected by service");
#endif
#ifdef EOWNERDEAD
add_errcode("EOWNERDEAD", EOWNERDEAD, "Owner died");
#endif
#ifdef ENOTRECOVERABLE
add_errcode("ENOTRECOVERABLE", ENOTRECOVERABLE, "State not recoverable");
#endif
#ifdef ERFKILL
add_errcode("ERFKILL", ERFKILL, "Operation not possible due to RF-kill");
#endif
/* Solaris-specific errnos */
#ifdef ECANCELED
add_errcode("ECANCELED", ECANCELED, "Operation canceled");
#endif
#ifdef ENOTSUP
add_errcode("ENOTSUP", ENOTSUP, "Operation not supported");
#endif
#ifdef EOWNERDEAD
add_errcode("EOWNERDEAD", EOWNERDEAD, "Process died with the lock");
#endif
#ifdef ENOTRECOVERABLE
add_errcode("ENOTRECOVERABLE", ENOTRECOVERABLE, "Lock is not recoverable");
#endif
#ifdef ELOCKUNMAPPED
add_errcode("ELOCKUNMAPPED", ELOCKUNMAPPED, "Locked lock was unmapped");
#endif
#ifdef ENOTACTIVE
add_errcode("ENOTACTIVE", ENOTACTIVE, "Facility is not active");
#endif
/* MacOSX specific errnos */
#ifdef EAUTH
add_errcode("EAUTH", EAUTH, "Authentication error");
#endif
#ifdef EBADARCH
add_errcode("EBADARCH", EBADARCH, "Bad CPU type in executable");
#endif
#ifdef EBADEXEC
add_errcode("EBADEXEC", EBADEXEC, "Bad executable (or shared library)");
#endif
#ifdef EBADMACHO
add_errcode("EBADMACHO", EBADMACHO, "Malformed Mach-o file");
#endif
#ifdef EBADRPC
add_errcode("EBADRPC", EBADRPC, "RPC struct is bad");
#endif
#ifdef EDEVERR
add_errcode("EDEVERR", EDEVERR, "Device error");
#endif
#ifdef EFTYPE
add_errcode("EFTYPE", EFTYPE, "Inappropriate file type or format");
#endif
#ifdef ENEEDAUTH
add_errcode("ENEEDAUTH", ENEEDAUTH, "Need authenticator");
#endif
#ifdef ENOATTR
add_errcode("ENOATTR", ENOATTR, "Attribute not found");
#endif
#ifdef ENOPOLICY
add_errcode("ENOPOLICY", ENOPOLICY, "Policy not found");
#endif
#ifdef EPROCLIM
add_errcode("EPROCLIM", EPROCLIM, "Too many processes");
#endif
#ifdef EPROCUNAVAIL
add_errcode("EPROCUNAVAIL", EPROCUNAVAIL, "Bad procedure for program");
#endif
#ifdef EPROGMISMATCH
add_errcode("EPROGMISMATCH", EPROGMISMATCH, "Program version wrong");
#endif
#ifdef EPROGUNAVAIL
add_errcode("EPROGUNAVAIL", EPROGUNAVAIL, "RPC prog. not avail");
#endif
#ifdef EPWROFF
add_errcode("EPWROFF", EPWROFF, "Device power is off");
#endif
#ifdef ERPCMISMATCH
add_errcode("ERPCMISMATCH", ERPCMISMATCH, "RPC version wrong");
#endif
#ifdef ESHLIBVERS
add_errcode("ESHLIBVERS", ESHLIBVERS, "Shared library version mismatch");
#endif
#ifdef EQFULL
add_errcode("EQFULL", EQFULL, "Interface output queue is full");
#endif
#ifdef ENOTCAPABLE
// WASI extension
add_errcode("ENOTCAPABLE", ENOTCAPABLE, "Capabilities insufficient");
#endif
Py_DECREF(error_dict);
return 0;
}
/* Multi-phase initialization slots: errno_exec() populates the module on
 * import; the module is declared safe for per-interpreter GIL use and for
 * free-threaded (no-GIL) builds. */
static PyModuleDef_Slot errno_slots[] = {
    {Py_mod_exec, errno_exec},
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL}  /* sentinel */
};
/* Module docstring (exposed as errno.__doc__). */
PyDoc_STRVAR(errno__doc__,
"This module makes available standard errno system symbols.\n\
\n\
The value of each symbol is the corresponding integer value,\n\
e.g., on most systems, errno.ENOENT equals the integer 2.\n\
\n\
The dictionary errno.errorcode maps numeric codes to symbol names,\n\
e.g., errno.errorcode[2] could be the string 'ENOENT'.\n\
\n\
Symbols that are not relevant to the underlying system are not defined.\n\
\n\
To map error codes to error messages, use the function os.strerror(),\n\
e.g. os.strerror(2) could return 'No such file or directory'.");
/* Module definition: m_size == 0 means the module keeps no per-module
 * state; initialization is driven by errno_slots. */
static struct PyModuleDef errnomodule = {
    PyModuleDef_HEAD_INIT,
    .m_name = "errno",
    .m_doc = errno__doc__,
    .m_size = 0,
    .m_methods = errno_methods,
    .m_slots = errno_slots,
};
/* Entry point for multi-phase init: return the module definition; the
 * interpreter then runs the slots (errno_exec) to create the module. */
PyMODINIT_FUNC
PyInit_errno(void)
{
    return PyModuleDef_Init(&errnomodule);
}
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from oslo_serialization import jsonutils
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_port_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: net1234
fixed_ips:
- subnet: sub1234
ip_address: 10.0.3.21
device_owner: network:dhcp
'''
neutron_port_with_address_pair_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
allowed_address_pairs:
- ip_address: 10.0.3.21
mac_address: 00-B0-D0-86-BB-F7
'''
neutron_port_security_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
port_security_enabled: False
'''
class NeutronPortTest(common.HeatTestCase):
def setUp(self):
    """Stub out the Neutron client calls that the port tests record."""
    super(NeutronPortTest, self).setUp()
    # mox stubs: each test records the exact expected API calls (and their
    # order), then ReplayAll()/VerifyAll() enforce that recording.
    self.m.StubOutWithMock(neutronclient.Client, 'create_port')
    self.m.StubOutWithMock(neutronclient.Client, 'show_port')
    self.m.StubOutWithMock(neutronclient.Client, 'update_port')
    self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
    self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
def test_missing_subnet_id(self):
    """Fixed IP without a subnet: payload carries only ip_address."""
    # Name -> UUID resolution for the port's network.
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    # Expected create payload: fixed_ips entry has no subnet_id key.
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'fixed_ips': [
            {'ip_address': u'10.0.3.21'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    # The resource polls until the port reports ACTIVE.
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_template)
    # Remove 'subnet' from the template's fixed_ips entry.
    t['resources']['port']['properties']['fixed_ips'][0].pop('subnet')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_missing_ip_address(self):
    """Fixed IP without an address: payload carries only subnet_id."""
    # Name -> UUID resolution for both network and subnet.
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('sub1234')
    # Expected create payload: fixed_ips entry has no ip_address key.
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'fixed_ips': [
            {'subnet_id': u'sub1234'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_template)
    # Remove 'ip_address' from the template's fixed_ips entry.
    t['resources']['port']['properties']['fixed_ips'][0].pop('ip_address')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_missing_fixed_ips(self):
    """No fixed_ips at all: create payload omits the key entirely."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    # Expected create payload: no 'fixed_ips' key.
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    # show_port returns server-assigned fixed_ips; create must still succeed.
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
        "fixed_ips": {
            "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
            "ip_address": "10.0.0.2"
        }
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_allowed_address_pair(self):
    """allowed_address_pairs (ip + MAC) is passed through to create_port."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'abcd1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('abcd1234')
    neutronclient.Client.create_port({'port': {
        'network_id': u'abcd1234',
        'allowed_address_pairs': [{
            'ip_address': u'10.0.3.21',
            'mac_address': u'00-B0-D0-86-BB-F7'
        }],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_with_address_pair_template)
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_port_security_enabled(self):
    """port_security_enabled=False is forwarded in the create payload."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'abcd1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('abcd1234')
    neutronclient.Client.create_port({'port': {
        'network_id': u'abcd1234',
        'port_security_enabled': False,
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_security_template)
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_missing_mac_address(self):
    """Address pair without a MAC: payload entry has ip_address only."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'abcd1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('abcd1234')
    # Expected payload: no mac_address key in the address-pair entry.
    neutronclient.Client.create_port({'port': {
        'network_id': u'abcd1234',
        'allowed_address_pairs': [{
            'ip_address': u'10.0.3.21',
        }],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_with_address_pair_template)
    # Remove mac_address from the template's address-pair entry.
    t['resources']['port']['properties']['allowed_address_pairs'][0].pop(
        'mac_address'
    )
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_ip_address_is_cidr(self):
    """Address-pair ip_address may be a CIDR and is passed unchanged."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'abcd1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('abcd1234')
    neutronclient.Client.create_port({'port': {
        'network_id': u'abcd1234',
        'allowed_address_pairs': [{
            'ip_address': u'10.0.3.0/24',
            'mac_address': u'00-B0-D0-86-BB-F7'
        }],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "2e00180a-ff9d-42c4-b701-a0606b243447"
    }})
    neutronclient.Client.show_port(
        '2e00180a-ff9d-42c4-b701-a0606b243447'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "2e00180a-ff9d-42c4-b701-a0606b243447"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_with_address_pair_template)
    # Swap the template's plain address for a CIDR.
    t['resources']['port']['properties'][
        'allowed_address_pairs'][0]['ip_address'] = '10.0.3.0/24'
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def _mock_create_with_security_groups(self, port_prop):
    """Record the create/show expectations for a given port payload.

    :param port_prop: the exact dict expected as the 'port' body of
        create_port.  Ends with ReplayAll(), so record nothing after
        calling this helper.
    """
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('sub1234')
    neutronclient.Client.create_port({'port': port_prop}).AndReturn(
        {'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
def test_security_groups(self):
    """security_groups list is forwarded verbatim in the create payload."""
    port_prop = {
        'network_id': u'net1234',
        'security_groups': ['8a2f582a-e1cd-480f-b85d-b02631c10656',
                            '024613dc-b489-4478-b46f-ada462738740'],
        'fixed_ips': [
            {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}
    self._mock_create_with_security_groups(port_prop)
    t = template_format.parse(neutron_port_template)
    # Inject the security_groups property into the parsed template.
    t['resources']['port']['properties']['security_groups'] = [
        '8a2f582a-e1cd-480f-b85d-b02631c10656',
        '024613dc-b489-4478-b46f-ada462738740']
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_security_groups_empty_list(self):
    """An explicit empty security_groups list is sent as [] (not omitted)."""
    port_prop = {
        'network_id': u'net1234',
        'security_groups': [],
        'fixed_ips': [
            {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'
    }
    self._mock_create_with_security_groups(port_prop)
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties']['security_groups'] = []
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_create_and_update_port(self):
    """Create a port, then update it twice.

    First update adds name/security_groups/device_id/device_owner and a
    QoS policy; second update clears security_groups (falls back to the
    tenant 'default' group) and clears the QoS policy.
    """
    props = {'network_id': u'net1234',
             'name': utils.PhysName('test_stack', 'port'),
             'admin_state_up': True,
             'device_owner': u'network:dhcp'}
    policy_id = '8a2f582a-e1cd-480f-b85d-b02631c10656'
    # First-update resource properties.
    new_props = props.copy()
    new_props['name'] = "new_name"
    new_props['security_groups'] = [
        '8a2f582a-e1cd-480f-b85d-b02631c10656']
    new_props['device_id'] = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    new_props['device_owner'] = 'network:router_interface'
    # Expected update_port body: network_id is never re-sent, and the
    # 'qos_policy' property is translated to 'qos_policy_id'.
    new_props_update = new_props.copy()
    new_props_update.pop('network_id')
    new_props_update['qos_policy_id'] = policy_id
    new_props['qos_policy'] = policy_id
    # Second-update properties: drop security_groups, null the policy.
    new_props1 = new_props.copy()
    new_props1.pop('security_groups')
    new_props1['qos_policy'] = None
    new_props_update1 = new_props_update.copy()
    new_props_update1['security_groups'] = [
        '0389f747-7785-4757-b7bb-2ab07e4b09c3']
    new_props_update1['qos_policy_id'] = None
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronclient.Client.create_port(
        {'port': props}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).MultipleTimes(
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
        "fixed_ips": {
            "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
            "ip_address": "10.0.0.2"
        }
    }})
    # mock (not mox) patch for the QoS policy name->id resolution.
    self.patchobject(neutron.NeutronClientPlugin, 'get_qos_policy_id')
    neutron.NeutronClientPlugin.get_qos_policy_id.return_value = policy_id
    self.stub_QoSPolicyConstraint_validate()
    neutronclient.Client.update_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
        {'port': new_props_update}
    ).AndReturn(None)
    # When security_groups is removed, the resource looks up the tenant's
    # 'default' group and sends its id instead.
    fake_groups_list = {
        'security_groups': [
            {
                'tenant_id': 'dc4b074874244f7693dd65583733a758',
                'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
                'name': 'default',
                'security_group_rules': [],
                'description': 'no protocol'
            }
        ]
    }
    self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
    neutronclient.Client.list_security_groups().AndReturn(
        fake_groups_list)
    neutronclient.Client.update_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
        {'port': new_props_update1}
    ).AndReturn(None)
    self.m.ReplayAll()
    # create port
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    # update port
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props)
    scheduler.TaskRunner(port.update, update_snippet)()
    # update again to test port without security group
    # and without qos_policy
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props1)
    scheduler.TaskRunner(port.update, update_snippet)()
    self.m.VerifyAll()
def test_port_needs_update(self):
    """replacement_policy drives _needs_update: REPLACE_ALWAYS vs AUTO."""
    props = {'network_id': u'net1234',
             'name': utils.PhysName('test_stack', 'port'),
             'admin_state_up': True,
             'device_owner': u'network:dhcp'}
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronclient.Client.create_port(
        {'port': props}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
        "fixed_ips": {
            "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
            "ip_address": "10.0.0.2"
        }
    }})
    self.m.ReplayAll()
    # create port
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    new_props = props.copy()
    # test always replace
    new_props['replacement_policy'] = 'REPLACE_ALWAYS'
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props)
    self.assertRaises(exception.UpdateReplace, port._needs_update,
                      update_snippet, port.frozen_definition(),
                      new_props, props, None)
    # test deferring to Resource._needs_update
    new_props['replacement_policy'] = 'AUTO'
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props)
    self.assertTrue(port._needs_update(update_snippet,
                                       port.frozen_definition(),
                                       new_props, props, None))
    self.m.VerifyAll()
def test_port_needs_update_network(self):
    """Network change: same underlying id -> update; new id -> replace."""
    props = {'network': u'net1234',
             'name': utils.PhysName('test_stack', 'port'),
             'admin_state_up': True,
             'device_owner': u'network:dhcp'}
    # Both 'net1234' and 'old_network' resolve to the same UUID, so
    # switching between them must NOT force a replacement.
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'old_network',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    # create_port is called with 'network' translated to 'network_id'.
    create_props = props.copy()
    create_props['network_id'] = create_props.pop('network')
    neutronclient.Client.create_port(
        {'port': create_props}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
        "fixed_ips": {
            "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
            "ip_address": "10.0.0.2"
        }
    }})
    # 'new_network' resolves to a different UUID -> replacement expected.
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'new_network',
        cmd_resource=None,
    ).AndReturn('net5678')
    self.m.ReplayAll()
    # create port
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    new_props = props.copy()
    # test no replace, switch ID for name of same network
    new_props = props.copy()
    new_props['network'] = 'old_network'
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props)
    self.assertTrue(port._needs_update(update_snippet,
                                       port.frozen_definition(),
                                       new_props, props, None))
    # Different underlying network -> UpdateReplace is raised.
    new_props['network'] = 'new_network'
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_props)
    self.assertRaises(exception.UpdateReplace, port._needs_update,
                      update_snippet, port.frozen_definition(),
                      new_props, props, None)
    self.m.VerifyAll()
def test_get_port_attributes(self):
    """Every port attribute, including resolved 'subnets', is exposed.

    Creates a port, then checks that FnGetAtt mirrors the show_port
    payload and that the 'subnets' attribute is resolved by fetching
    each subnet referenced in fixed_ips.
    """
    subnet_dict = {'name': 'test-subnet', 'enable_dhcp': True,
                   'network_id': 'net1234', 'dns_nameservers': [],
                   'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
                   'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24',
                   'allocation_pools': [{'start': '10.0.0.2',
                                         'end': u'10.0.0.254'}],
                   'gateway_ip': '10.0.0.1', 'ipv6_address_mode': None,
                   'ip_version': 4, 'host_routes': [],
                   'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'}
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        'status': 'BUILD',
        'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    }})
    # 'subnets' resolution fetches the subnet named in fixed_ips below
    neutronclient.Client.show_subnet(
        'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
    ).AndReturn({'subnet': subnet_dict})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).MultipleTimes().AndReturn({'port': {
        'status': 'DOWN',
        'name': utils.PhysName('test_stack', 'port'),
        'allowed_address_pairs': [],
        'admin_state_up': True,
        'network_id': 'net1234',
        'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
        'mac_address': 'fa:16:3e:75:67:60',
        'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
        'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
        'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                       'ip_address': '10.0.0.2'}]
    }})
    self.m.ReplayAll()

    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()

    # each attribute mirrors the show_port payload above
    self.assertEqual('DOWN', port.FnGetAtt('status'))
    self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
    self.assertTrue(port.FnGetAtt('admin_state_up'))
    self.assertEqual('net1234', port.FnGetAtt('network_id'))
    self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
    self.assertEqual(utils.PhysName('test_stack', 'port'),
                     port.FnGetAtt('name'))
    self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
                     port.FnGetAtt('device_id'))
    self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
                     port.FnGetAtt('tenant_id'))
    self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
                     port.FnGetAtt('security_groups'))
    self.assertEqual([{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                       'ip_address': '10.0.0.2'}],
                     port.FnGetAtt('fixed_ips'))
    self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
    # unknown attribute names raise InvalidTemplateAttribute
    self.assertRaises(exception.InvalidTemplateAttribute,
                      port.FnGetAtt, 'Foo')
    self.m.VerifyAll()
def test_subnet_attribute_exception(self):
    """A neutron failure while resolving 'subnets' returns None and logs.

    The show_subnet call raises; FnGetAtt('subnets') must swallow the
    error, return None, and record a warning in the log output.
    """
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        'status': 'BUILD',
        'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).MultipleTimes().AndReturn({'port': {
        'status': 'DOWN',
        'name': utils.PhysName('test_stack', 'port'),
        'allowed_address_pairs': [],
        'admin_state_up': True,
        'network_id': 'net1234',
        'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
        'mac_address': 'fa:16:3e:75:67:60',
        'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
        'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
        'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                       'ip_address': '10.0.0.2'}]
    }})
    # the subnet lookup fails; the error must not propagate to the caller
    neutronclient.Client.show_subnet(
        'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
    ).AndRaise(qe.NeutronClientException('ConnectionFailed: Connection '
                                         'to neutron failed: Maximum '
                                         'attempts reached'))
    self.m.ReplayAll()

    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties'].pop('fixed_ips')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()

    self.assertIsNone(port.FnGetAtt('subnets'))
    # the failure is logged rather than raised
    log_msg = ('Failed to fetch resource attributes: ConnectionFailed: '
               'Connection to neutron failed: Maximum attempts reached')
    self.assertIn(log_msg, self.LOG.output)
    self.m.VerifyAll()
def test_vnic_create_update(self):
    """binding:vnic_type can be set on create and flipped on update.

    Creates a 'direct' port, updates it to 'normal' (along with a new
    name and security group), then back to 'direct', verifying the
    update_port payloads and the resource state after each step.
    """
    port_prop = {
        'network_id': u'net1234',
        'fixed_ips': [
            {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': 'network:dhcp',
        'binding:vnic_type': 'direct'
    }
    new_port_prop = port_prop.copy()
    new_port_prop['binding:vnic_type'] = 'normal'
    new_port_prop['name'] = "new_name"
    new_port_prop['security_groups'] = [
        '8a2f582a-e1cd-480f-b85d-b02631c10656']
    new_port_prop.pop('network_id')
    # prop_update is the payload expected in the first update_port call
    prop_update = copy.deepcopy(new_port_prop)
    new_port_prop['replacement_policy'] = 'AUTO'
    new_port_prop['network'] = u'net1234'
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('net1234')
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('sub1234')
    neutronclient.Client.create_port({'port': port_prop}).AndReturn(
        {'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.stub_SubnetConstraint_validate()
    self.stub_NetworkConstraint_validate()
    # first update: direct -> normal
    neutronclient.Client.update_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
        {'port': prop_update}
    ).AndReturn(None)
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    # second update: normal -> direct
    prop_update2 = copy.deepcopy(prop_update)
    prop_update2['binding:vnic_type'] = 'direct'
    neutronclient.Client.update_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
        {'port': prop_update2}
    ).AndReturn(None)
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()

    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties']['binding:vnic_type'] = 'direct'
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.assertEqual('direct', port.properties['binding:vnic_type'])

    # update to normal
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_port_prop)
    new_port_prop2 = copy.deepcopy(new_port_prop)
    scheduler.TaskRunner(port.update, update_snippet)()
    self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
    self.assertEqual('normal', port.properties['binding:vnic_type'])

    # update back to direct
    new_port_prop2['binding:vnic_type'] = 'direct'
    update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                  new_port_prop2)
    scheduler.TaskRunner(port.update, update_snippet)()
    self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
    self.assertEqual('direct', port.properties['binding:vnic_type'])
    self.m.VerifyAll()
def test_prepare_for_replace_port_not_created(self):
    """prepare_for_replace() is a no-op for a port never created."""
    tmpl = template_format.parse(neutron_port_template)
    rsrc = utils.parse_stack(tmpl)['port']
    fake_client = mock.Mock()
    rsrc.client = mock.Mock(return_value=fake_client)
    rsrc._show_resource = mock.Mock()
    rsrc.data_set = mock.Mock()
    # no resource_id means the port was never created
    self.assertIsNone(rsrc.resource_id)

    rsrc.prepare_for_replace()

    # nothing may be fetched, stored or updated for a missing port
    self.assertFalse(rsrc._show_resource.called)
    self.assertFalse(rsrc.data_set.called)
    self.assertFalse(fake_client.update_port.called)
def test_prepare_for_replace_port(self):
    """prepare_for_replace() stashes fixed IPs and clears the port."""
    tmpl = template_format.parse(neutron_port_template)
    rsrc = utils.parse_stack(tmpl)['port']
    rsrc.resource_id = 'test_res_id'
    port_details = {
        'fixed_ips': {
            'subnet_id': 'test_subnet',
            'ip_address': '42.42.42.42'
        }
    }
    rsrc._show_resource = mock.Mock(return_value=port_details)
    rsrc.data_set = mock.Mock()
    fake_client = mock.Mock()
    rsrc.client = mock.Mock(return_value=fake_client)

    rsrc.prepare_for_replace()

    # the original fixed_ips must be persisted under 'port_fip'
    rsrc.data_set.assert_called_once_with(
        'port_fip', jsonutils.dumps(port_details.get('fixed_ips')))
    # and the live port must have had its IPs removed
    fake_client.update_port.assert_called_once_with(
        'test_res_id', {'port': {'fixed_ips': []}})
def test_restore_prev_rsrc(self):
    """restore_prev_rsrc() moves the stashed IP back to the old port."""
    tmpl = template_format.parse(neutron_port_template)
    replacement = utils.parse_stack(tmpl)['port']
    replacement.resource_id = 'new_res_id'
    # the backup stack hands back a single mocked old port
    backup_port = mock.Mock()
    backup_port.resource_id = 'old_res_id'
    replacement.stack._backup_stack = mock.Mock()
    replacement.stack._backup_stack().resources.get.return_value = \
        backup_port
    stashed_ip = {
        'subnet_id': 'test_subnet',
        'ip_address': '42.42.42.42'
    }
    backup_port.data = mock.Mock(
        return_value={'port_fip': jsonutils.dumps(stashed_ip)})
    fake_client = mock.Mock()
    replacement.client = mock.Mock(return_value=fake_client)

    replacement.restore_prev_rsrc()

    # the replacement loses the IP, the old port gets it back
    fake_client.update_port.assert_has_calls([
        mock.call('new_res_id', {'port': {'fixed_ips': []}}),
        mock.call('old_res_id', {'port': {'fixed_ips': stashed_ip}})])
def test_restore_prev_rsrc_convergence(self):
    """Convergence restore_prev_rsrc() swaps IPs via stored resources."""
    tmpl = template_format.parse(neutron_port_template)
    stack = utils.parse_stack(tmpl)
    stack.store()

    # resource from the previous template, stored in the db
    prev_rsrc = stack['port']
    prev_rsrc.resource_id = 'prev-rsrc'
    prev_rsrc.state_set(prev_rsrc.UPDATE, prev_rsrc.COMPLETE)

    # resource from the existing template, stored in the db
    existing_rsrc = stack['port']
    existing_rsrc.current_template_id = stack.t.id
    existing_rsrc.resource_id = 'existing-rsrc'
    existing_rsrc.state_set(existing_rsrc.UPDATE, existing_rsrc.COMPLETE)

    # the previous resource was replaced by the existing one
    prev_rsrc.replaced_by = existing_rsrc.id

    stashed_ip = {
        'subnet_id': 'test_subnet',
        'ip_address': '42.42.42.42'
    }
    prev_rsrc._data = {'port_fip': jsonutils.dumps(stashed_ip)}
    fake_client = mock.Mock()
    prev_rsrc.client = mock.Mock(return_value=fake_client)

    prev_rsrc.restore_prev_rsrc(convergence=True)

    # existing port is stripped, previous port regains the stashed IP
    fake_client.update_port.assert_has_calls([
        mock.call(existing_rsrc.resource_id, {'port': {'fixed_ips': []}}),
        mock.call(prev_rsrc.resource_id,
                  {'port': {'fixed_ips': stashed_ip}})])
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}

# NOTE: the option is named 'domain_name' (matching module_arg_spec below);
# 'domain_name_label' is only an alias. The original docs listed the alias
# as the option name, which was wrong.
DOCUMENTATION = '''
---
module: azure_rm_publicipaddress
version_added: "2.1"
short_description: Manage Azure Public IP Addresses.
description:
    - Create, update and delete a Public IP address. Allows setting and updating the address allocation method and
      domain name label. Use the azure_rm_networkinterface module to associate a Public IP with a network interface.
options:
    resource_group:
        description:
            - Name of resource group with which the Public IP is associated.
        required: true
    allocation_method:
        description:
            - Control whether the assigned Public IP remains permanently assigned to the object. If not
              set to 'Static', the IP address may change anytime an associated virtual machine is power cycled.
        choices:
            - Dynamic
            - Static
        default: Dynamic
        required: false
    domain_name:
        description:
            - The customizable portion of the FQDN assigned to public IP address. This is an explicit setting. If
              no value is provided, any existing value will be removed on an existing public IP.
        aliases:
            - domain_name_label
        required: false
        default: null
    name:
        description:
            - Name of the Public IP.
        required: true
    state:
        description:
            - Assert the state of the Public IP. Use 'present' to create or update and
              'absent' to delete.
        default: present
        choices:
            - absent
            - present
        required: false
    location:
        description:
            - Valid azure location. Defaults to location of the resource group.
        default: resource_group location
        required: false

extends_documentation_fragment:
    - azure
    - azure_tags

author:
    - "Chris Houseknecht (@chouseknecht)"
    - "Matt Davis (@nitzmahone)"
'''

EXAMPLES = '''
    - name: Create a public ip address
      azure_rm_publicipaddress:
        resource_group: testing
        name: my_public_ip
        allocation_method: Static
        domain_name: foobar

    - name: Delete public ip
      azure_rm_publicipaddress:
        resource_group: testing
        name: my_public_ip
        state: absent
'''

RETURN = '''
state:
    description: Facts about the current state of the object.
    returned: always
    type: dict
    sample: {
        "dns_settings": {},
        "etag": '"/"a5e56955-12df-445a-bda4-dc129d22c12f"',
        "idle_timeout_in_minutes": 4,
        "ip_address": "52.160.103.93",
        "location": "westus",
        "name": "publicip002",
        "provisioning_state": "Succeeded",
        "public_ip_allocation_method": "Static",
        "tags": {},
        "type": "Microsoft.Network/publicIPAddresses"
    }
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import PublicIPAddress, PublicIPAddressDnsSettings
except ImportError:
# This is handled in azure_rm_common
pass
def pip_to_dict(pip):
    """Serialize a PublicIPAddress SDK object into a plain facts dict.

    dns_settings is flattened into a sub-dict; it stays empty when the
    object carries no DNS settings.
    """
    dns = dict()
    if pip.dns_settings:
        dns['domain_name_label'] = pip.dns_settings.domain_name_label
        dns['fqdn'] = pip.dns_settings.fqdn
        dns['reverse_fqdn'] = pip.dns_settings.reverse_fqdn
    return dict(
        name=pip.name,
        type=pip.type,
        location=pip.location,
        tags=pip.tags,
        public_ip_allocation_method=pip.public_ip_allocation_method,
        dns_settings=dns,
        ip_address=pip.ip_address,
        idle_timeout_in_minutes=pip.idle_timeout_in_minutes,
        provisioning_state=pip.provisioning_state,
        etag=pip.etag
    )
class AzureRMPublicIPAddress(AzureRMModuleBase):
    """Ansible module implementation managing an Azure Public IP address."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            allocation_method=dict(type='str', default='Dynamic', choices=['Dynamic', 'Static']),
            domain_name=dict(type='str', aliases=['domain_name_label']),
        )

        self.resource_group = None
        self.name = None
        self.location = None
        self.state = None
        self.tags = None
        self.allocation_method = None
        self.domain_name = None

        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Compare requested and actual state, then create/update/delete.

        Returns the module results dict ('changed' flag plus 'state'
        facts about the public IP).
        """
        # list() is required on Python 3, where dict.keys() returns a
        # view that cannot be concatenated to a list.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        results = dict()
        changed = False
        pip = None

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        try:
            self.log("Fetch public ip {0}".format(self.name))
            pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
            self.check_provisioning_state(pip, self.state)
            self.log("PIP {0} exists".format(self.name))
            if self.state == 'present':
                results = pip_to_dict(pip)
                # domain_name is an explicit setting: an omitted value
                # removes any existing label, so plain inequality is the
                # right comparison here.
                if self.domain_name != results['dns_settings'].get('domain_name_label'):
                    self.log('CHANGED: domain_name_label')
                    changed = True
                    results['dns_settings']['domain_name_label'] = self.domain_name

                if self.allocation_method != results['public_ip_allocation_method']:
                    self.log("CHANGED: allocation_method")
                    changed = True
                    results['public_ip_allocation_method'] = self.allocation_method

                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

            elif self.state == 'absent':
                self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name))
                changed = True
        except CloudError:
            # get() raising CloudError means the IP does not exist yet
            self.log('Public ip {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name))
                changed = True

        self.results['state'] = results
        self.results['changed'] = changed

        if self.check_mode:
            # report what would change without touching Azure
            return results

        if changed:
            if self.state == 'present':
                if not pip:
                    self.log("Create new Public IP {0}".format(self.name))
                    pip = PublicIPAddress(
                        location=self.location,
                        public_ip_allocation_method=self.allocation_method,
                    )
                    if self.tags:
                        pip.tags = self.tags
                    if self.domain_name:
                        pip.dns_settings = PublicIPAddressDnsSettings(
                            domain_name_label=self.domain_name
                        )
                else:
                    self.log("Update Public IP {0}".format(self.name))
                    pip = PublicIPAddress(
                        location=results['location'],
                        public_ip_allocation_method=results['public_ip_allocation_method'],
                        tags=results['tags']
                    )
                    if self.domain_name:
                        pip.dns_settings = PublicIPAddressDnsSettings(
                            domain_name_label=self.domain_name
                        )
                self.results['state'] = self.create_or_update_pip(pip)
            elif self.state == 'absent':
                self.log('Delete public ip {0}'.format(self.name))
                self.delete_pip()

        return self.results

    def create_or_update_pip(self, pip):
        """Send the create/update request and return the resulting facts."""
        try:
            poller = self.network_client.public_ip_addresses.create_or_update(self.resource_group, self.name, pip)
            pip = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating {0} - {1}".format(self.name, str(exc)))
        return pip_to_dict(pip)

    def delete_pip(self):
        """Delete the public IP and mark it deleted in the results."""
        try:
            poller = self.network_client.public_ip_addresses.delete(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting {0} - {1}".format(self.name, str(exc)))

        # Delete returns nada. If we get here, assume that all is well.
        self.results['state']['status'] = 'Deleted'
        return True
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMPublicIPAddress()


if __name__ == '__main__':
    main()
"""Support for Homematic thermostats."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
STATE_AUTO, STATE_MANUAL, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import ATTR_DISCOVER_DEVICES, HM_ATTRIBUTE_SUPPORT, HMDevice
_LOGGER = logging.getLogger(__name__)

# Homematic operation modes that have no stock HA state constant.
STATE_BOOST = 'boost'
STATE_COMFORT = 'comfort'
STATE_LOWERING = 'lowering'

# Mapping from Homematic control-mode action nodes to HA operation states.
HM_STATE_MAP = {
    'AUTO_MODE': STATE_AUTO,
    'MANU_MODE': STATE_MANUAL,
    'BOOST_MODE': STATE_BOOST,
    'COMFORT_MODE': STATE_COMFORT,
    'LOWERING_MODE': STATE_LOWERING
}

# Device data-point names that report temperature, in order of preference.
HM_TEMP_MAP = [
    'ACTUAL_TEMPERATURE',
    'TEMPERATURE',
]

# Device data-point names that report humidity, in order of preference.
HM_HUMI_MAP = [
    'ACTUAL_HUMIDITY',
    'HUMIDITY',
]

# Classic Homematic devices report the mode in CONTROL_MODE; HmIP devices
# use SET_POINT_MODE instead.
HM_CONTROL_MODE = 'CONTROL_MODE'
HMIP_CONTROL_MODE = 'SET_POINT_MODE'

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Homematic thermostat platform."""
    if discovery_info is None:
        return

    entities = [HMThermostat(conf)
                for conf in discovery_info[ATTR_DISCOVER_DEVICES]]
    add_entities(entities)
class HMThermostat(HMDevice, ClimateDevice):
    """Representation of a Homematic thermostat."""

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def temperature_unit(self):
        """Return the unit of measurement that is used."""
        return TEMP_CELSIUS

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if HM_CONTROL_MODE not in self._data:
            return None

        # boost mode is active
        if self._data.get('BOOST_MODE', False):
            return STATE_BOOST

        # HmIP uses the set_point_mode to say if its
        # auto or manual
        if HMIP_CONTROL_MODE in self._data:
            code = self._data[HMIP_CONTROL_MODE]
        # Other devices use the control_mode. Use the shared constant for
        # consistency with the membership check above.
        else:
            code = self._data[HM_CONTROL_MODE]

        # get the name of the mode
        name = HM_ATTRIBUTE_SUPPORT[HM_CONTROL_MODE][1][code]
        return name.lower()

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        # HMIP use set_point_mode for operation
        if HMIP_CONTROL_MODE in self._data:
            return [STATE_MANUAL, STATE_AUTO, STATE_BOOST]

        # HM
        op_list = []
        for mode in self._hmdevice.ACTIONNODE:
            if mode in HM_STATE_MAP:
                op_list.append(HM_STATE_MAP.get(mode))
        return op_list

    @property
    def current_humidity(self):
        """Return the current humidity."""
        for node in HM_HUMI_MAP:
            if node in self._data:
                return self._data[node]

    @property
    def current_temperature(self):
        """Return the current temperature."""
        for node in HM_TEMP_MAP:
            if node in self._data:
                return self._data[node]

    @property
    def target_temperature(self):
        """Return the target temperature."""
        return self._data.get(self._state)

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return None

        self._hmdevice.writeNodeData(self._state, float(temperature))

    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        for mode, state in HM_STATE_MAP.items():
            if state == operation_mode:
                code = getattr(self._hmdevice, mode, 0)
                self._hmdevice.MODE = code
                return

    @property
    def min_temp(self):
        """Return the minimum temperature - 4.5 means off."""
        return 4.5

    @property
    def max_temp(self):
        """Return the maximum temperature - 30.5 means on."""
        return 30.5

    def _init_data_struct(self):
        """Generate a data dict (self._data) from the Homematic metadata."""
        # The first WRITENODE is the set-point used as the state attribute.
        self._state = next(iter(self._hmdevice.WRITENODE.keys()))
        self._data[self._state] = None

        if HM_CONTROL_MODE in self._hmdevice.ATTRIBUTENODE or \
                HMIP_CONTROL_MODE in self._hmdevice.ATTRIBUTENODE:
            self._data[HM_CONTROL_MODE] = None

        for node in self._hmdevice.SENSORNODE.keys():
            self._data[node] = None
ok_label: OK
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/samsung,exynos-ext-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung SoC external/osc/XXTI/XusbXTI clock
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
description: |
Samsung SoCs require an external clock supplied through XXTI or XusbXTI pins.
properties:
compatible:
enum:
- samsung,clock-xxti
- samsung,clock-xusbxti
- samsung,exynos5420-oscclk
"#clock-cells":
const: 0
clock-frequency: true
clock-output-names:
maxItems: 1
required:
- compatible
- clock-frequency
additionalProperties: false
examples:
- |
fixed-rate-clocks {
clock {
compatible = "samsung,clock-xxti";
clock-frequency = <24000000>;
};
};
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reorder sys.path to put $VTROOT/dist/* paths before others.
This ensures libraries installed there will be preferred over other versions
that may be present at the system level. We do this at runtime because
regardless of what we set in the PYTHONPATH environment variable, the system
dist-packages folder gets prepended sometimes.
To use this, just import it before importing packages that you want to make
sure are overridden from $VTROOT/dist.
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
"""
import os
import sys
def _prefer_vtroot_imports():
"""Reorder sys.path to put $VTROOT/dist before others."""
vtroot = os.environ.get('VTROOT')
if not vtroot:
# VTROOT is not set. Don't try anything.
return
dist = os.path.join(vtroot, 'dist')
dist_paths = []
other_paths = []
for path in sys.path:
if path:
if path.startswith(dist):
dist_paths.append(path)
else:
other_paths.append(path)
sys.path = [''] + dist_paths + other_paths
_prefer_vtroot_imports() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
pygments.lexers.nimrod
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Nim language (formerly known as Nimrod).
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['NimrodLexer']
class NimrodLexer(RegexLexer):
    """
    For `Nim <http://nim-lang.org/>`_ source code.

    .. versionadded:: 1.5
    """

    name = 'Nimrod'
    aliases = ['nim', 'nimrod']
    filenames = ['*.nim', '*.nimrod']
    mimetypes = ['text/x-nim']

    flags = re.MULTILINE | re.IGNORECASE | re.UNICODE

    def underscorize(words):
        # Nim identifiers are underscore-insensitive, so each keyword is
        # turned into a regex allowing an optional underscore after every
        # character; the alternatives are then OR-ed together.
        newWords = []
        new = ""
        for word in words:
            for ch in word:
                new += (ch + "_?")
            newWords.append(new)
            new = ""
        return "|".join(newWords)

    keywords = [
        'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', 'case',
        'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard',
        'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except',
        'export', 'finally', 'for', 'func', 'if', 'in', 'yield', 'interface',
        'is', 'isnot', 'iterator', 'let', 'macro', 'method', 'mixin', 'mod',
        'not', 'notin', 'object', 'of', 'or', 'out', 'proc', 'ptr', 'raise',
        'ref', 'return', 'shared', 'shl', 'shr', 'static', 'template', 'try',
        'tuple', 'type', 'when', 'while', 'with', 'without', 'xor'
    ]

    keywordsPseudo = [
        'nil', 'true', 'false'
    ]

    opWords = [
        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
        'notin', 'is', 'isnot'
    ]

    types = [
        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
    ]

    tokens = {
        'root': [
            (r'##.*$', String.Doc),
            (r'#.*$', Comment),
            (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
            (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
             Punctuation),

            # Strings
            (r'(?:[\w]+)"', String, 'rdqs'),
            (r'"""', String, 'tdqs'),
            ('"', String, 'dqs'),

            # Char
            ("'", String.Char, 'chars'),

            # Keywords
            (r'(%s)\b' % underscorize(opWords), Operator.Word),
            (r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'),
            (r'(%s)\b' % underscorize(keywords), Keyword),
            (r'(%s)\b' % underscorize(['from', 'import', 'include']),
             Keyword.Namespace),
            (r'(v_?a_?r)\b', Keyword.Declaration),
            (r'(%s)\b' % underscorize(types), Keyword.Type),
            (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),

            # Identifiers
            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),

            # Numbers
            (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
             Number.Float, ('float-suffix', 'float-number')),
            (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
            (r'0b[01][01_]*', Number.Bin, 'int-suffix'),
            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),

            # Whitespace
            (r'\s+', Text),
            (r'.+$', Error),
        ],
        'chars': [
            (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
            (r"'", String.Char, '#pop'),
            (r".", String.Char)
        ],
        'strings': [
            (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
            (r'[^\\\'"$\n]+', String),
            # quotes, dollars and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
            # newlines are an error (use "nl" state)
        ],
        'dqs': [
            (r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
             String.Escape),
            (r'"', String, '#pop'),
            include('strings')
        ],
        'rdqs': [
            (r'"(?!")', String, '#pop'),
            (r'""', String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""(?!")', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'funcname': [
            (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
            (r'`.+`', Name.Function, '#pop')
        ],
        'nl': [
            (r'\n', String)
        ],
        'float-number': [
            (r'\.(?!\.)[0-9_]*', Number.Float),
            (r'e[+-]?[0-9][0-9_]*', Number.Float),
            default('#pop')
        ],
        'float-suffix': [
            (r'\'f(32|64)', Number.Float),
            default('#pop')
        ],
        'int-suffix': [
            (r'\'i(32|64)', Number.Integer.Long),
            (r'\'i(8|16)', Number.Integer),
            default('#pop')
        ],
    }
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import json
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import api as compute_api
from nova import exception
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
ALIAS = 'os-security-groups'
ATTRIBUTE_NAME = '%s:security_groups' % ALIAS
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
softauth = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
def _authorize_context(req):
    """Authorize the request's nova context for this extension.

    Returns the context on success; the authorize() call raises on a
    policy failure.
    """
    context = req.environ['nova.context']
    authorize(context)
    return context
class SecurityGroupsOutputController(wsgi.Controller):
    """Adds the security_groups attribute to server API responses."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    def _extend_servers(self, req, servers):
        # TODO(arosen) this function should be refactored to reduce duplicate
        # code and use get_instance_security_groups instead of get_db_instance.
        if not len(servers):
            return
        key = "security_groups"
        context = _authorize_context(req)
        if not openstack_driver.is_neutron_security_groups():
            # nova-network: groups are read from the cached db instance
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[ATTRIBUTE_NAME] = [{"name": group["name"]}
                                              for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # neutron security groups the requested security groups for the
            # instance are not in the db and have not been sent to neutron yet.
            if req.method != 'POST':
                sg_instance_bindings = (
                    self.security_group_api
                    .get_instances_security_groups_bindings(context,
                                                            servers))
                for server in servers:
                    groups = sg_instance_bindings.get(server['id'])
                    if groups:
                        server[ATTRIBUTE_NAME] = groups

            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                # try converting to json
                req_obj = json.loads(req.body)
                # Add security group to server, if no security group was in
                # request add default since that is the group it is part of
                servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
                    ATTRIBUTE_NAME, [{'name': 'default'}])

    def _show(self, req, resp_obj):
        # Silently skip when the caller is not authorized for the extension.
        if not softauth(req.environ['nova.context']):
            return
        if 'server' in resp_obj.obj:
            self._extend_servers(req, [resp_obj.obj['server']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not softauth(req.environ['nova.context']):
            return
        self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroups(extensions.V3APIExtensionBase):
    """Security group support."""

    name = "SecurityGroups"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        # Attach the output controller to the core 'servers' resource so its
        # show/create/detail responses get decorated.
        controller = SecurityGroupsOutputController()
        output = extensions.ControllerExtension(self, 'servers', controller)
        return [output]

    def get_resources(self):
        # This extension adds no new top-level resources.
        return []

    def server_create(self, server_dict, create_kwargs):
        # Server-create hook: copy the requested group names (deduplicated,
        # empty names dropped) into the kwargs passed to the compute API.
        security_groups = server_dict.get(ATTRIBUTE_NAME)
        if security_groups is not None:
            create_kwargs['security_group'] = [
                sg['name'] for sg in security_groups if sg.get('name')]
            create_kwargs['security_group'] = list(
                set(create_kwargs['security_group']))
class NativeSecurityGroupExceptions(object):
    """Maps generic security-group driver failure hooks onto nova exceptions."""

    @staticmethod
    def raise_invalid_property(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_group_already_exists(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_invalid_group(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        # decoding_exception is accepted for interface compatibility but the
        # raised error carries only the offending CIDR.
        raise exception.InvalidCidr(cidr=cidr)

    @staticmethod
    def raise_over_quota(msg):
        raise exception.SecurityGroupLimitExceeded(msg)

    @staticmethod
    def raise_not_found(msg):
        raise exception.SecurityGroupNotFound(msg)
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
                                 compute_api.SecurityGroupAPI):
    # nova-network backed security-group API with native nova exceptions.
    pass
class NativeNeutronSecurityGroupAPI(NativeSecurityGroupExceptions,
                                    neutron_driver.SecurityGroupAPI):
    # neutron backed security-group API with native nova exceptions.
    pass
package service
import (
"context"
"strconv"
"sync/atomic"
"github.com/containerd/log"
"github.com/moby/moby/api/types/events"
volumetypes "github.com/moby/moby/api/types/volume"
"github.com/moby/moby/v2/daemon/internal/directory"
"github.com/moby/moby/v2/daemon/internal/filters"
"github.com/moby/moby/v2/daemon/internal/idtools"
"github.com/moby/moby/v2/daemon/internal/stringid"
"github.com/moby/moby/v2/daemon/volume"
"github.com/moby/moby/v2/daemon/volume/drivers"
"github.com/moby/moby/v2/daemon/volume/service/opts"
"github.com/moby/moby/v2/errdefs"
"github.com/moby/moby/v2/pkg/plugingetter"
"github.com/pkg/errors"
)
// driverLister abstracts the driver store so it can be stubbed out;
// it reports the names of all registered volume drivers.
type driverLister interface {
	GetDriverList() []string
}
// VolumeEventLogger interface provides methods to log volume-related events
type VolumeEventLogger interface {
	// LogVolumeEvent generates an event related to a volume.
	// attributes carries extra key/value metadata for the event.
	LogVolumeEvent(volumeID string, action events.Action, attributes map[string]string)
}
// VolumesService manages access to volumes
// This is used as the main access point for volumes to higher level services and the API.
type VolumesService struct {
	vs           *VolumeStore // backing store for volume metadata and references
	ds           driverLister // registered volume drivers
	pruneRunning atomic.Bool  // true while a prune is in flight; guards concurrent prunes
	eventLogger  VolumeEventLogger
}
// NewVolumeService creates a new volume service
func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) {
	ds := drivers.NewStore(pg)
	// Set up the default driver under root before creating the store.
	if err := setupDefaultDriver(ds, root, rootIDs); err != nil {
		return nil, err
	}

	vs, err := NewStore(root, ds, WithEventLogger(logger))
	if err != nil {
		return nil, err
	}
	return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil
}
// GetDriverList gets the list of registered volume drivers
func (s *VolumesService) GetDriverList() []string {
	return s.ds.GetDriverList()
}
// AnonymousLabel is the label used to indicate that a volume is anonymous
// This is set automatically on a volume when a volume is created without a name specified, and as such an id is generated for it.
const AnonymousLabel = "com.docker.volume.anonymous"
// Create creates a volume
// If the caller is creating this volume to be consumed immediately, it is
// expected that the caller specifies a reference ID.
// This reference ID will protect this volume from removal.
//
// A good example for a reference ID is a container's ID.
// When whatever is going to reference this volume is removed the caller should dereference the volume by calling `Release`.
func (s *VolumesService) Create(ctx context.Context, name, driverName string, options ...opts.CreateOption) (*volumetypes.Volume, error) {
	if name == "" {
		// Anonymous volume: generate an ID for a name, fall back to the
		// default driver, and tag it with AnonymousLabel.
		name = stringid.GenerateRandomID()
		if driverName == "" {
			driverName = volume.DefaultDriverName
		}
		options = append(options, opts.WithCreateLabel(AnonymousLabel, ""))
		log.G(ctx).WithFields(log.Fields{"volume": name, "driver": driverName}).Debug("Creating anonymous volume")
	} else {
		log.G(ctx).WithField("volume", name).Debug("Creating named volume")
	}

	v, err := s.vs.Create(ctx, name, driverName, options...)
	if err != nil {
		return nil, err
	}
	// Convert the internal volume to the API representation before returning.
	apiV := volumeToAPIType(v)
	return &apiV, nil
}
// Get returns details about a volume.
// Options may request extra (possibly driver-backed) detail such as status.
func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*volumetypes.Volume, error) {
	var cfg opts.GetConfig
	for _, applyOpt := range getOpts {
		applyOpt(&cfg)
	}

	stored, err := s.vs.Get(ctx, name, getOpts...)
	if err != nil {
		return nil, err
	}

	apiVol := volumeToAPIType(stored)
	if cfg.ResolveStatus {
		// Status is only resolved when explicitly requested.
		apiVol.Status = stored.Status()
	}
	return &apiVol, nil
}
// Mount mounts the volume
// Callers should specify a unique reference for each Mount/Unmount pair.
//
// Example:
// ```go
// mountID := "randomString"
// s.Mount(ctx, vol, mountID)
// s.Unmount(ctx, vol, mountID)
// ```
func (s *VolumesService) Mount(ctx context.Context, vol *volumetypes.Volume, ref string) (string, error) {
	v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver))
	if err != nil {
		// Normalize store-level not-found into an API-level NotFound.
		if IsNotExist(err) {
			err = errdefs.NotFound(err)
		}
		return "", err
	}
	return v.Mount(ref)
}
// Unmount unmounts the volume.
// Note that depending on the implementation, the volume may still be mounted due to other resources using it.
//
// The reference specified here should be the same reference specified during `Mount` and should be
// unique for each mount/unmount pair.
// See `Mount` documentation for an example.
func (s *VolumesService) Unmount(ctx context.Context, vol *volumetypes.Volume, ref string) error {
	v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver))
	if err != nil {
		// Normalize store-level not-found into an API-level NotFound.
		if IsNotExist(err) {
			err = errdefs.NotFound(err)
		}
		return err
	}
	return v.Unmount(ref)
}
// Release releases a volume reference
func (s *VolumesService) Release(ctx context.Context, name string, ref string) error {
	return s.vs.Release(ctx, name, ref)
}
// Remove removes a volume.
// An error is returned if the volume is still referenced.
//
// When the PurgeOnError remove option is set, a volume that does not exist
// is not treated as an error. An in-use volume is reported as a conflict.
func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error {
	var cfg opts.RemoveConfig
	for _, o := range rmOpts {
		o(&cfg)
	}

	v, err := s.vs.Get(ctx, name)
	if err != nil {
		if IsNotExist(err) && cfg.PurgeOnError {
			return nil
		}
		return err
	}

	err = s.vs.Remove(ctx, v, rmOpts...)
	switch {
	case IsNotExist(err):
		// Volume disappeared between Get and Remove; removal is effectively
		// done, so this is not an error. (This also subsumes the former dead
		// `IsNotExist && PurgeOnError` branch, which could never be reached.)
		err = nil
	case IsInUse(err):
		err = errdefs.Conflict(err)
	}
	return err
}
// acceptedPruneFilters lists the filter keys Prune will accept.
var acceptedPruneFilters = map[string]bool{
	"label":  true,
	"label!": true,
	// All tells the filter to consider all volumes not just anonymous ones.
	"all": true,
}
// acceptedListFilters lists the filter keys List will accept.
var acceptedListFilters = map[string]bool{
	"dangling": true,
	"name":     true,
	"driver":   true,
	"label":    true,
}
// LocalVolumesSize gets all local volumes and fetches their size on disk
// Note that this intentionally skips volumes which have mount options. Typically
// volumes with mount options are not really local even if they are using the
// local driver.
func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]volumetypes.Volume, error) {
	// Select default-driver volumes whose detailed options are empty.
	ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool {
		dv, ok := v.(volume.DetailedVolume)
		return ok && len(dv.Options()) == 0
	})))
	if err != nil {
		return nil, err
	}
	// calcSize(true) requests on-disk size computation during conversion.
	return s.volumesToAPI(ctx, ls, calcSize(true)), nil
}
// Prune removes (local) volumes which match the past in filter arguments.
// Note that this intentionally skips volumes with mount options as there would
// be no space reclaimed in this case.
//
// Only one prune may run at a time; a concurrent call returns a conflict error.
// Cancellation returns the partial report accumulated so far.
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*volumetypes.PruneReport, error) {
	if !s.pruneRunning.CompareAndSwap(false, true) {
		return nil, errdefs.Conflict(errors.New("a prune operation is already running"))
	}
	defer s.pruneRunning.Store(false)

	if err := withPrune(filter); err != nil {
		return nil, err
	}

	by, err := filtersToBy(filter, acceptedPruneFilters)
	if err != nil {
		return nil, err
	}

	// Candidates: unreferenced default-driver volumes with no mount options.
	ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool {
		dv, ok := v.(volume.DetailedVolume)
		return ok && len(dv.Options()) == 0
	})))
	if err != nil {
		return nil, err
	}

	rep := &volumetypes.PruneReport{VolumesDeleted: make([]string, 0, len(ls))}
	for _, v := range ls {
		select {
		case <-ctx.Done():
			err := ctx.Err()
			// Use errors.Is so wrapped cancellation errors are also
			// recognized; plain cancellation is not reported as an error.
			if errors.Is(err, context.Canceled) {
				err = nil
			}
			return rep, err
		default:
		}

		vSize, err := directory.Size(ctx, v.Path())
		if err != nil {
			log.G(ctx).WithFields(log.Fields{
				"error":  err,
				"volume": v.Name(),
			}).Warn("could not determine size of volume")
		}
		if err := s.vs.Remove(ctx, v); err != nil {
			// Best-effort: log and keep pruning the remaining volumes.
			log.G(ctx).WithFields(log.Fields{
				"error":  err,
				"volume": v.Name(),
			}).Warn("Could not remove volume")
			continue
		}
		rep.SpaceReclaimed += uint64(vSize)
		rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name())
	}

	s.eventLogger.LogVolumeEvent("", events.ActionPrune, map[string]string{
		"reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10),
	})

	return rep, nil
}
// List gets the list of volumes which match the past in filters
// If filters is nil or empty all volumes are returned.
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumes []volumetypes.Volume, warnings []string, _ error) {
	by, err := filtersToBy(filter, acceptedListFilters)
	if err != nil {
		return nil, nil, err
	}

	vols, warns, err := s.vs.Find(ctx, by)
	if err != nil {
		return nil, nil, err
	}

	// useCachedPath(true) avoids re-resolving each volume's path here.
	return s.volumesToAPI(ctx, vols, useCachedPath(true)), warns, nil
}
// Shutdown shuts down the image service and dependencies
func (s *VolumesService) Shutdown() error {
	return s.vs.Shutdown()
}
// LiveRestoreVolume passes through the LiveRestoreVolume call to the volume if it is implemented
// otherwise it is a no-op.
func (s *VolumesService) LiveRestoreVolume(ctx context.Context, vol *volumetypes.Volume, ref string) error {
	v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver))
	if err != nil {
		return err
	}
	// Only volumes implementing the optional LiveRestorer interface take part.
	rlv, ok := v.(volume.LiveRestorer)
	if !ok {
		log.G(ctx).WithField("volume", vol.Name).Debugf("volume does not implement LiveRestoreVolume: %T", v)
		return nil
	}
	return rlv.LiveRestoreVolume(ctx, ref)
}
from __future__ import annotations
import json
from typing import (
TYPE_CHECKING,
Any,
)
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
combine_kwargs,
validate_freeze_panes,
)
if TYPE_CHECKING:
from pandas._typing import (
ExcelWriterIfSheetExists,
FilePath,
StorageOptions,
WriteExcelBuffer,
)
class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
"font": [
(("name",), "font_name"),
(("sz",), "font_size"),
(("size",), "font_size"),
(("color", "rgb"), "font_color"),
(("color",), "font_color"),
(("b",), "bold"),
(("bold",), "bold"),
(("i",), "italic"),
(("italic",), "italic"),
(("u",), "underline"),
(("underline",), "underline"),
(("strike",), "font_strikeout"),
(("vertAlign",), "font_script"),
(("vertalign",), "font_script"),
],
"number_format": [(("format_code",), "num_format"), ((), "num_format")],
"protection": [(("locked",), "locked"), (("hidden",), "hidden")],
"alignment": [
(("horizontal",), "align"),
(("vertical",), "valign"),
(("text_rotation",), "rotation"),
(("wrap_text",), "text_wrap"),
(("indent",), "indent"),
(("shrink_to_fit",), "shrink"),
],
"fill": [
(("patternType",), "pattern"),
(("patterntype",), "pattern"),
(("fill_type",), "pattern"),
(("start_color", "rgb"), "fg_color"),
(("fgColor", "rgb"), "fg_color"),
(("fgcolor", "rgb"), "fg_color"),
(("start_color",), "fg_color"),
(("fgColor",), "fg_color"),
(("fgcolor",), "fg_color"),
(("end_color", "rgb"), "bg_color"),
(("bgColor", "rgb"), "bg_color"),
(("bgcolor", "rgb"), "bg_color"),
(("end_color",), "bg_color"),
(("bgColor",), "bg_color"),
(("bgcolor",), "bg_color"),
],
"border": [
(("color", "rgb"), "border_color"),
(("color",), "border_color"),
(("style",), "border"),
(("top", "color", "rgb"), "top_color"),
(("top", "color"), "top_color"),
(("top", "style"), "top"),
(("top",), "top"),
(("right", "color", "rgb"), "right_color"),
(("right", "color"), "right_color"),
(("right", "style"), "right"),
(("right",), "right"),
(("bottom", "color", "rgb"), "bottom_color"),
(("bottom", "color"), "bottom_color"),
(("bottom", "style"), "bottom"),
(("bottom",), "bottom"),
(("left", "color", "rgb"), "left_color"),
(("left", "color"), "left_color"),
(("left", "style"), "left"),
(("left",), "left"),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None) -> dict[str, Any]:
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create an XlsxWriter format object.
props = {}
if num_format_str is not None:
props["num_format"] = num_format_str
if style_dict is None:
return props
if "borders" in style_dict:
style_dict = style_dict.copy()
style_dict["border"] = style_dict.pop("borders")
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get("pattern"), str):
# TODO: support other fill patterns
props["pattern"] = 0 if props["pattern"] == "none" else 1
for k in ["border", "top", "right", "bottom", "left"]:
if isinstance(props.get(k), str):
try:
props[k] = [
"none",
"thin",
"medium",
"dashed",
"dotted",
"thick",
"double",
"hair",
"mediumDashed",
"dashDot",
"mediumDashDot",
"dashDotDot",
"mediumDashDotDot",
"slantDashDot",
].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get("font_script"), str):
props["font_script"] = ["baseline", "superscript", "subscript"].index(
props["font_script"]
)
if isinstance(props.get("underline"), str):
props["underline"] = {
"none": 0,
"single": 1,
"double": 2,
"singleAccounting": 33,
"doubleAccounting": 34,
}[props["underline"]]
# GH 30107 - xlsxwriter uses different name
if props.get("valign") == "center":
props["valign"] = "vcenter"
return props
class XlsxWriter(ExcelWriter):
    """ExcelWriter implementation backed by the ``xlsxwriter`` package.

    Writes ``.xlsx`` files only. Append mode is rejected in ``__init__``.
    """

    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(  # pyright: ignore[reportInconsistentConstructor]
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            # Close the underlying handle if the Workbook could not be
            # constructed (e.g. bad engine kwargs), then re-raise.
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        # Mapping of sheet name -> xlsxwriter worksheet object.
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
        autofilter_range: str | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        # Cache of Format objects keyed by the JSON dump of a cell's style.
        # "null" is pre-seeded so cells with style=None (json.dumps(None))
        # map to no format.
        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)

        if autofilter_range:
            wks.autofilter(autofilter_range)
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from ipalib import api, errors
from ipalib import Updater
from ipapython.dn import DN
from ipapython.ipa_log_manager import root_logger
from ipaserver.install import sysupgrade
class update_passync_privilege_check(Updater):
    """Pre-check: record whether the 'PassSync Service' privilege already
    exists, so the follow-up updater knows whether to run.
    """

    def execute(self, **options):
        update_done = sysupgrade.get_upgrade_state('winsync', 'passsync_privilege_updated')
        if update_done:
            root_logger.debug("PassSync privilege update pre-check not needed")
            return False, []

        root_logger.debug("Check if there is existing PassSync privilege")
        passsync_privilege_dn = DN(('cn', 'PassSync Service'),
                                   self.api.env.container_privilege,
                                   self.api.env.basedn)

        ldap = self.api.Backend.ldap2
        try:
            # Attribute list [''] fetches no attributes; we only need existence.
            ldap.get_entry(passsync_privilege_dn, [''])
        except errors.NotFound:
            root_logger.debug("PassSync privilege not found, this is a new update")
            sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', False)
        else:
            root_logger.debug("PassSync privilege found, skip updating PassSync")
            sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
        return False, []
# Register the pre-check updater with the IPA framework.
api.register(update_passync_privilege_check)
class update_passync_privilege_update(Updater):
    """
    Add PassSync user as a member of PassSync privilege, if it exists
    """

    def execute(self, **options):
        update_done = sysupgrade.get_upgrade_state('winsync', 'passsync_privilege_updated')
        if update_done:
            root_logger.debug("PassSync privilege update not needed")
            return False, []

        root_logger.debug("Add PassSync user as a member of PassSync privilege")
        ldap = self.api.Backend.ldap2
        passsync_dn = DN(('uid', 'passsync'), ('cn', 'sysaccounts'), ('cn', 'etc'),
                         self.api.env.basedn)
        passsync_privilege_dn = DN(('cn', 'PassSync Service'),
                                   self.api.env.container_privilege,
                                   self.api.env.basedn)

        try:
            # Existence check only; no attributes requested.
            entry = ldap.get_entry(passsync_dn, [''])
        except errors.NotFound:
            root_logger.debug("PassSync user not found, no update needed")
            sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
            return False, []
        else:
            root_logger.debug("PassSync user found, do update")
            # Return an LDAP update adding the user to the privilege's members.
            update = {'dn': passsync_privilege_dn,
                      'updates': [
                          dict(action='add', attr='member', value=passsync_dn),
                      ]
                      }
            sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
            return False, [update]
# Register the privilege-membership updater with the IPA framework.
api.register(update_passync_privilege_update)
'''tzinfo timezone information for America/Manaus.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Manaus(DstTzInfo):
    '''America/Manaus timezone definition. See datetime.tzinfo for details'''

    zone = 'America/Manaus'

    # UTC instants at which the zone's offset or abbreviation changes.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1914,1,1,4,0,4),
        d(1931,10,3,15,0,0),
        d(1932,4,1,3,0,0),
        d(1932,10,3,4,0,0),
        d(1933,4,1,3,0,0),
        d(1949,12,1,4,0,0),
        d(1950,4,16,4,0,0),
        d(1950,12,1,4,0,0),
        d(1951,4,1,3,0,0),
        d(1951,12,1,4,0,0),
        d(1952,4,1,3,0,0),
        d(1952,12,1,4,0,0),
        d(1953,3,1,3,0,0),
        d(1963,12,9,4,0,0),
        d(1964,3,1,3,0,0),
        d(1965,1,31,4,0,0),
        d(1965,3,31,3,0,0),
        d(1965,12,1,4,0,0),
        d(1966,3,1,3,0,0),
        d(1966,11,1,4,0,0),
        d(1967,3,1,3,0,0),
        d(1967,11,1,4,0,0),
        d(1968,3,1,3,0,0),
        d(1985,11,2,4,0,0),
        d(1986,3,15,3,0,0),
        d(1986,10,25,4,0,0),
        d(1987,2,14,3,0,0),
        d(1987,10,25,4,0,0),
        d(1988,2,7,3,0,0),
        d(1993,10,17,4,0,0),
        d(1994,2,20,3,0,0),
    ]

    # (utcoffset seconds, dst seconds, tzname) tuples, one per transition
    # time above, index for index.
    _transition_info = [
        i(-14400,0,'LMT'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
        i(-10800,3600,'AMST'),
        i(-14400,0,'AMT'),
    ]
# Replace the class with a singleton instance, as pytz zone modules do.
Manaus = Manaus()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# Public API of this module.
__all__ = [
    "AttentionMechanism",
    "AttentionWrapper",
    "AttentionWrapperState",
    "LuongAttention",
    "BahdanauAttention",
    "hardmax",
    "safe_cumprod",
    "monotonic_attention",
    "BahdanauMonotonicAttention",
    "LuongMonotonicAttention",
]

# Local alias for the RNN-cell helper that builds zero-filled state tensors.
_zero_state_tensors = rnn_cell_impl._zero_state_tensors  # pylint: disable=protected-access
class AttentionMechanism(object):
  """Abstract interface for attention mechanisms.

  Subclasses must report the sizes used to allocate alignment and state
  tensors for the wrapper.
  """

  @property
  def alignments_size(self):
    # Size of the alignments vector produced per step.
    raise NotImplementedError

  @property
  def state_size(self):
    # Size of the mechanism's recurrent state.
    raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
  """Convert to tensor and possibly mask `memory`.

  Args:
    memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
    memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
    check_inner_dims_defined: Python boolean.  If `True`, the `memory`
      argument's shape is checked to ensure all but the two outermost
      dimensions are fully defined.

  Returns:
    A (possibly masked), checked, new `memory`.

  Raises:
    ValueError: If `check_inner_dims_defined` is `True` and not
      `memory.shape[2:].is_fully_defined()`.
  """
  memory = nest.map_structure(
      lambda m: ops.convert_to_tensor(m, name="memory"), memory)
  if memory_sequence_length is not None:
    memory_sequence_length = ops.convert_to_tensor(
        memory_sequence_length, name="memory_sequence_length")
  if check_inner_dims_defined:
    def _check_dims(m):
      if not m.get_shape()[2:].is_fully_defined():
        raise ValueError("Expected memory %s to have fully defined inner dims, "
                         "but saw shape: %s" % (m.name, m.get_shape()))
    nest.map_structure(_check_dims, memory)
  if memory_sequence_length is None:
    seq_len_mask = None
  else:
    # [batch_size, max_time] 0/1 mask in the memory's dtype.
    seq_len_mask = array_ops.sequence_mask(
        memory_sequence_length,
        maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
        dtype=nest.flatten(memory)[0].dtype)
    seq_len_batch_size = (
        tensor_shape.dimension_value(memory_sequence_length.shape[0])
        or array_ops.shape(memory_sequence_length)[0])

  def _maybe_mask(m, seq_len_mask):
    """Zero out memory entries past each sequence length (if lengths given)."""
    rank = m.get_shape().ndims
    rank = rank if rank is not None else array_ops.rank(m)
    extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
    m_batch_size = tensor_shape.dimension_value(
        m.shape[0]) or array_ops.shape(m)[0]
    if memory_sequence_length is not None:
      message = ("memory_sequence_length and memory tensor batch sizes do not "
                 "match.")
      with ops.control_dependencies([
          check_ops.assert_equal(
              seq_len_batch_size, m_batch_size, message=message)]):
        # Append singleton dims so the 2-D mask broadcasts over the memory's
        # inner dimensions.
        seq_len_mask = array_ops.reshape(
            seq_len_mask,
            array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
        return m * seq_len_mask
    else:
      return m

  return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
  """Replaces score entries beyond each sequence length with mask value.

  Args:
    score: `[batch_size, max_time]` score tensor.
    memory_sequence_length: `[batch_size]` int lengths, or None for no-op.
    score_mask_value: scalar used to fill masked positions (typically -inf).

  Returns:
    `score` unchanged if no lengths were given, otherwise the masked scores.
  """
  if memory_sequence_length is None:
    return score
  message = ("All values in memory_sequence_length must greater than zero.")
  with ops.control_dependencies(
      [check_ops.assert_positive(memory_sequence_length, message=message)]):
    score_mask = array_ops.sequence_mask(
        memory_sequence_length, maxlen=array_ops.shape(score)[1])
    score_mask_values = score_mask_value * array_ops.ones_like(score)
    return array_ops.where(score_mask, score, score_mask_values)
class _BaseAttentionMechanism(AttentionMechanism):
  """A base AttentionMechanism class providing common functionality.

  Common functionality includes:
    1. Storing the query and memory layers.
    2. Preprocessing and storing the memory.
  """

  def __init__(self,
               query_layer,
               memory,
               probability_fn,
               memory_sequence_length=None,
               memory_layer=None,
               check_inner_dims_defined=True,
               score_mask_value=None,
               name=None):
    """Construct base AttentionMechanism class.

    Args:
      query_layer: Callable.  Instance of `tf.layers.Layer`.  The layer's depth
        must match the depth of `memory_layer`.  If `query_layer` is not
        provided, the shape of `query` must match that of `memory_layer`.
      memory: The memory to query; usually the output of an RNN encoder.  This
        tensor should be shaped `[batch_size, max_time, ...]`.
      probability_fn: A `callable`.  Converts the score and previous alignments
        to probabilities. Its signature should be:
        `probabilities = probability_fn(score, state)`.
      memory_sequence_length (optional): Sequence lengths for the batch entries
        in memory.  If provided, the memory tensor rows are masked with zeros
        for values past the respective sequence lengths.
      memory_layer: Instance of `tf.layers.Layer` (may be None).  The layer's
        depth must match the depth of `query_layer`.
        If `memory_layer` is not provided, the shape of `memory` must match
        that of `query_layer`.
      check_inner_dims_defined: Python boolean.  If `True`, the `memory`
        argument's shape is checked to ensure all but the two outermost
        dimensions are fully defined.
      score_mask_value: (optional): The mask value for score before passing into
        `probability_fn`. The default is -inf. Only used if
        `memory_sequence_length` is not None.
      name: Name to use when creating ops.
    """
    if (query_layer is not None
        and not isinstance(query_layer, layers_base.Layer)):
      raise TypeError(
          "query_layer is not a Layer: %s" % type(query_layer).__name__)
    if (memory_layer is not None
        and not isinstance(memory_layer, layers_base.Layer)):
      raise TypeError(
          "memory_layer is not a Layer: %s" % type(memory_layer).__name__)
    self._query_layer = query_layer
    self._memory_layer = memory_layer
    self.dtype = memory_layer.dtype
    if not callable(probability_fn):
      raise TypeError("probability_fn must be callable, saw type: %s" %
                      type(probability_fn).__name__)
    if score_mask_value is None:
      score_mask_value = dtypes.as_dtype(
          self._memory_layer.dtype).as_numpy_dtype(-np.inf)
    # Wrap probability_fn so the score is length-masked before normalization.
    self._probability_fn = lambda score, prev: (  # pylint:disable=g-long-lambda
        probability_fn(
            _maybe_mask_score(score, memory_sequence_length, score_mask_value),
            prev))
    with ops.name_scope(
        name, "BaseAttentionMechanismInit", nest.flatten(memory)):
      # values: the (masked) raw memory; keys: memory projected through
      # memory_layer when one is provided, otherwise the values themselves.
      self._values = _prepare_memory(
          memory, memory_sequence_length,
          check_inner_dims_defined=check_inner_dims_defined)
      self._keys = (
          self.memory_layer(self._values) if self.memory_layer  # pylint: disable=not-callable
          else self._values)
      # Prefer statically known dims; fall back to dynamic shapes.
      self._batch_size = (
          tensor_shape.dimension_value(self._keys.shape[0]) or
          array_ops.shape(self._keys)[0])
      self._alignments_size = (tensor_shape.dimension_value(self._keys.shape[1])
                               or array_ops.shape(self._keys)[1])

  @property
  def memory_layer(self):
    return self._memory_layer

  @property
  def query_layer(self):
    return self._query_layer

  @property
  def values(self):
    return self._values

  @property
  def keys(self):
    return self._keys

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def alignments_size(self):
    return self._alignments_size

  @property
  def state_size(self):
    return self._alignments_size

  def initial_alignments(self, batch_size, dtype):
    """Creates the initial alignment values for the `AttentionWrapper` class.

    This is important for AttentionMechanisms that use the previous alignment
    to calculate the alignment at the next time step (e.g. monotonic attention).

    The default behavior is to return a tensor of all zeros.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A `dtype` tensor shaped `[batch_size, alignments_size]`
      (`alignments_size` is the values' `max_time`).
    """
    max_time = self._alignments_size
    return _zero_state_tensors(max_time, batch_size, dtype)

  def initial_state(self, batch_size, dtype):
    """Creates the initial state values for the `AttentionWrapper` class.

    This is important for AttentionMechanisms that use the previous alignment
    to calculate the alignment at the next time step (e.g. monotonic attention).

    The default behavior is to return the same output as initial_alignments.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A structure of all-zero tensors with shapes as described by `state_size`.
    """
    return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
  """Computes Luong-style (multiplicative) attention scores.

  The standard form is a batched dot product between the query and each
  key, as described in:

    Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
    "Effective Approaches to Attention-based Neural Machine Translation."
    EMNLP 2015. https://arxiv.org/abs/1508.04025

  With `scale=True`, the scores are additionally multiplied by a trainable
  scalar, inspired partly by the normalized form of Bahdanau attention.

  Args:
    query: Tensor, shape `[batch_size, num_units]` to compare to keys.
    keys: Processed memory, shape `[batch_size, max_time, num_units]`.
    scale: Whether to apply a scale to the score function.

  Returns:
    A `[batch_size, max_time]` tensor of unnormalized score values.

  Raises:
    ValueError: If `key` and `query` depths do not match.
  """
  query_units = query.get_shape()[-1]
  key_units = keys.get_shape()[-1]
  if query_units != key_units:
    raise ValueError(
        "Incompatible or unknown inner dimensions between query and keys. "
        "Query (%s) has units: %s. Keys (%s) have units: %s. "
        "Perhaps you need to set num_units to the keys' dimension (%s)?"
        % (query, query_units, keys, key_units, key_units))

  # Batched matmul needs the query as [batch_size, 1, num_units] so that
  #   [batch_size, 1, num_units] . [batch_size, num_units, max_time]
  # (keys transposed via transpose_b) produces [batch_size, 1, max_time];
  # the singleton center axis is then squeezed away.
  expanded_query = array_ops.expand_dims(query, 1)
  score = array_ops.squeeze(
      math_ops.matmul(expanded_query, keys, transpose_b=True), [1])

  if scale:
    # Trainable scalar multiplier used only by the scaled variant.
    g = variable_scope.get_variable(
        "attention_g", dtype=query.dtype,
        initializer=init_ops.ones_initializer, shape=())
    score = g * score
  return score
class LuongAttention(_BaseAttentionMechanism):
  """Luong-style (multiplicative) attention mechanism.

  Implements the scoring function of:

    Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
    "Effective Approaches to Attention-based Neural Machine Translation."
    EMNLP 2015. https://arxiv.org/abs/1508.04025

  A second, scaled form — inspired partly by the normalized form of
  Bahdanau attention — is selected by constructing the object with
  `scale=True`.
  """

  def __init__(self,
               num_units,
               memory,
               memory_sequence_length=None,
               scale=False,
               probability_fn=None,
               score_mask_value=None,
               dtype=None,
               name="LuongAttention"):
    """Construct the AttentionMechanism mechanism.

    Args:
      num_units: The depth of the attention mechanism.
      memory: The memory to query; usually the output of an RNN encoder. This
        tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: (optional) Sequence lengths for the batch entries
        in memory. If provided, the memory tensor rows are masked with zeros
        for values past the respective sequence lengths.
      scale: Python boolean. Whether to scale the energy term.
      probability_fn: (optional) A `callable`. Converts the score to
        probabilities. The default is `tf.nn.softmax`. Other options include
        `tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
        Its signature should be: `probabilities = probability_fn(score)`.
      score_mask_value: (optional) The mask value for score before passing into
        `probability_fn`. The default is -inf. Only used if
        `memory_sequence_length` is not None.
      dtype: The data type for the memory layer of the attention mechanism.
      name: Name to use when creating ops.
    """
    if dtype is None:
      dtype = dtypes.float32
    if probability_fn is None:
      probability_fn = nn_ops.softmax

    # The base class expects a (score, state) signature; the user-supplied
    # callable only consumes the score.
    def wrapped_probability_fn(score, _):
      return probability_fn(score)

    # Only the memory is projected here; the raw query is used for scoring,
    # so its depth must already equal `num_units`.
    super(LuongAttention, self).__init__(
        query_layer=None,
        memory_layer=layers_core.Dense(
            num_units, name="memory_layer", use_bias=False, dtype=dtype),
        memory=memory,
        probability_fn=wrapped_probability_fn,
        memory_sequence_length=memory_sequence_length,
        score_mask_value=score_mask_value,
        name=name)
    self._name = name
    self._num_units = num_units
    self._scale = scale

  def __call__(self, query, state):
    """Score the query based on the keys and values.

    Args:
      query: Tensor of dtype matching `self.values` and shape
        `[batch_size, query_depth]`.
      state: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]`
        (`alignments_size` is memory's `max_time`).

    Returns:
      alignments: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]` (`alignments_size` is memory's
        `max_time`).
    """
    with variable_scope.variable_scope(None, "luong_attention", [query]):
      raw_score = _luong_score(query, self._keys, self._scale)
    alignments = self._probability_fn(raw_score, state)
    # The default next mechanism state is just the alignments.
    return alignments, alignments
def _bahdanau_score(processed_query, keys, normalize):
  """Computes Bahdanau-style (additive) attention scores.

  The standard form is described in:

    Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
    "Neural Machine Translation by Jointly Learning to Align and Translate."
    ICLR 2015. https://arxiv.org/abs/1409.0473

  With `normalize=True`, a weight-normalized form is used instead,
  inspired by:

    Tim Salimans, Diederik P. Kingma.
    "Weight Normalization: A Simple Reparameterization to Accelerate
    Training of Deep Neural Networks."
    https://arxiv.org/abs/1602.07868

  Args:
    processed_query: Tensor, shape `[batch_size, num_units]` to compare to
      keys.
    keys: Processed memory, shape `[batch_size, max_time, num_units]`.
    normalize: Whether to normalize the score function.

  Returns:
    A `[batch_size, max_time]` tensor of unnormalized score values.
  """
  dtype = processed_query.dtype
  # Static trailing (units) dimension of keys if known, else dynamic shape.
  num_units = tensor_shape.dimension_value(
      keys.shape[2]) or array_ops.shape(keys)[2]
  # [batch_size, 1, num_units] so the query broadcasts across max_time.
  query = array_ops.expand_dims(processed_query, 1)
  v = variable_scope.get_variable(
      "attention_v", [num_units], dtype=dtype)
  if not normalize:
    return math_ops.reduce_sum(v * math_ops.tanh(keys + query), [2])
  # Weight-normalized form: score uses normed_v = g * v / ||v|| and adds a
  # bias inside the tanh nonlinearity.
  g = variable_scope.get_variable(
      "attention_g", dtype=dtype,
      initializer=init_ops.constant_initializer(math.sqrt((1. / num_units))),
      shape=())
  b = variable_scope.get_variable(
      "attention_b", [num_units], dtype=dtype,
      initializer=init_ops.zeros_initializer())
  normed_v = g * v * math_ops.rsqrt(
      math_ops.reduce_sum(math_ops.square(v)))
  return math_ops.reduce_sum(
      normed_v * math_ops.tanh(keys + query + b), [2])
class BahdanauAttention(_BaseAttentionMechanism):
  """Bahdanau-style (additive) attention mechanism.

  Implements the scoring function of:

    Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
    "Neural Machine Translation by Jointly Learning to Align and Translate."
    ICLR 2015. https://arxiv.org/abs/1409.0473

  A weight-normalized form — inspired by:

    Tim Salimans, Diederik P. Kingma.
    "Weight Normalization: A Simple Reparameterization to Accelerate
    Training of Deep Neural Networks."
    https://arxiv.org/abs/1602.07868

  — is selected by constructing the object with `normalize=True`.
  """

  def __init__(self,
               num_units,
               memory,
               memory_sequence_length=None,
               normalize=False,
               probability_fn=None,
               score_mask_value=None,
               dtype=None,
               name="BahdanauAttention"):
    """Construct the Attention mechanism.

    Args:
      num_units: The depth of the query mechanism.
      memory: The memory to query; usually the output of an RNN encoder. This
        tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length (optional): Sequence lengths for the batch entries
        in memory. If provided, the memory tensor rows are masked with zeros
        for values past the respective sequence lengths.
      normalize: Python boolean. Whether to normalize the energy term.
      probability_fn: (optional) A `callable`. Converts the score to
        probabilities. The default is `tf.nn.softmax`. Other options include
        `tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
        Its signature should be: `probabilities = probability_fn(score)`.
      score_mask_value: (optional): The mask value for score before passing
        into `probability_fn`. The default is -inf. Only used if
        `memory_sequence_length` is not None.
      dtype: The data type for the query and memory layers of the attention
        mechanism.
      name: Name to use when creating ops.
    """
    if dtype is None:
      dtype = dtypes.float32
    if probability_fn is None:
      probability_fn = nn_ops.softmax

    # The base class expects a (score, state) signature; the user-supplied
    # callable only consumes the score.
    def wrapped_probability_fn(score, _):
      return probability_fn(score)

    # Both the query and the memory are projected to `num_units` here.
    super(BahdanauAttention, self).__init__(
        query_layer=layers_core.Dense(
            num_units, name="query_layer", use_bias=False, dtype=dtype),
        memory_layer=layers_core.Dense(
            num_units, name="memory_layer", use_bias=False, dtype=dtype),
        memory=memory,
        probability_fn=wrapped_probability_fn,
        memory_sequence_length=memory_sequence_length,
        score_mask_value=score_mask_value,
        name=name)
    self._name = name
    self._num_units = num_units
    self._normalize = normalize

  def __call__(self, query, state):
    """Score the query based on the keys and values.

    Args:
      query: Tensor of dtype matching `self.values` and shape
        `[batch_size, query_depth]`.
      state: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]`
        (`alignments_size` is memory's `max_time`).

    Returns:
      alignments: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]` (`alignments_size` is memory's
        `max_time`).
    """
    with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
      projected_query = (
          self.query_layer(query) if self.query_layer else query)
      raw_score = _bahdanau_score(projected_query, self._keys, self._normalize)
    alignments = self._probability_fn(raw_score, state)
    # The default next mechanism state is just the alignments.
    return alignments, alignments
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can be numerically unstable when the
  argument contains very small and/or zero values. As long as the argument is
  all positive, the cumulative product can instead be computed as
  exp(cumsum(log(x))). This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.

  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    # Smallest positive normal number for the dtype; clipping to [tiny, 1]
    # keeps log() finite.
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    clipped = clip_ops.clip_by_value(x, tiny, 1)
    return math_ops.exp(
        math_ops.cumsum(math_ops.log(clipped), *args, **kwargs))
def monotonic_attention(p_choose_i, previous_attention, mode):
  """Compute monotonic attention distribution from choosing probabilities.

  Monotonic attention implies that the input sequence is processed in an
  explicitly left-to-right manner when generating the output sequence. In
  addition, once an input sequence element is attended to at a given output
  timestep, elements occurring before it cannot be attended to at subsequent
  output timesteps. This function generates attention distributions according
  to these assumptions. For more information, see `Online and Linear-Time
  Attention by Enforcing Monotonic Alignments`.

  Args:
    p_choose_i: Probability of choosing input sequence/memory element i. Should
      be of shape (batch_size, input_sequence_length), and should all be in the
      range [0, 1].
    previous_attention: The attention distribution from the previous output
      timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
      0] for all n in [0, ... batch_size - 1].
    mode: How to compute the attention distribution. Must be one of
      'recursive', 'parallel', or 'hard'.
      * 'recursive' uses tf.scan to recursively compute the distribution.
        This is slowest but is exact, general, and does not suffer from
        numerical instabilities.
      * 'parallel' uses parallelized cumulative-sum and cumulative-product
        operations to compute a closed-form solution to the recurrence
        relation defining the attention distribution. This makes it more
        efficient than 'recursive', but it requires numerical checks which
        make the distribution non-exact. This can be a problem in particular
        when input_sequence_length is long and/or p_choose_i has entries very
        close to 0 or 1.
      * 'hard' requires that the probabilities in p_choose_i are all either 0
        or 1, and subsequently uses a more efficient and exact solution.

  Returns:
    A tensor of shape (batch_size, input_sequence_length) representing the
    attention distributions for each sequence in the batch.

  Raises:
    ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
  """
  # Force things to be tensors
  p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
  previous_attention = ops.convert_to_tensor(
      previous_attention, name="previous_attention")
  if mode == "recursive":
    # Use .shape[0] when it's not None, or fall back on symbolic shape
    batch_size = tensor_shape.dimension_value(
        p_choose_i.shape[0]) or array_ops.shape(p_choose_i)[0]
    # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
    shifted_1mp_choose_i = array_ops.concat(
        [array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
    # Compute attention distribution recursively as
    #   q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
    #   attention[i] = p_choose_i[i]*q[i]
    # The scan iterates over the time axis, so both loop inputs are
    # transposed to time-major and the result is transposed back.
    attention = p_choose_i*array_ops.transpose(functional_ops.scan(
        # Need to use reshape to remind TF of the shape between loop iterations
        lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
        # Loop variables yz[0] and yz[1]
        [array_ops.transpose(shifted_1mp_choose_i),
         array_ops.transpose(previous_attention)],
        # Initial value of x is just zeros
        array_ops.zeros((batch_size,))))
  elif mode == "parallel":
    # safe_cumprod computes cumprod in logspace with numeric checks
    cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
    # Compute recurrence relation solution in closed form
    attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
        previous_attention /
        # Clip cumprod_1mp to avoid divide-by-zero
        clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
  elif mode == "hard":
    # Remove any probabilities before the index chosen last time step
    p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
    # Now, use exclusive cumprod to remove probabilities after the first
    # chosen index, like so:
    #   p_choose_i                              = [0, 0, 0, 1, 1, 0, 1, 1]
    #   cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
    #   Product of above:                         [0, 0, 0, 1, 0, 0, 0, 0]
    attention = p_choose_i*math_ops.cumprod(
        1 - p_choose_i, axis=1, exclusive=True)
  else:
    raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
  return attention
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
                              seed=None):
  """Attention probability function for monotonic attention.

  Takes unnormalized attention scores, adds pre-sigmoid noise to encourage
  the model to make discrete attention decisions, passes them through a
  sigmoid to obtain "choosing" probabilities, and then calls
  monotonic_attention to obtain the attention distribution. For more
  information, see

    Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
    "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
    ICML 2017. https://arxiv.org/abs/1704.00784

  Args:
    score: Unnormalized attention scores, shape
      `[batch_size, alignments_size]`.
    previous_alignments: Previous attention distribution, shape
      `[batch_size, alignments_size]`.
    sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this
      larger than 0 will encourage the model to produce large attention
      scores, effectively making the choosing probabilities discrete and the
      resulting attention distribution one-hot. It should be set to 0 at
      test-time, and when hard attention is not desired.
    mode: How to compute the attention distribution. Must be one of
      'recursive', 'parallel', or 'hard'. See the docstring for
      `tf.contrib.seq2seq.monotonic_attention` for more information.
    seed: (optional) Random seed for pre-sigmoid noise.

  Returns:
    A `[batch_size, alignments_size]`-shape tensor corresponding to the
    resulting attention distribution.
  """
  if sigmoid_noise > 0:
    # Pre-sigmoid Gaussian noise pushes scores toward saturation, making
    # the choosing probabilities more discrete during training.
    score += sigmoid_noise * random_ops.random_normal(
        array_ops.shape(score), dtype=score.dtype, seed=seed)
  if mode == "hard":
    # Hard sigmoid: choosing probabilities are exactly 0 or 1.
    p_choose_i = math_ops.cast(score > 0, score.dtype)
  else:
    p_choose_i = math_ops.sigmoid(score)
  # Convert from choosing probabilities to an attention distribution.
  return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
  """Base attention mechanism for monotonic attention.

  Simply overrides `initial_alignments` to provide a dirac distribution,
  which the monotonic attention distributions require for correct behavior.
  """

  def initial_alignments(self, batch_size, dtype):
    """Creates the initial alignment values for the monotonic attentions.

    Initializes to dirac distributions, i.e.
    [1, 0, 0, ...memory length..., 0] for all entries in the batch.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A `dtype` tensor shaped `[batch_size, alignments_size]`
      (`alignments_size` is the values' `max_time`).
    """
    # one_hot over index 0 yields the dirac distribution per batch entry.
    zero_indices = array_ops.zeros((batch_size,), dtype=dtypes.int32)
    return array_ops.one_hot(
        zero_indices, self._alignments_size, dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
  """Monotonic attention mechanism with Bahdanau-style energy function.

  This type of attention enforces a monotonic constraint on the attention
  distributions: once the model attends to a given point in the memory it
  can't attend to any prior points at subsequent output timesteps. It
  achieves this by using `_monotonic_probability_fn` instead of softmax to
  construct its attention distributions. Since the attention scores are
  passed through a sigmoid, a learnable scalar bias parameter is applied
  after the score function and before the sigmoid. Otherwise, it is
  equivalent to `BahdanauAttention`. This approach is proposed in

    Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
    "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
    ICML 2017. https://arxiv.org/abs/1704.00784
  """

  def __init__(self,
               num_units,
               memory,
               memory_sequence_length=None,
               normalize=False,
               score_mask_value=None,
               sigmoid_noise=0.,
               sigmoid_noise_seed=None,
               score_bias_init=0.,
               mode="parallel",
               dtype=None,
               name="BahdanauMonotonicAttention"):
    """Construct the Attention mechanism.

    Args:
      num_units: The depth of the query mechanism.
      memory: The memory to query; usually the output of an RNN encoder. This
        tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length (optional): Sequence lengths for the batch entries
        in memory. If provided, the memory tensor rows are masked with zeros
        for values past the respective sequence lengths.
      normalize: Python boolean. Whether to normalize the energy term.
      score_mask_value: (optional): The mask value for score before passing
        into `probability_fn`. The default is -inf. Only used if
        `memory_sequence_length` is not None.
      sigmoid_noise: Standard deviation of pre-sigmoid noise. See the
        docstring for `_monotonic_probability_fn` for more information.
      sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
      score_bias_init: Initial value for score bias scalar. It's recommended
        to initialize this to a negative value when the length of the memory
        is large.
      mode: How to compute the attention distribution. Must be one of
        'recursive', 'parallel', or 'hard'. See the docstring for
        `tf.contrib.seq2seq.monotonic_attention` for more information.
      dtype: The data type for the query and memory layers of the attention
        mechanism.
      name: Name to use when creating ops.
    """
    if dtype is None:
      dtype = dtypes.float32
    # The monotonic probability fn replaces softmax; its extra parameters
    # are bound here so the base class sees a (score, state) signature.
    wrapped_probability_fn = functools.partial(
        _monotonic_probability_fn,
        sigmoid_noise=sigmoid_noise,
        mode=mode,
        seed=sigmoid_noise_seed)
    super(BahdanauMonotonicAttention, self).__init__(
        query_layer=layers_core.Dense(
            num_units, name="query_layer", use_bias=False, dtype=dtype),
        memory_layer=layers_core.Dense(
            num_units, name="memory_layer", use_bias=False, dtype=dtype),
        memory=memory,
        probability_fn=wrapped_probability_fn,
        memory_sequence_length=memory_sequence_length,
        score_mask_value=score_mask_value,
        name=name)
    self._name = name
    self._num_units = num_units
    self._normalize = normalize
    self._score_bias_init = score_bias_init

  def __call__(self, query, state):
    """Score the query based on the keys and values.

    Args:
      query: Tensor of dtype matching `self.values` and shape
        `[batch_size, query_depth]`.
      state: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]`
        (`alignments_size` is memory's `max_time`).

    Returns:
      alignments: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]` (`alignments_size` is memory's
        `max_time`).
    """
    with variable_scope.variable_scope(
        None, "bahdanau_monotonic_attention", [query]):
      projected_query = (
          self.query_layer(query) if self.query_layer else query)
      raw_score = _bahdanau_score(projected_query, self._keys, self._normalize)
      # Learnable scalar bias applied before the sigmoid in the
      # monotonic probability function.
      score_bias = variable_scope.get_variable(
          "attention_score_bias", dtype=projected_query.dtype,
          initializer=self._score_bias_init)
      raw_score += score_bias
    alignments = self._probability_fn(raw_score, state)
    # The default next mechanism state is just the alignments.
    return alignments, alignments
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
  """Monotonic attention mechanism with Luong-style energy function.

  This type of attention enforces a monotonic constraint on the attention
  distributions: once the model attends to a given point in the memory it
  can't attend to any prior points at subsequent output timesteps. It
  achieves this by using `_monotonic_probability_fn` instead of softmax to
  construct its attention distributions. Otherwise, it is equivalent to
  `LuongAttention`. This approach is proposed in

    Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
    "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
    ICML 2017. https://arxiv.org/abs/1704.00784
  """

  def __init__(self,
               num_units,
               memory,
               memory_sequence_length=None,
               scale=False,
               score_mask_value=None,
               sigmoid_noise=0.,
               sigmoid_noise_seed=None,
               score_bias_init=0.,
               mode="parallel",
               dtype=None,
               name="LuongMonotonicAttention"):
    """Construct the Attention mechanism.

    Args:
      num_units: The depth of the query mechanism.
      memory: The memory to query; usually the output of an RNN encoder. This
        tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length (optional): Sequence lengths for the batch entries
        in memory. If provided, the memory tensor rows are masked with zeros
        for values past the respective sequence lengths.
      scale: Python boolean. Whether to scale the energy term.
      score_mask_value: (optional): The mask value for score before passing
        into `probability_fn`. The default is -inf. Only used if
        `memory_sequence_length` is not None.
      sigmoid_noise: Standard deviation of pre-sigmoid noise. See the
        docstring for `_monotonic_probability_fn` for more information.
      sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
      score_bias_init: Initial value for score bias scalar. It's recommended
        to initialize this to a negative value when the length of the memory
        is large.
      mode: How to compute the attention distribution. Must be one of
        'recursive', 'parallel', or 'hard'. See the docstring for
        `tf.contrib.seq2seq.monotonic_attention` for more information.
      dtype: The data type for the query and memory layers of the attention
        mechanism.
      name: Name to use when creating ops.
    """
    if dtype is None:
      dtype = dtypes.float32
    # The monotonic probability fn replaces softmax; its extra parameters
    # are bound here so the base class sees a (score, state) signature.
    wrapped_probability_fn = functools.partial(
        _monotonic_probability_fn,
        sigmoid_noise=sigmoid_noise,
        mode=mode,
        seed=sigmoid_noise_seed)
    # Only the memory is projected; the raw query is used for scoring.
    super(LuongMonotonicAttention, self).__init__(
        query_layer=None,
        memory_layer=layers_core.Dense(
            num_units, name="memory_layer", use_bias=False, dtype=dtype),
        memory=memory,
        probability_fn=wrapped_probability_fn,
        memory_sequence_length=memory_sequence_length,
        score_mask_value=score_mask_value,
        name=name)
    self._name = name
    self._num_units = num_units
    self._scale = scale
    self._score_bias_init = score_bias_init

  def __call__(self, query, state):
    """Score the query based on the keys and values.

    Args:
      query: Tensor of dtype matching `self.values` and shape
        `[batch_size, query_depth]`.
      state: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]`
        (`alignments_size` is memory's `max_time`).

    Returns:
      alignments: Tensor of dtype matching `self.values` and shape
        `[batch_size, alignments_size]` (`alignments_size` is memory's
        `max_time`).
    """
    with variable_scope.variable_scope(None, "luong_monotonic_attention",
                                       [query]):
      raw_score = _luong_score(query, self._keys, self._scale)
      # Learnable scalar bias applied before the sigmoid in the
      # monotonic probability function.
      score_bias = variable_scope.get_variable(
          "attention_score_bias", dtype=query.dtype,
          initializer=self._score_bias_init)
      raw_score += score_bias
    alignments = self._probability_fn(raw_score, state)
    # The default next mechanism state is just the alignments.
    return alignments, alignments
class AttentionWrapperState(
    collections.namedtuple("AttentionWrapperState",
                           ("cell_state", "attention", "time", "alignments",
                            "alignment_history", "attention_state"))):
  """`namedtuple` storing the state of a `AttentionWrapper`.

  Fields:
    - `cell_state`: The state of the wrapped `RNNCell` at the previous time
      step.
    - `attention`: The attention emitted at the previous time step.
    - `time`: int32 scalar containing the current time step.
    - `alignments`: A single or tuple of `Tensor`(s) containing the alignments
      emitted at the previous time step for each attention mechanism.
    - `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
      containing alignment matrices from all time steps for each attention
      mechanism. Call `stack()` on each to convert to a `Tensor`.
    - `attention_state`: A single or tuple of nested objects
      containing attention mechanism state for each attention mechanism.
      The objects may contain Tensors or TensorArrays.
  """

  def clone(self, **kwargs):
    """Clone this object, overriding components provided by kwargs.

    The new state fields' shape must match original state fields' shape. This
    will be validated, and original fields' shape will be propagated to new
    fields.

    Example:

    ```python
    initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
    initial_state = initial_state.clone(cell_state=encoder_state)
    ```

    Args:
      **kwargs: Any properties of the state object to replace in the returned
        `AttentionWrapperState`.

    Returns:
      A new `AttentionWrapperState` whose properties are the same as
      this one, except any overridden properties as provided in `kwargs`.
    """
    def _preserve_shape(old, new):
      # Shape validation/propagation only applies to tensor -> tensor
      # replacements; other values pass through untouched.
      if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
        return tensor_util.with_same_shape(old, new)
      return new

    replaced = super(AttentionWrapperState, self)._replace(**kwargs)
    return nest.map_structure(_preserve_shape, self, replaced)
def hardmax(logits, name=None):
  """Returns batched one-hot vectors.

  The depth index containing the `1` is that of the maximum logit value.

  Args:
    logits: A batch tensor of logit values.
    name: Name to use when creating ops.

  Returns:
    A batched one-hot tensor.
  """
  with ops.name_scope(name, "Hardmax", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    # Prefer the static depth when known; otherwise fall back on the
    # dynamic shape of the last axis.
    static_depth = tensor_shape.dimension_value(logits.get_shape()[-1])
    if static_depth is not None:
      depth = static_depth
    else:
      depth = array_ops.shape(logits)[-1]
    return array_ops.one_hot(
        math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
def _compute_attention(attention_mechanism, cell_output, attention_state,
                       attention_layer):
  """Computes the attention and alignments for a given attention_mechanism.

  Args:
    attention_mechanism: The `AttentionMechanism` to invoke.
    cell_output: Output of the wrapped cell, used as the query.
    attention_state: Previous attention state passed to the mechanism.
    attention_layer: Optional layer applied to `[cell_output, context]`;
      if `None` the raw context is used as the attention.

  Returns:
    A `(attention, alignments, next_attention_state)` tuple.
  """
  alignments, next_attention_state = attention_mechanism(
      cell_output, state=attention_state)
  # Context is the inner product of alignments and values along the memory
  # time dimension:
  #   [batch_size, 1, memory_time] . [batch_size, memory_time, memory_size]
  # -> [batch_size, 1, memory_size]; the singleton axis is squeezed away.
  context = array_ops.squeeze(
      math_ops.matmul(
          array_ops.expand_dims(alignments, 1), attention_mechanism.values),
      [1])
  if attention_layer is None:
    attention = context
  else:
    attention = attention_layer(array_ops.concat([cell_output, context], 1))
  return attention, alignments, next_attention_state
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None,
attention_layer=None,
attention_fn=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.conrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length. If
attention_layer is set, this must be None. If attention_fn is set,
it must guaranteed that the outputs of attention_fn also meet the
above requirements.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bhadanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.layers.Layer` instances or a
single `tf.layers.Layer` instance taking the context and cell output as
inputs to generate attention at each time step. If None (default), use
the context as attention at each time step. If attention_mechanism is a
list, attention_layer must be a list of the same length. If
attention_layers_size is set, this must be None.
attention_fn: An optional callable function that allows users to provide
their own customized attention function, which takes input
(attention_mechanism, cell_output, attention_state, attention_layer) and
outputs (attention, alignments, next_attention_state). If provided,
the attention_layer_size should be the size of the outputs of
attention_fn.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`;
if `attention_layer_size` and `attention_layer` are set simultaneously.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError("Only one of attention_layer_size and attention_layer "
"should be set")
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
elif attention_layer is not None:
self._attention_layers = tuple(
attention_layer
if isinstance(attention_layer, (list, tuple))
else (attention_layer,))
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d"
% (len(self._attention_layers), len(attention_mechanisms)))
self._attention_layer_size = sum(
tensor_shape.dimension_value(layer.compute_output_shape(
[None,
cell.output_size + tensor_shape.dimension_value(
mechanism.values.shape[-1])])[-1])
for layer, mechanism in zip(
self._attention_layers, attention_mechanisms))
else:
self._attention_layers = None
self._attention_layer_size = sum(
tensor_shape.dimension_value(attention_mechanism.values.shape[-1])
for attention_mechanism in attention_mechanisms)
if attention_fn is None:
attention_fn = _compute_attention
self._attention_fn = attention_fn
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
tensor_shape.dimension_value(final_state_tensor.shape[0])
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
  def zero_state(self, batch_size, dtype):
    """Return an initial (zero) state tuple for this `AttentionWrapper`.

    **NOTE** Please see the initializer documentation for details of how
    to call `zero_state` if using an `AttentionWrapper` with a
    `BeamSearchDecoder`.

    Args:
      batch_size: `0D` integer tensor: the batch size.
      dtype: The internal state data type.

    Returns:
      An `AttentionWrapperState` tuple containing zeroed out tensors and,
      possibly, empty `TensorArray` objects.

    Raises:
      ValueError: (or, possibly at runtime, InvalidArgument), if
        `batch_size` does not match the output size of the encoder passed
        to the wrapper object at initialization time.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      # Prefer the initial cell state supplied at construction time; fall
      # back to the wrapped cell's own zero state.
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size. Are you using "
          "the BeamSearchDecoder? If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      # Gate the cell state behind the batch-size asserts via identity ops so
      # the check is guaranteed to run before the state is consumed.
      with ops.control_dependencies(
          self._batch_size_checks(batch_size, error_message)):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      initial_alignments = [
          attention_mechanism.initial_alignments(batch_size, dtype)
          for attention_mechanism in self._attention_mechanisms]
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_layer_size, batch_size,
                                        dtype),
          alignments=self._item_or_tuple(initial_alignments),
          attention_state=self._item_or_tuple(
              attention_mechanism.initial_state(batch_size, dtype)
              for attention_mechanism in self._attention_mechanisms),
          # With alignment history enabled, each mechanism gets a
          # dynamically-sized TensorArray; otherwise an empty-tuple
          # placeholder.
          alignment_history=self._item_or_tuple(
              tensor_array_ops.TensorArray(
                  dtype,
                  size=0,
                  dynamic_size=True,
                  element_shape=alignment.shape)
              if self._alignment_history else ()
              for alignment in initial_alignments))
  def call(self, inputs, state):
    """Perform a step of attention-wrapped RNN.

    - Step 1: Mix the `inputs` and previous step's `attention` output via
      `cell_input_fn`.
    - Step 2: Call the wrapped `cell` with this input and its previous state.
    - Step 3: Score the cell's output with `attention_mechanism`.
    - Step 4: Calculate the alignments by passing the score through the
      `normalizer`.
    - Step 5: Calculate the context vector as the inner product between the
      alignments and the attention_mechanism's values (memory).
    - Step 6: Calculate the attention output by concatenating the cell output
      and context through the attention layer (a linear layer with
      `attention_layer_size` outputs).

    Args:
      inputs: (Possibly nested tuple of) Tensor, the input at this time step.
      state: An instance of `AttentionWrapperState` containing
        tensors from the previous time step.

    Returns:
      A tuple `(attention_or_cell_output, next_state)`, where:
      - `attention_or_cell_output` depending on `output_attention`.
      - `next_state` is an instance of `AttentionWrapperState`
         containing the state calculated at this time step.

    Raises:
      TypeError: If `state` is not an instance of `AttentionWrapperState`.
    """
    if not isinstance(state, AttentionWrapperState):
      raise TypeError("Expected state to be instance of AttentionWrapperState. "
                      "Received type %s instead." % type(state))
    # Step 1: Calculate the true inputs to the cell based on the
    # previous attention value.
    cell_inputs = self._cell_input_fn(inputs, state.attention)
    cell_state = state.cell_state
    # Step 2: advance the wrapped cell one step.
    cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
    # Static batch size when known, else the dynamic one.
    cell_batch_size = (
        tensor_shape.dimension_value(cell_output.shape[0]) or
        array_ops.shape(cell_output)[0])
    error_message = (
        "When applying AttentionWrapper %s: " % self.name +
        "Non-matching batch sizes between the memory "
        "(encoder output) and the query (decoder output). Are you using "
        "the BeamSearchDecoder? You may need to tile your memory input via "
        "the tf.contrib.seq2seq.tile_batch function with argument "
        "multiple=beam_width.")
    # Gate the cell output behind the batch-size asserts so the check runs
    # before attention is computed.
    with ops.control_dependencies(
        self._batch_size_checks(cell_batch_size, error_message)):
      cell_output = array_ops.identity(
          cell_output, name="checked_cell_output")
    # Normalize single-mechanism state into list form so the loop below can
    # treat both configurations uniformly.
    if self._is_multi:
      previous_attention_state = state.attention_state
      previous_alignment_history = state.alignment_history
    else:
      previous_attention_state = [state.attention_state]
      previous_alignment_history = [state.alignment_history]
    all_alignments = []
    all_attentions = []
    all_attention_states = []
    maybe_all_histories = []
    # Steps 3-6, once per attention mechanism.
    for i, attention_mechanism in enumerate(self._attention_mechanisms):
      attention, alignments, next_attention_state = self._attention_fn(
          attention_mechanism, cell_output, previous_attention_state[i],
          self._attention_layers[i] if self._attention_layers else None)
      # Append this step's alignments to the (time-major) history, if kept.
      alignment_history = previous_alignment_history[i].write(
          state.time, alignments) if self._alignment_history else ()
      all_attention_states.append(next_attention_state)
      all_alignments.append(alignments)
      all_attentions.append(attention)
      maybe_all_histories.append(alignment_history)
    # Concatenate per-mechanism attention along the feature axis.
    attention = array_ops.concat(all_attentions, 1)
    next_state = AttentionWrapperState(
        time=state.time + 1,
        cell_state=next_cell_state,
        attention=attention,
        attention_state=self._item_or_tuple(all_attention_states),
        alignments=self._item_or_tuple(all_alignments),
        alignment_history=self._item_or_tuple(maybe_all_histories))
    if self._output_attention:
      return attention, next_state
    else:
      return cell_output, next_state
from openerp.osv import fields as old_fields
from openerp import api,models,fields,tools
try:
from openerp.addons.email_template.email_template import mako_template_env
except ImportError:
try:
from openerp.addons.mail.mail_template import mako_template_env
except ImportError:
pass
from openerp.loglevels import ustr
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.charset import Charset
from email.header import Header
from email.utils import formatdate, make_msgid, COMMASPACE, parseaddr
from email import Encoders
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import html2text
import openerp.tools as tools
import re
import base64
from openerp.addons.base.ir.ir_mail_server import encode_rfc2822_address_header, encode_header, encode_header_param
class res_users(models.Model):
    """Adds template-driven email signatures to users."""
    _inherit = 'res.users'

    # Template from which the HTML signature is (re)generated.
    signature_id = fields.Many2one('res.users.signature', string='Signature template', help='Keep empty to edit signature manually')
    _columns = {
        # Redeclared with sanitize=False so template-produced markup is kept.
        'signature': old_fields.html('Signature', sanitize=False)
    }

    @api.one
    @api.onchange('signature_id')
    def render_signature_id(self):
        """Render the selected signature template with the current user as
        context and store the result, if it changed."""
        if not self.signature_id:
            return
        # mako_template_env is imported from email_template/mail at module
        # load; rendering receives the user record as template context.
        mako = mako_template_env.from_string(tools.ustr(self.signature_id.template))
        html = mako.render({'user': self})
        if html != self.signature:
            self.signature = html

    @api.one
    def write(self, vals):
        """Standard write, plus re-rendering of the signature when the
        company changes (templates may reference company fields)."""
        res = super(res_users, self).write(vals)
        # Direct membership test instead of building a one-element list
        # comprehension wrapped in any().
        if 'company_id' in vals:
            self.render_signature_id()
        return res
class res_users_signature(models.Model):
    """A reusable HTML signature template shared by several users."""
    _name = 'res.users.signature'

    name = fields.Char('Name')
    comment = fields.Text('Internal note')
    template = fields.Html('Template', sanitize=False, help='''You can use variables:
* ${user.name}
* ${user.function} (job position)
* ${user.partner_id.company_id.name} (company in a partner form)
* ${user.company_id.name} (current company)
* ${user.email}
* ${user.phone}
* ${user.mobile}
* etc. (contact your administrator for further information)
You can use control structures:
% if user.mobile
Mobile: ${user.mobile}
% endif
''')
    # Users currently bound to this signature template.
    user_ids = fields.One2many('res.users', 'signature_id', string='Users')

    @api.one
    def write(self, vals):
        """Standard write, then push the (possibly changed) template to all
        users referencing it."""
        res = super(res_users_signature, self).write(vals)
        self.action_update_signature()
        return res

    @api.one
    def action_update_signature(self):
        """Re-render the signature for every user using this template."""
        self.user_ids.render_signature_id()
class res_partner(models.Model):
    """Keeps related users' signatures in sync with partner data."""
    _inherit = 'res.partner'

    @api.one
    def write(self, vals):
        """Standard write, then re-render the signature of any user linked
        to this partner (templates may reference partner fields)."""
        result = super(res_partner, self).write(vals)
        users = self.user_ids
        if users:
            users.render_signature_id()
        return result
class ir_mail_server(models.Model):
    # Overrides the core mail server so that inline base64 PNG images in an
    # outgoing HTML body are turned into regular MIME attachments referenced
    # via "cid:" links (some mail clients reject huge data: URIs).
    _inherit = "ir.mail_server"

    def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                    attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                    body_alternative=None, subtype_alternative='plain'):
        """ copy-pasted from openerp/addons/base/ir/ir_mail_server.py::build_email

        Local change: before the standard assembly, every inline
        `"data:image/png;base64,..."` occurrence in `body` is decoded into an
        attachment named __image-N__ and replaced by `"cid:__image-N__"`.
        """
        ftemplate = '__image-%s__'
        fcounter = 0
        attachments = attachments or []
        # Match the full quoted data URI so the replacement keeps the quotes.
        pattern = re.compile(r'"data:image/png;base64,[^"]*"')
        pos = 0
        new_body = ''
        while True:
            match = pattern.search(body, pos)
            if not match:
                break
            s = match.start()
            e = match.end()
            # Strip the opening quote + prefix and the closing quote to get
            # the raw base64 payload.
            data = body[s+len('"data:image/png;base64,'):e-1]
            new_body += body[pos:s]
            fname = ftemplate % fcounter
            fcounter += 1
            attachments.append( (fname, base64.b64decode(data)) )
            new_body += '"cid:%s"' % fname
            pos = e
        new_body += body[pos:]
        body = new_body
        email_from = email_from or tools.config.get('email_from')
        assert email_from, "You must either provide a sender address explicitly or configure "\
                           "a global sender address in the server configuration or with the "\
                           "--email-from startup parameter."
        # Note: we must force all strings to 8-bit utf-8 when crafting message,
        # or use encode_header() for headers, which does it automatically.
        headers = headers or {} # need valid dict later
        if not email_cc: email_cc = []
        if not email_bcc: email_bcc = []
        if not body: body = u''
        email_body_utf8 = ustr(body).encode('utf-8')
        email_text_part = MIMEText(email_body_utf8, _subtype=subtype, _charset='utf-8')
        msg = MIMEMultipart()
        if not message_id:
            if object_id:
                message_id = tools.generate_tracking_message_id(object_id)
            else:
                message_id = make_msgid()
        msg['Message-Id'] = encode_header(message_id)
        if references:
            msg['references'] = encode_header(references)
        msg['Subject'] = encode_header(subject)
        msg['From'] = encode_rfc2822_address_header(email_from)
        # Reset Reply-To before deciding between an explicit value and From.
        del msg['Reply-To']
        if reply_to:
            msg['Reply-To'] = encode_rfc2822_address_header(reply_to)
        else:
            msg['Reply-To'] = msg['From']
        msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to))
        if email_cc:
            msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
        if email_bcc:
            msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
        msg['Date'] = formatdate()
        # Custom headers may override normal headers or provide additional ones
        for key, value in headers.iteritems():
            msg[ustr(key).encode('utf-8')] = encode_header(value)
        if subtype == 'html' and not body_alternative and html2text:
            # Always provide alternative text body ourselves if possible.
            text_utf8 = tools.html2text(email_body_utf8.decode('utf-8')).encode('utf-8')
            alternative_part = MIMEMultipart(_subtype="alternative")
            alternative_part.attach(MIMEText(text_utf8, _charset='utf-8', _subtype='plain'))
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        elif body_alternative:
            # Include both alternatives, as specified, within a multipart/alternative part
            alternative_part = MIMEMultipart(_subtype="alternative")
            body_alternative_utf8 = ustr(body_alternative).encode('utf-8')
            alternative_body_part = MIMEText(body_alternative_utf8, _subtype=subtype_alternative, _charset='utf-8')
            alternative_part.attach(alternative_body_part)
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        else:
            msg.attach(email_text_part)
        if attachments:
            for (fname, fcontent) in attachments:
                filename_rfc2047 = encode_header_param(fname)
                part = MIMEBase('application', "octet-stream")
                # The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail
                # so we fix it by using RFC2047 encoding for the filename instead.
                part.set_param('name', filename_rfc2047)
                part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)
                # Content-ID makes the attachment addressable from "cid:"
                # links inserted by the inline-image extraction above.
                part.add_header('Content-ID', '<%s>' % filename_rfc2047) # NEW STUFF
                part.set_payload(fcontent)
                Encoders.encode_base64(part)
                msg.attach(part)
        return msg
# coding=utf-8
"""
Collects stats from bind 9.5's statistics server
#### Dependencies
* [bind 9.5](http://www.isc.org/software/bind/new-features/9.5)
configured with libxml2 and statistics-channels
"""
import diamond.collector
import sys
import urllib2
if sys.version_info >= (2, 5):
import xml.etree.cElementTree as ElementTree
ElementTree # workaround for pyflakes issue #13
else:
import cElementTree as ElementTree
class BindCollector(diamond.collector.Collector):
    """Collects metrics from the XML statistics channel of BIND 9.5+."""

    def get_default_config_help(self):
        """Return help text for this collector's configuration options."""
        config_help = super(BindCollector, self).get_default_config_help()
        config_help.update({
            'host': "",
            'port': "",
            'publish': "Available stats: \n"
            + " - resolver (Per-view resolver and cache statistics) \n"
            + " - server (Incoming requests and their answers) \n"
            + " - zonemgmt (Zone management requests/responses)\n"
            + " - sockets (Socket statistics) \n"
            + " - memory (Global memory usage) \n",
            'publish_view_bind': "",
            'publish_view_meta': "",
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(BindCollector, self).get_default_config()
        config.update({
            'host': 'localhost',
            'port': 8080,
            'path': 'bind',
            # Available stats:
            # - resolver (Per-view resolver and cache statistics)
            # - server (Incoming requests and their answers)
            # - zonemgmt (Requests/responses related to zone management)
            # - sockets (Socket statistics)
            # - memory (Global memory usage)
            'publish': [
                'resolver',
                'server',
                'zonemgmt',
                'sockets',
                'memory',
            ],
            # By default we don't publish these special views
            'publish_view_bind': False,
            'publish_view_meta': False,
        })
        return config

    def clean_counter(self, name, value):
        """Publish a monotonically-increasing counter as a per-interval rate.

        derivative() turns the raw counter into a delta; negative deltas
        (e.g. after a bind restart resets the counter) are clamped to zero.
        """
        value = self.derivative(name, value)
        if value < 0:
            value = 0
        self.publish(name, value)

    def collect(self):
        """Fetch the statistics XML from bind and publish the stat groups
        selected in config['publish']."""
        try:
            req = urllib2.urlopen('http://%s:%d/' % (
                self.config['host'], int(self.config['port'])))
        except Exception, e:
            self.log.error('Couldnt connect to bind: %s', e)
            return {}
        tree = ElementTree.parse(req)
        if not tree:
            raise ValueError("Corrupt XML file, no statistics found")
        root = tree.find('bind/statistics')
        # Per-view resolver and cache statistics.
        if 'resolver' in self.config['publish']:
            for view in root.findall('views/view'):
                name = view.find('name').text
                # Skip bind's internal views unless explicitly requested.
                if name == '_bind' and not self.config['publish_view_bind']:
                    continue
                if name == '_meta' and not self.config['publish_view_meta']:
                    continue
                nzones = len(view.findall('zones/zone'))
                self.publish('view.%s.zones' % name, nzones)
                for counter in view.findall('rdtype'):
                    self.clean_counter(
                        'view.%s.query.%s' % (name,
                                              counter.find('name').text),
                        int(counter.find('counter').text)
                    )
                for counter in view.findall('resstat'):
                    self.clean_counter(
                        'view.%s.resstat.%s' % (name,
                                                counter.find('name').text),
                        int(counter.find('counter').text)
                    )
                # Cache RRset names may contain '!' (negative-cache entries);
                # replace it to keep metric names graphite-safe.
                for counter in view.findall('cache/rrset'):
                    self.clean_counter(
                        'view.%s.cache.%s' % (
                            name, counter.find('name').text.replace('!',
                                                                    'NOT_')),
                        int(counter.find('counter').text)
                    )
        # Incoming requests and their answers.
        if 'server' in self.config['publish']:
            for counter in root.findall('server/requests/opcode'):
                self.clean_counter(
                    'requests.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
            for counter in root.findall('server/queries-in/rdtype'):
                self.clean_counter(
                    'queries.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
            for counter in root.findall('server/nsstat'):
                self.clean_counter(
                    'nsstat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
        # Zone management requests/responses.
        if 'zonemgmt' in self.config['publish']:
            for counter in root.findall('server/zonestat'):
                self.clean_counter(
                    'zonestat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
        # Socket statistics.
        if 'sockets' in self.config['publish']:
            for counter in root.findall('server/sockstat'):
                self.clean_counter(
                    'sockstat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
        # Global memory usage: gauges, published directly (not derived).
        if 'memory' in self.config['publish']:
            for counter in root.find('memory/summary').getchildren():
                self.publish(
                    'memory.%s' % counter.tag,
                    int(counter.text)
                )
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
from hacking import core
# NOTE(Kezar): This check is good enough as long as only py2.7 is supported.
# As soon as we get py3.x support we need to drop it or rewrite it. You can
# read more about it in the dev-list archive, topic:
# "[hacking]proposed rules drop for 1.0"
def _find_module(module, path=None):
    """Locate the filesystem path of a dotted `module` name via imp.

    Walks the dotted name one component at a time with imp.find_module(),
    descending into each package's path.

    Args:
        module: Dotted module name, e.g. 'pkg.sub.mod'.
        path: Optional list of directories to start the search from.

    Returns:
        The search path (a list) where the final component was found, or the
        parent package's path for non-module imports (e.g. importing a name
        from a module).

    Raises:
        ImportError: If the module genuinely cannot be found.
    """
    mod_base = module
    parent_path = None
    while '.' in mod_base:
        first, _, mod_base = mod_base.partition('.')
        parent_path = path
        _, path, _ = imp.find_module(first, path)
        path = [path]
    try:
        _, path, _ = imp.find_module(mod_base, path)
    except ImportError:
        # NOTE(bnemec): There are two reasons we might get here: 1) A
        # non-module import and 2) an import of a namespace module that is
        # in the same namespace as the current project, which caused us to
        # recurse into the project namespace but fail to find the third-party
        # module. For 1), we won't be able to import it as a module, so we
        # return the parent module's path, but for 2) the import below should
        # succeed, so we re-raise the ImportError because the module was
        # legitimately not found in this path.
        try:
            __import__(module)
        except ImportError:
            # Non-module import, return the parent path if we have it
            if parent_path:
                return parent_path
            raise
        raise
    return path
module_cache = dict()
# List of all Python 2 stdlib modules - anything not in this list will be
# allowed in either the stdlib or third-party groups to allow for Python 3
# stdlib additions.
# The list was generated via the following script, which is a variation on
# the one found here:
# http://stackoverflow.com/questions/6463918/how-can-i-get-a-list-of-all-the-python-standard-library-modules
"""
from distutils import sysconfig
import os
import sys
std_lib = sysconfig.get_python_lib(standard_lib=True)
prefix_len = len(std_lib) + 1
modules = ''
line = '['
mod_list = []
for top, dirs, files in os.walk(std_lib):
for name in files:
if 'site-packages' not in top:
if name == '__init__.py':
full_name = top[prefix_len:].replace('/', '.')
mod_list.append(full_name)
elif name.endswith('.py'):
full_name = top.replace('/', '.') + '.'
full_name += name[:-3]
full_name = full_name[prefix_len:]
mod_list.append(full_name)
elif name.endswith('.so') and top.endswith('lib-dynload'):
full_name = name[:-3]
if full_name.endswith('module'):
full_name = full_name[:-6]
mod_list.append(full_name)
for name in sys.builtin_module_names:
mod_list.append(name)
mod_list.sort()
for mod in mod_list:
if len(line + mod) + 8 > 79:
modules += '\n' + line
line = ' '
line += "'%s', " % mod
print modules + ']'
"""
# NOTE: generated table -- regenerate with the script above rather than
# hand-editing individual entries.
py2_stdlib = [
    'BaseHTTPServer', 'Bastion', 'CGIHTTPServer', 'ConfigParser', 'Cookie',
    'DocXMLRPCServer', 'HTMLParser', 'MimeWriter', 'Queue',
    'SimpleHTTPServer', 'SimpleXMLRPCServer', 'SocketServer', 'StringIO',
    'UserDict', 'UserList', 'UserString', '_LWPCookieJar',
    '_MozillaCookieJar', '__builtin__', '__future__', '__main__',
    '__phello__.foo', '_abcoll', '_ast', '_bisect', '_bsddb', '_codecs',
    '_codecs_cn', '_codecs_hk', '_codecs_iso2022', '_codecs_jp',
    '_codecs_kr', '_codecs_tw', '_collections', '_crypt', '_csv',
    '_ctypes', '_curses', '_curses_panel', '_elementtree', '_functools',
    '_hashlib', '_heapq', '_hotshot', '_io', '_json', '_locale',
    '_lsprof', '_multibytecodec', '_multiprocessing', '_osx_support',
    '_pyio', '_random', '_socket', '_sqlite3', '_sre', '_ssl',
    '_strptime', '_struct', '_symtable', '_sysconfigdata',
    '_threading_local', '_warnings', '_weakref', '_weakrefset', 'abc',
    'aifc', 'antigravity', 'anydbm', 'argparse', 'array', 'ast',
    'asynchat', 'asyncore', 'atexit', 'audiodev', 'audioop', 'base64',
    'bdb', 'binascii', 'binhex', 'bisect', 'bsddb', 'bsddb.db',
    'bsddb.dbobj', 'bsddb.dbrecio', 'bsddb.dbshelve', 'bsddb.dbtables',
    'bsddb.dbutils', 'bz2', 'cPickle', 'cProfile', 'cStringIO',
    'calendar', 'cgi', 'cgitb', 'chunk', 'cmath', 'cmd', 'code', 'codecs',
    'codeop', 'collections', 'colorsys', 'commands', 'compileall',
    'compiler', 'compiler.ast', 'compiler.consts', 'compiler.future',
    'compiler.misc', 'compiler.pyassem', 'compiler.pycodegen',
    'compiler.symbols', 'compiler.syntax', 'compiler.transformer',
    'compiler.visitor', 'contextlib', 'cookielib', 'copy', 'copy_reg',
    'crypt', 'csv', 'ctypes', 'ctypes._endian', 'ctypes.macholib',
    'ctypes.macholib.dyld', 'ctypes.macholib.dylib',
    'ctypes.macholib.framework', 'ctypes.util', 'ctypes.wintypes',
    'curses', 'curses.ascii', 'curses.has_key', 'curses.panel',
    'curses.textpad', 'curses.wrapper', 'datetime', 'dbhash', 'dbm',
    'decimal', 'difflib', 'dircache', 'dis', 'distutils',
    'distutils.archive_util', 'distutils.bcppcompiler',
    'distutils.ccompiler', 'distutils.cmd', 'distutils.command',
    'distutils.command.bdist', 'distutils.command.bdist_dumb',
    'distutils.command.bdist_msi', 'distutils.command.bdist_rpm',
    'distutils.command.bdist_wininst', 'distutils.command.build',
    'distutils.command.build_clib', 'distutils.command.build_ext',
    'distutils.command.build_py', 'distutils.command.build_scripts',
    'distutils.command.check', 'distutils.command.clean',
    'distutils.command.config', 'distutils.command.install',
    'distutils.command.install_data',
    'distutils.command.install_egg_info',
    'distutils.command.install_headers', 'distutils.command.install_lib',
    'distutils.command.install_scripts', 'distutils.command.register',
    'distutils.command.sdist', 'distutils.command.upload',
    'distutils.config', 'distutils.core', 'distutils.cygwinccompiler',
    'distutils.debug', 'distutils.dep_util', 'distutils.dir_util',
    'distutils.dist', 'distutils.emxccompiler', 'distutils.errors',
    'distutils.extension', 'distutils.fancy_getopt',
    'distutils.file_util', 'distutils.filelist', 'distutils.log',
    'distutils.msvc9compiler', 'distutils.msvccompiler',
    'distutils.spawn', 'distutils.sysconfig', 'distutils.text_file',
    'distutils.unixccompiler', 'distutils.util', 'distutils.version',
    'distutils.versionpredicate', 'dl', 'doctest', 'dumbdbm',
    'dummy_thread', 'dummy_threading', 'email', 'email._parseaddr',
    'email.base64mime', 'email.charset', 'email.encoders', 'email.errors',
    'email.feedparser', 'email.generator', 'email.header',
    'email.iterators', 'email.message', 'email.mime',
    'email.mime.application', 'email.mime.audio', 'email.mime.base',
    'email.mime.image', 'email.mime.message', 'email.mime.multipart',
    'email.mime.nonmultipart', 'email.mime.text', 'email.parser',
    'email.quoprimime', 'email.utils', 'encodings', 'encodings.aliases',
    'encodings.ascii', 'encodings.base64_codec', 'encodings.big5',
    'encodings.big5hkscs', 'encodings.bz2_codec', 'encodings.charmap',
    'encodings.cp037', 'encodings.cp1006', 'encodings.cp1026',
    'encodings.cp1140', 'encodings.cp1250', 'encodings.cp1251',
    'encodings.cp1252', 'encodings.cp1253', 'encodings.cp1254',
    'encodings.cp1255', 'encodings.cp1256', 'encodings.cp1257',
    'encodings.cp1258', 'encodings.cp424', 'encodings.cp437',
    'encodings.cp500', 'encodings.cp720', 'encodings.cp737',
    'encodings.cp775', 'encodings.cp850', 'encodings.cp852',
    'encodings.cp855', 'encodings.cp856', 'encodings.cp857',
    'encodings.cp858', 'encodings.cp860', 'encodings.cp861',
    'encodings.cp862', 'encodings.cp863', 'encodings.cp864',
    'encodings.cp865', 'encodings.cp866', 'encodings.cp869',
    'encodings.cp874', 'encodings.cp875', 'encodings.cp932',
    'encodings.cp949', 'encodings.cp950', 'encodings.euc_jis_2004',
    'encodings.euc_jisx0213', 'encodings.euc_jp', 'encodings.euc_kr',
    'encodings.gb18030', 'encodings.gb2312', 'encodings.gbk',
    'encodings.hex_codec', 'encodings.hp_roman8', 'encodings.hz',
    'encodings.idna', 'encodings.iso2022_jp', 'encodings.iso2022_jp_1',
    'encodings.iso2022_jp_2', 'encodings.iso2022_jp_2004',
    'encodings.iso2022_jp_3', 'encodings.iso2022_jp_ext',
    'encodings.iso2022_kr', 'encodings.iso8859_1', 'encodings.iso8859_10',
    'encodings.iso8859_11', 'encodings.iso8859_13',
    'encodings.iso8859_14', 'encodings.iso8859_15',
    'encodings.iso8859_16', 'encodings.iso8859_2', 'encodings.iso8859_3',
    'encodings.iso8859_4', 'encodings.iso8859_5', 'encodings.iso8859_6',
    'encodings.iso8859_7', 'encodings.iso8859_8', 'encodings.iso8859_9',
    'encodings.johab', 'encodings.koi8_r', 'encodings.koi8_u',
    'encodings.latin_1', 'encodings.mac_arabic', 'encodings.mac_centeuro',
    'encodings.mac_croatian', 'encodings.mac_cyrillic',
    'encodings.mac_farsi', 'encodings.mac_greek', 'encodings.mac_iceland',
    'encodings.mac_latin2', 'encodings.mac_roman',
    'encodings.mac_romanian', 'encodings.mac_turkish', 'encodings.mbcs',
    'encodings.palmos', 'encodings.ptcp154', 'encodings.punycode',
    'encodings.quopri_codec', 'encodings.raw_unicode_escape',
    'encodings.rot_13', 'encodings.shift_jis', 'encodings.shift_jis_2004',
    'encodings.shift_jisx0213', 'encodings.string_escape',
    'encodings.tis_620', 'encodings.undefined',
    'encodings.unicode_escape', 'encodings.unicode_internal',
    'encodings.utf_16', 'encodings.utf_16_be', 'encodings.utf_16_le',
    'encodings.utf_32', 'encodings.utf_32_be', 'encodings.utf_32_le',
    'encodings.utf_7', 'encodings.utf_8', 'encodings.utf_8_sig',
    'encodings.uu_codec', 'encodings.zlib_codec', 'errno', 'exceptions',
    'fcntl', 'filecmp', 'fileinput', 'fnmatch', 'formatter', 'fpformat',
    'fractions', 'ftplib', 'functools', 'future_builtins', 'gc', 'gdbm',
    'genericpath', 'getopt', 'getpass', 'gettext', 'glob', 'grp', 'gzip',
    'hashlib', 'heapq', 'hmac', 'hotshot', 'hotshot.log', 'hotshot.stats',
    'hotshot.stones', 'htmlentitydefs', 'htmllib', 'httplib', 'idlelib',
    'idlelib.AutoComplete', 'idlelib.AutoCompleteWindow',
    'idlelib.AutoExpand', 'idlelib.Bindings', 'idlelib.CallTipWindow',
    'idlelib.CallTips', 'idlelib.ClassBrowser', 'idlelib.CodeContext',
    'idlelib.ColorDelegator', 'idlelib.Debugger', 'idlelib.Delegator',
    'idlelib.EditorWindow', 'idlelib.FileList', 'idlelib.FormatParagraph',
    'idlelib.GrepDialog', 'idlelib.HyperParser', 'idlelib.IOBinding',
    'idlelib.IdleHistory', 'idlelib.MultiCall', 'idlelib.MultiStatusBar',
    'idlelib.ObjectBrowser', 'idlelib.OutputWindow', 'idlelib.ParenMatch',
    'idlelib.PathBrowser', 'idlelib.Percolator', 'idlelib.PyParse',
    'idlelib.PyShell', 'idlelib.RemoteDebugger',
    'idlelib.RemoteObjectBrowser', 'idlelib.ReplaceDialog',
    'idlelib.RstripExtension', 'idlelib.ScriptBinding',
    'idlelib.ScrolledList', 'idlelib.SearchDialog',
    'idlelib.SearchDialogBase', 'idlelib.SearchEngine',
    'idlelib.StackViewer', 'idlelib.ToolTip', 'idlelib.TreeWidget',
    'idlelib.UndoDelegator', 'idlelib.WidgetRedirector',
    'idlelib.WindowList', 'idlelib.ZoomHeight', 'idlelib.aboutDialog',
    'idlelib.configDialog', 'idlelib.configHandler',
    'idlelib.configHelpSourceEdit', 'idlelib.configSectionNameDialog',
    'idlelib.dynOptionMenuWidget', 'idlelib.idle', 'idlelib.idlever',
    'idlelib.keybindingDialog', 'idlelib.macosxSupport', 'idlelib.rpc',
    'idlelib.run', 'idlelib.tabbedpages', 'idlelib.textView', 'ihooks',
    'imageop', 'imaplib', 'imghdr', 'imp', 'importlib', 'imputil',
    'inspect', 'io', 'itertools', 'json', 'json.decoder', 'json.encoder',
    'json.scanner', 'json.tool', 'keyword', 'lib2to3', 'lib2to3.__main__',
    'lib2to3.btm_matcher', 'lib2to3.btm_utils', 'lib2to3.fixer_base',
    'lib2to3.fixer_util', 'lib2to3.fixes', 'lib2to3.fixes.fix_apply',
    'lib2to3.fixes.fix_basestring', 'lib2to3.fixes.fix_buffer',
    'lib2to3.fixes.fix_callable', 'lib2to3.fixes.fix_dict',
    'lib2to3.fixes.fix_except', 'lib2to3.fixes.fix_exec',
    'lib2to3.fixes.fix_execfile', 'lib2to3.fixes.fix_exitfunc',
    'lib2to3.fixes.fix_filter', 'lib2to3.fixes.fix_funcattrs',
    'lib2to3.fixes.fix_future', 'lib2to3.fixes.fix_getcwdu',
    'lib2to3.fixes.fix_has_key', 'lib2to3.fixes.fix_idioms',
    'lib2to3.fixes.fix_import', 'lib2to3.fixes.fix_imports',
    'lib2to3.fixes.fix_imports2', 'lib2to3.fixes.fix_input',
    'lib2to3.fixes.fix_intern', 'lib2to3.fixes.fix_isinstance',
    'lib2to3.fixes.fix_itertools', 'lib2to3.fixes.fix_itertools_imports',
    'lib2to3.fixes.fix_long', 'lib2to3.fixes.fix_map',
    'lib2to3.fixes.fix_metaclass', 'lib2to3.fixes.fix_methodattrs',
    'lib2to3.fixes.fix_ne', 'lib2to3.fixes.fix_next',
    'lib2to3.fixes.fix_nonzero', 'lib2to3.fixes.fix_numliterals',
    'lib2to3.fixes.fix_operator', 'lib2to3.fixes.fix_paren',
    'lib2to3.fixes.fix_print', 'lib2to3.fixes.fix_raise',
    'lib2to3.fixes.fix_raw_input', 'lib2to3.fixes.fix_reduce',
    'lib2to3.fixes.fix_renames', 'lib2to3.fixes.fix_repr',
    'lib2to3.fixes.fix_set_literal', 'lib2to3.fixes.fix_standarderror',
    'lib2to3.fixes.fix_sys_exc', 'lib2to3.fixes.fix_throw',
    'lib2to3.fixes.fix_tuple_params', 'lib2to3.fixes.fix_types',
    'lib2to3.fixes.fix_unicode', 'lib2to3.fixes.fix_urllib',
    'lib2to3.fixes.fix_ws_comma', 'lib2to3.fixes.fix_xrange',
    'lib2to3.fixes.fix_xreadlines', 'lib2to3.fixes.fix_zip',
    'lib2to3.main', 'lib2to3.patcomp', 'lib2to3.pgen2',
    'lib2to3.pgen2.conv', 'lib2to3.pgen2.driver', 'lib2to3.pgen2.grammar',
    'lib2to3.pgen2.literals', 'lib2to3.pgen2.parse', 'lib2to3.pgen2.pgen',
    'lib2to3.pgen2.token', 'lib2to3.pgen2.tokenize', 'lib2to3.pygram',
    'lib2to3.pytree', 'lib2to3.refactor', 'linecache', 'linuxaudiodev',
    'locale', 'logging', 'logging.config', 'logging.handlers', 'macpath',
    'macurl2path', 'mailbox', 'mailcap', 'markupbase', 'marshal', 'math',
    'md5', 'mhlib', 'mimetools', 'mimetypes', 'mimify', 'mmap',
    'modulefinder', 'multifile', 'multiprocessing',
    'multiprocessing.connection', 'multiprocessing.dummy',
    'multiprocessing.dummy.connection', 'multiprocessing.forking',
    'multiprocessing.heap', 'multiprocessing.managers',
    'multiprocessing.pool', 'multiprocessing.process',
    'multiprocessing.queues', 'multiprocessing.reduction',
    'multiprocessing.sharedctypes', 'multiprocessing.synchronize',
    'multiprocessing.util', 'mutex', 'netrc', 'new', 'nis', 'nntplib',
    'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse',
    'os', 'os2emxpath', 'ossaudiodev', 'parser', 'pdb', 'pickle',
    'pickletools', 'pipes', 'pkgutil', 'plat-linux2.CDROM',
    'plat-linux2.DLFCN', 'plat-linux2.IN', 'plat-linux2.TYPES',
    'platform', 'plistlib', 'popen2', 'poplib', 'posix', 'posixfile',
    'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pwd',
    'py_compile', 'pyclbr', 'pydoc', 'pydoc_data', 'pydoc_data.topics',
    'pyexpat', 'quopri', 'random', 're', 'readline', 'repr', 'resource',
    'rexec', 'rfc822', 'rlcompleter', 'robotparser', 'runpy', 'sched',
    'select', 'sets', 'sgmllib', 'sha', 'shelve', 'shlex', 'shutil',
    'signal', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'spwd',
    'sqlite3', 'sqlite3.dbapi2', 'sqlite3.dump', 'sre', 'sre_compile',
    'sre_constants', 'sre_parse', 'ssl', 'stat', 'statvfs', 'string',
    'stringold', 'stringprep', 'strop', 'struct', 'subprocess', 'sunau',
    'sunaudio', 'symbol', 'symtable', 'sys', 'sysconfig', 'syslog',
    'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'termios', 'test',
    'test.test_support', 'textwrap', 'this', 'thread', 'threading',
    'time', 'timeit', 'timing', 'toaiff', 'token', 'tokenize', 'trace',
    'traceback', 'tty', 'types', 'unicodedata', 'unittest',
    'unittest.__main__', 'unittest.case', 'unittest.loader',
    'unittest.main', 'unittest.result', 'unittest.runner',
    'unittest.signals', 'unittest.suite', 'unittest.test',
    'unittest.test.dummy', 'unittest.test.support',
    'unittest.test.test_assertions', 'unittest.test.test_break',
    'unittest.test.test_case', 'unittest.test.test_discovery',
    'unittest.test.test_functiontestcase', 'unittest.test.test_loader',
    'unittest.test.test_program', 'unittest.test.test_result',
    'unittest.test.test_runner', 'unittest.test.test_setups',
    'unittest.test.test_skipping', 'unittest.test.test_suite',
    'unittest.util', 'urllib', 'urllib2', 'urlparse', 'user', 'uu',
    'uuid', 'warnings', 'wave', 'weakref', 'webbrowser', 'whichdb',
    'wsgiref', 'wsgiref.handlers', 'wsgiref.headers',
    'wsgiref.simple_server', 'wsgiref.util', 'wsgiref.validate', 'xdrlib',
    'xml', 'xml.dom', 'xml.dom.NodeFilter', 'xml.dom.domreg',
    'xml.dom.expatbuilder', 'xml.dom.minicompat', 'xml.dom.minidom',
    'xml.dom.pulldom', 'xml.dom.xmlbuilder', 'xml.etree',
    'xml.etree.ElementInclude', 'xml.etree.ElementPath',
    'xml.etree.ElementTree', 'xml.etree.cElementTree', 'xml.parsers',
    'xml.parsers.expat', 'xml.sax', 'xml.sax._exceptions',
    'xml.sax.expatreader', 'xml.sax.handler', 'xml.sax.saxutils',
    'xml.sax.xmlreader', 'xmllib', 'xmlrpclib', 'xxsubtype', 'zipfile', ]
# Dynamic modules that can't be auto-discovered by the script above
manual_stdlib = ['os.path', ]
py2_stdlib.extend(manual_stdlib)
def _get_import_type(module):
    """Classify *module* as 'stdlib', 'project' or 'third-party'.

    Results are memoized in the module-level ``module_cache`` dict so a
    given module name is only resolved once per run.
    """
    cached = module_cache.get(module)
    if cached is not None:
        return cached

    def remember(category):
        # Memoize before returning so later lookups short-circuit.
        module_cache[module] = category
        return category

    # Anything in the static Python 2 stdlib table counts as stdlib.
    if module in py2_stdlib:
        return remember('stdlib')

    # A module that resolves relative to the current directory belongs to
    # the project itself.
    try:
        _find_module(module, ['.'])
    except ImportError:
        pass
    else:
        return remember('project')

    # Everything else is treated as third-party; Python 3 stdlib additions
    # may land here too, which the grouping checks tolerate.
    return remember('third-party')
@core.flake8ext
def hacking_import_groups(logical_line, blank_before, previous_logical,
                          indent_level, previous_indent_level, physical_line,
                          noqa):
    r"""Check that imports are grouped correctly.
    OpenStack HACKING guide recommendation for imports:
    imports grouped such that Python standard library imports are together,
    third party library imports are together, and project imports are
    together
    Okay: import os\nimport sys\n\nimport six\n\nimport hacking
    Okay: import six\nimport znon_existent_package
    Okay: import os\nimport threading
    S366: import mock\nimport os
    S366: import hacking\nimport os
    S366: import hacking\nimport nonexistent
    S366: import hacking\nimport mock
    """
    # Only adjacent imports at the same indent can violate grouping.
    if noqa or blank_before > 0 or indent_level != previous_indent_level:
        return

    current_tokens = core.import_normalize(logical_line.strip()).split()
    previous_tokens = core.import_normalize(previous_logical.strip()).split()

    if not current_tokens or current_tokens[0] != 'import':
        return
    # Classify the current import first so the cache is warmed even when
    # the previous line is not an import.
    current_type = _get_import_type(current_tokens[1])
    if not previous_tokens or previous_tokens[0] != 'import':
        return
    previous_type = _get_import_type(previous_tokens[1])
    if previous_type != current_type:
        yield (0, 'S366: imports not grouped correctly '
               '(%s: %s, %s: %s)' %
               (previous_tokens[1], previous_type,
                current_tokens[1], current_type))
class ImportGroupData(object):
    """Persistent state holder for the import-group checks.

    Verifying import grouping requires knowing the current group for the
    file being checked, which cannot always be derived from only the
    current and previous lines, so this object carries that state between
    calls.
    """

    # NOTE(bnemec): *args is needed because the test code tries to run this
    # as a flake8 check and passes an argument to it.
    def __init__(self, *args):
        # All tracking fields start unset; they are filled in as the
        # checker walks each file.
        self.current_group = self.current_filename = None
        self.current_import = None
# Module-level shared state reused by hacking_import_groups_together across
# the successive logical lines of a file.
together_data = ImportGroupData()
@core.flake8ext
def hacking_import_groups_together(logical_line, blank_lines, indent_level,
                                   previous_indent_level, line_number,
                                   physical_line, filename, noqa):
    r"""Check that like imports are grouped together.
    OpenStack HACKING guide recommendation for imports:
    Imports should be grouped together by type.
    Okay: import os\nimport sys
    Okay: try:\n import foo\nexcept ImportError:\n pass\n\nimport six
    Okay: import abc\nimport mock\n\nimport six
    Okay: import eventlet\neventlet.monkey_patch()\n\nimport copy
    S367: import mock\n\nimport six
    S367: import os\n\nimport sys
    S367: import mock\nimport os\n\nimport sys
    """
    # A new file (or the first line of one) resets the tracked group.
    if line_number == 1 or filename != together_data.current_filename:
        together_data.current_group = None
    together_data.current_filename = filename
    if noqa:
        return

    tokens = core.import_normalize(logical_line.strip()).split()
    if not tokens:
        return
    if tokens[0] != 'import':
        # Any non-import statement ends the current import group.
        together_data.current_group = None
        return

    current_type = _get_import_type(tokens[1])
    previous_import = together_data.current_import
    together_data.current_import = tokens[1]
    same_group = current_type == together_data.current_group
    together_data.current_group = current_type

    # Flag two imports of the same type separated only by blank lines.
    if (same_group and indent_level == previous_indent_level and
            blank_lines >= 1):
        yield (0, 'S367: like imports should be grouped together (%s '
               'and %s from %s are separated by whitespace)' %
               (previous_import,
                together_data.current_import,
                current_type))
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.ext.session.model
-------------------------
Implements example of SQLAlchemy session model backend.
"""
from datetime import datetime
from flask.ext.login import current_user
from invenio.ext.sqlalchemy import db
class Session(db.Model):
    """Represents a Session record.

    Maps the ``session`` table: an opaque serialized session payload keyed
    by a 32-character session key, with an optional expiry timestamp and
    the id of the owning user.
    """
    __tablename__ = 'session'
    # 32-character session identifier; primary key.
    session_key = db.Column(db.String(32), nullable=False,
                            server_default='', primary_key=True)
    # Expiry timestamp; nullable, so a row may carry no expiry at all.
    session_expiry = db.Column(db.DateTime, nullable=True, index=True)
    # Serialized session payload (opaque bytes).
    session_object = db.Column(db.LargeBinary, nullable=True)
    # Id of the user the session belongs to (taken from current_user when
    # rows are built in set_session).
    uid = db.Column(db.Integer(15, unsigned=True), nullable=False, index=True)
    def get_session(self, name, expired=False):
        """Return the Session row whose key equals *name*.

        Uses ``.one()``, so it raises if zero or multiple rows match.

        NOTE(review): when ``expired`` is True the extra filter keeps rows
        whose expiry is in the *future*, i.e. sessions that have NOT yet
        expired — the flag name looks inverted; confirm the intended
        semantics with the callers.
        """
        where = Session.session_key == name
        if expired:
            where = db.and_(
                where, Session.session_expiry >= db.func.current_timestamp())
        return self.query.filter(where).one()
    def set_session(self, name, value, timeout=None):
        """Build (without persisting) a Session row for the current user.

        NOTE(review): assumes ``timeout`` is a ``datetime.timedelta``;
        passing the default ``None`` raises TypeError on the addition
        below — confirm all callers supply a timeout.
        """
        uid = current_user.get_id()
        session_expiry = datetime.utcnow() + timeout
        return Session(session_key=name,
                       session_object=value,
                       session_expiry=session_expiry,
                       uid=uid)
from .main import TorrentShack
def start():
    # Plugin entry point: the host application calls this to instantiate
    # the TorrentShack provider defined in .main.
    return TorrentShack()
# Plugin descriptor consumed by the host application's settings machinery;
# each entry under 'options' is rendered as a form field for this provider
# on the 'searcher' tab.
config = [{
    'name': 'torrentshack',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentShack',
            'description': 'See <a href="https://www.torrentshack.net/">TorrentShack</a>',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'scene_only',
                    'type': 'bool',
                    'default': False,
                    'description': 'Only allow scene releases.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated:
removed_in: "2.5"
why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
alternative: Use M(nclu) instead.
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf,
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to run initial run at 10G.
speed_40g:
description:
- List of ports to run initial run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
# The module body was removed on deprecation; running it simply reports
# that the module no longer exists.
if __name__ == '__main__':
    removed_module()
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from openstack_dashboard import api
import openstack_dashboard.dashboards.project.stacks.resource_types.tables \
as project_tables
import openstack_dashboard.dashboards.project.stacks.resource_types.tabs \
as project_tabs
class ResourceTypesView(tables.DataTableView):
    """Table view listing the Heat resource types available to the user."""

    table_class = project_tables.ResourceTypesTable
    template_name = 'project/stacks.resource_types/index.html'
    page_title = _("Resource Types")

    def get_data(self):
        # Fetch the available resource types and present them sorted by
        # name; on any failure, surface the error and show an empty table.
        try:
            type_list = sorted(api.heat.resource_types_list(self.request),
                               key=lambda item: item.resource_type)
        except Exception:
            type_list = []
            exceptions.handle(self.request,
                              _('Unable to retrieve stack resource types.'))
        return type_list
class DetailView(tabs.TabView):
    """Tabbed detail view for a single Heat resource type."""

    tab_group_class = project_tabs.ResourceTypeDetailsTabs
    template_name = 'project/stacks.resource_types/details.html'
    page_title = _("Resource Type Details")
    def get_resource_type(self, request, **kwargs):
        """Fetch the resource type named by the 'resource_type' URL kwarg.

        On failure the user is redirected back to the index view.
        """
        try:
            resource_type_overview = api.heat.resource_type_get(
                request,
                kwargs['resource_type'])
            return resource_type_overview
        except Exception:
            msg = _('Unable to retrieve resource type details.')
            exceptions.handle(request, msg, redirect=self.get_redirect_url())
    def get_tabs(self, request, **kwargs):
        """Build the tab group with the type's attributes and properties
        rendered as YAML text.
        """
        resource_type_overview = self.get_resource_type(request, **kwargs)
        r_type = resource_type_overview['resource_type']
        r_type_attributes = resource_type_overview['attributes']
        r_type_properties = resource_type_overview['properties']
        return self.tab_group_class(
            request,
            rt=r_type,
            rt_attributes=yaml.safe_dump(r_type_attributes, indent=2),
            rt_properties=yaml.safe_dump(r_type_properties, indent=2),
            **kwargs)
    @staticmethod
    def get_redirect_url():
        # Fallback destination when the resource type cannot be loaded.
        return reverse('horizon:project:stacks.resources:index')
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO:
# cr.execute('delete from wkf_triggers where model=%s and res_id=%s', (res_type,res_id))
#
import instance
import wkf_expr
import wkf_logs
def create(cr, act_datas, inst_id, ident, stack):
    """Create an 'active' workitem for each activity and process it.

    :param cr: database cursor
    :param act_datas: list of wkf_activity rows (dicts) to instantiate
    :param inst_id: id of the owning workflow instance
    :param ident: (uid, model, res_id) triple identifying the record
    :param stack: shared list accumulating returned client actions
    """
    for act in act_datas:
        cr.execute("select nextval('wkf_workitem_id_seq')")
        id_new = cr.fetchone()[0]
        cr.execute("insert into wkf_workitem (id,act_id,inst_id,state) values (%s,%s,%s,'active')", (id_new, act['id'], inst_id))
        # Re-read the freshly inserted row so process() receives the full
        # workitem dict, not just the values inserted above.
        cr.execute('select * from wkf_workitem where id=%s',(id_new,))
        res = cr.dictfetchone()
        wkf_logs.log(cr,ident,act['id'],'active')
        process(cr, res, ident, stack=stack)
def process(cr, workitem, ident, signal=None, force_running=False, stack=None):
    """Advance *workitem* through its activity's lifecycle.

    :param cr: database cursor
    :param workitem: wkf_workitem row as a dict
    :param ident: (uid, model, res_id) triple identifying the record
    :param signal: optional signal name driving transitions
    :param force_running: evaluate outgoing transitions even when the
        workitem is not yet 'complete'
    :param stack: required list accumulating client actions returned by
        executed activities
    :returns: False when execution failed, otherwise True or the last
        client action returned by a server action
    :raises ValueError: if *stack* is not supplied
    """
    if stack is None:
        # BUG FIX: the original raised a bare string ('Error !!!'), which
        # on Python >= 2.6 itself raises TypeError; raise a real exception.
        raise ValueError('process() requires a stack')
    result = True
    cr.execute('select * from wkf_activity where id=%s', (workitem['act_id'],))
    activity = cr.dictfetchone()
    triggers = False
    if workitem['state'] == 'active':
        triggers = True
        result = _execute(cr, workitem, activity, ident, stack)
        if not result:
            return False
    # NOTE: 'running' workitems need no handling here; the original code
    # had an explicit no-op branch for them.
    if workitem['state'] == 'complete' or force_running:
        ok = _split_test(cr, workitem, activity['split_mode'], ident, signal,
                         stack)
        triggers = triggers and not ok
    if triggers:
        # Record triggers: other records whose changes should cause the
        # transitions leaving this activity to be re-evaluated.
        cr.execute('select * from wkf_transition where act_from=%s', (workitem['act_id'],))
        alltrans = cr.dictfetchall()
        for trans in alltrans:
            if trans['trigger_model']:
                ids = wkf_expr._eval_expr(cr, ident, workitem,
                                          trans['trigger_expr_id'])
                for res_id in ids:
                    cr.execute('select nextval(\'wkf_triggers_id_seq\')')
                    id = cr.fetchone()[0]
                    cr.execute('insert into wkf_triggers (model,res_id,instance_id,workitem_id,id) values (%s,%s,%s,%s,%s)', (trans['trigger_model'], res_id, workitem['inst_id'], workitem['id'], id))
    return result
# ---------------------- PRIVATE FUNCS --------------------------------
def _state_set(cr, workitem, activity, state, ident):
    """Move *workitem* into *state*, persisting and logging the change."""
    # Persist first, then mirror the new state on the in-memory record,
    # then record the transition in the workflow log.
    cr.execute('update wkf_workitem set state=%s where id=%s',
               (state, workitem['id']))
    workitem['state'] = state
    wkf_logs.log(cr, ident, activity['id'], state)
def _execute(cr, workitem, activity, ident, stack):
    """Run *activity* for *workitem* and advance its state.

    Returns False when a subflow could not be created (the workitem is
    deleted in that case); otherwise returns True or the last client
    action produced by an executed server action.
    """
    result = True
    #
    # send a signal to parent workflow (signal: subflow.signal_name)
    #
    signal_todo = []
    if (workitem['state']=='active') and activity['signal_send']:
        # Collect parent instances embedding this instance as a subflow so
        # they can be signalled after this activity has run.
        cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (workitem['inst_id'],))
        for i in cr.fetchall():
            signal_todo.append((i[0], (ident[0],i[1],i[2]), activity['signal_send']))
    if activity['kind']=='dummy':
        # 'dummy': no work of its own; optionally runs a server action.
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'complete', ident)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                if res2:
                    stack.append(res2)
                    result=res2
    elif activity['kind']=='function':
        # 'function': run the activity's python action, then the optional
        # server action, then mark the workitem complete.
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            returned_action = wkf_expr.execute(cr, ident, workitem, activity)
            if type(returned_action) in (dict,):
                stack.append(returned_action)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                # A client action has been returned
                if res2:
                    stack.append(res2)
                    result=res2
            _state_set(cr, workitem, activity, 'complete', ident)
    elif activity['kind']=='stopall':
        # 'stopall': remove every other workitem of the instance.
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            cr.execute('delete from wkf_workitem where inst_id=%s and id<>%s', (workitem['inst_id'], workitem['id']))
            if activity['action']:
                wkf_expr.execute(cr, ident, workitem, activity)
            _state_set(cr, workitem, activity, 'complete', ident)
    elif activity['kind']=='subflow':
        # 'subflow': start (or locate) a child workflow instance and stay
        # 'running' until that instance completes.
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            if activity.get('action', False):
                # The action is expected to return the resource id the
                # subflow runs on; False/None means it could not be built.
                id_new = wkf_expr.execute(cr, ident, workitem, activity)
                if not id_new:
                    cr.execute('delete from wkf_workitem where id=%s', (workitem['id'],))
                    return False
                assert type(id_new)==type(1) or type(id_new)==type(1L), 'Wrong return value: '+str(id_new)+' '+str(type(id_new))
                cr.execute('select id from wkf_instance where res_id=%s and wkf_id=%s', (id_new,activity['subflow_id']))
                id_new = cr.fetchone()[0]
            else:
                id_new = instance.create(cr, ident, activity['subflow_id'])
            cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (id_new, workitem['id']))
            workitem['subflow_id'] = id_new
        if workitem['state']=='running':
            # Poll the subflow instance; complete once it has finished.
            cr.execute("select state from wkf_instance where id=%s", (workitem['subflow_id'],))
            state= cr.fetchone()[0]
            if state=='complete':
                _state_set(cr, workitem, activity, 'complete', ident)
    for t in signal_todo:
        instance.validate(cr, t[0], t[1], t[2], force_running=True)
    return result
def _split_test(cr, workitem, split_mode, ident, signal=None, stack=None):
    """Evaluate the outgoing transitions of a workitem and, when the split
    condition holds, fire them and advance the workflow.

    :param cr: database cursor
    :param workitem: dict row of the current wkf_workitem
    :param split_mode: 'XOR' (first matching transition), 'OR' (all
        matching transitions) or anything else for AND semantics (all
        transitions must match)
    :param ident: (uid, model, res_id) identity triple
    :param signal: optional signal name passed to the transition check
    :param stack: result stack threaded through the engine; required
    :return: True if the workitem was consumed and joins were tested,
        False otherwise
    """
    if stack is None:
        # Was `raise 'Error !!!'` — string exceptions are a TypeError on
        # Python >= 2.6; raise a real exception instance instead.
        raise ValueError('Error !!!')
    cr.execute('select * from wkf_transition where act_from=%s', (workitem['act_id'],))
    test = False
    transitions = []
    alltrans = cr.dictfetchall()
    if split_mode in ('XOR', 'OR'):
        # OR: collect every transition whose condition holds;
        # XOR: stop at the first one.
        for transition in alltrans:
            if wkf_expr.check(cr, workitem, ident, transition, signal):
                test = True
                transitions.append((transition['id'], workitem['inst_id']))
                if split_mode == 'XOR':
                    break
    else:
        # AND: every outgoing transition must pass its check; only
        # transitions not already recorded in wkf_witm_trans are fired.
        test = True
        for transition in alltrans:
            if not wkf_expr.check(cr, workitem, ident, transition, signal):
                test = False
                break
            cr.execute('select count(*) from wkf_witm_trans where trans_id=%s and inst_id=%s', (transition['id'], workitem['inst_id']))
            if not cr.fetchone()[0]:
                transitions.append((transition['id'], workitem['inst_id']))
    if test and transitions:
        # Record the fired transitions, consume the workitem and test the
        # join condition on each target activity.
        cr.executemany('insert into wkf_witm_trans (trans_id,inst_id) values (%s,%s)', transitions)
        cr.execute('delete from wkf_workitem where id=%s', (workitem['id'],))
        for t in transitions:
            _join_test(cr, t[0], t[1], ident, stack)
        return True
    return False
def _join_test(cr, trans_id, inst_id, ident, stack):
    """Test the join condition of the activity targeted by *trans_id*
    and create a new workitem on it when the condition is satisfied."""
    cr.execute('select * from wkf_activity where id=(select act_to from wkf_transition where id=%s)', (trans_id,))
    activity = cr.dictfetchone()
    if activity['join_mode']=='XOR':
        # XOR join: a single incoming transition is enough — activate
        # immediately and drop the pending transition record.
        create(cr,[activity], inst_id, ident, stack)
        cr.execute('delete from wkf_witm_trans where inst_id=%s and trans_id=%s', (inst_id,trans_id))
        return
    # AND join: every incoming transition must have fired before the
    # target activity is created.
    cr.execute('select id from wkf_transition where act_to=%s', (activity['id'],))
    incoming = [row[0] for row in cr.fetchall()]
    ready = True
    for tid in incoming:
        cr.execute('select count(*) from wkf_witm_trans where trans_id=%s and inst_id=%s', (tid,inst_id))
        if not cr.fetchone()[0]:
            ready = False
            break
    if ready:
        # Consume all pending transition records, then activate.
        for tid in incoming:
            cr.execute('delete from wkf_witm_trans where trans_id=%s and inst_id=%s', (tid,inst_id))
        create(cr, [activity], inst_id, ident, stack)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import re
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.sites.models import Site
from tastypie.test import ResourceTestCase
from actstream import follow,action
from actstream.models import Action
from mks.models import Member, Party, Membership, MemberAltname, Knesset
from mks.views import MemberListView
from mks.managers import KnessetManager
from laws.models import Law,Bill,PrivateProposal,Vote,VoteAction
from committees.models import CommitteeMeeting,Committee
from knesset.utils import RequestFactory
import datetime
import feedparser
import json
from backlinks.tests.xmlrpc import TestClientServerProxy
from xmlrpclib import Fault, loads
from urllib import urlencode
from backlinks.models import InboundBacklink
from backlinks.pingback.server import PingbackServer
from django import template
from mks.mock import PINGABLE_MEMBER_ID, NON_PINGABLE_MEMBER_ID
from persons.models import Person, PersonAlias
from mmm.models import Document
# Content type used when POSTing TrackBack pings in the tests below.
TRACKBACK_CONTENT_TYPE = 'application/x-www-form-urlencoded; charset=utf-8'

def just_id(obj):
    """Return the ``id`` attribute of *obj*.

    PEP 8 (E731): use a ``def`` instead of assigning a ``lambda`` to a
    name, so tracebacks and repr() show a useful function name.
    """
    return obj.id
class MemberViewsTest(TestCase):
    """View-level tests for member/party list and detail pages and the
    member activity feeds, using a small fixture built in setUp()."""
    def setUp(self):
        # make sure cache is clean, to prevent some failing tests with
        # unexpected caches
        from django.core.cache import cache
        cache.clear()
        d = datetime.date.today()
        # One Knesset, two parties, two MKs (mk_1 is the "active" one:
        # proposes a bill, attends meetings and votes; mk_2 does nothing).
        self.knesset = Knesset.objects.create(
            number=1,
            start_date=d-datetime.timedelta(10))
        self.party_1 = Party.objects.create(name='party 1',
                                            knesset=self.knesset)
        self.party_2 = Party.objects.create(name='party 2',
                                            knesset=self.knesset)
        self.mk_1 = Member.objects.create(name='mk_1',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1,
                                          backlinks_enabled=True)
        self.mk_2 = Member.objects.create(name='mk_2',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1,
                                          backlinks_enabled = False)
        self.jacob = User.objects.create_user('jacob', 'jacob@jacobian.org',
                                              'JKM')
        self.committee_1 = Committee.objects.create(name='c1')
        self.meeting_1 = self.committee_1.meetings.create(
            date=d-datetime.timedelta(1),
            protocol_text='jacob:\nI am a perfectionist\nadrian:\nI have a deadline')
        self.meeting_2 = self.committee_1.meetings.create(
            date=d-datetime.timedelta(2),
            protocol_text='adrian:\nYou are a perfectionist\njacob:\nYou have a deadline')
        self.law = Law.objects.create(title='law 1')
        self.pp = PrivateProposal.objects.create(title='private proposal 1', date=datetime.date.today()-datetime.timedelta(3))
        self.pp.proposers.add(self.mk_1)
        self.bill_1 = Bill.objects.create(stage='1', title='bill 1', law=self.law)
        self.bill_1.proposals.add(self.pp)
        self.bill_1.proposers.add(self.mk_1)
        self.meeting_1.mks_attended.add(self.mk_1)
        self.meeting_1.save()
        self.meeting_2.mks_attended.add(self.mk_1)
        self.meeting_2.save()
        self.vote = Vote.objects.create(title='vote 1',time=datetime.datetime.now())
        self.vote_action = VoteAction.objects.create(member=self.mk_1, vote=self.vote, type='for', party=self.mk_1.current_party)
        self.domain = 'http://' + Site.objects.get_current().domain
    def testMemberList(self):
        """member-list permanently redirects; the stats page lists all MKs."""
        res = self.client.get(reverse('member-list'))
        self.assertEqual(res.status_code, 301)
        res = self.client.get(reverse('member-stats', kwargs={'stat_type': 'bills_pre'}))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'mks/member_list.html')
        object_list = res.context['object_list']
        self.assertItemsEqual(map(just_id, object_list), [self.mk_1.id, self.mk_2.id])
    def testMemberDetail(self):
        """Member detail renders the expected template and object."""
        res = self.client.get(reverse('member-detail', args=[self.mk_1.id]))
        self.assertTemplateUsed(res,
                                'mks/member_detail.html')
        self.assertEqual(res.context['object'].id, self.mk_1.id)
    def testMemberDetailOtherVerbs(self):
        """Tests the member detail view with parameters that make it render
        actions other than the default ones"""
        res = self.client.get('%s?verbs=attended&verbs=voted' %
                              reverse('member-detail', args=[self.mk_1.id]))
        self.assertTemplateUsed(res,
                                'mks/member_detail.html')
        self.assertEqual(res.context['object'].id, self.mk_1.id)
    def testPartyList(self):
        # party list should redirect to stats by seat
        res = self.client.get(reverse('party-list'))
        self.assertRedirects(res, reverse('party-stats', kwargs={'stat_type': 'seats'}), 301)
        #self.assertTemplateUsed(res, 'mks/party_list.html')
        #object_list = res.context['object_list']
        #self.assertEqual(map(just_id, object_list),
        #                 [ self.party_1.id, self.party_2.id, ])
    def testPartyDetail(self):
        """Party detail renders the expected template and object."""
        res = self.client.get(reverse('party-detail',
                                      args=[self.party_1.id]))
        self.assertTemplateUsed(res, 'mks/party_detail.html')
        self.assertEqual(res.context['object'].id, self.party_1.id)
    def testMemberDetailsContext(self):
        """watched_member context flag tracks whether the logged-in user
        follows the member."""
        # test anonymous user
        mk_1_url = self.mk_1.get_absolute_url()
        res = self.client.get(mk_1_url)
        self.assertFalse(res.context['watched_member'])
        # test autherized user
        self.assertTrue(self.client.login(username='jacob', password='JKM'))
        res = self.client.get(mk_1_url)
        self.assertFalse(res.context['watched_member'])
        # test autherized user that follows
        follow(self.jacob, self.mk_1)
        res = self.client.get(mk_1_url)
        self.assertTrue(res.context['watched_member'])
    def testMemberActivityFeed(self):
        """Feed entries appear newest-first: vote, meetings, then bill."""
        res = self.client.get(reverse('member-activity-feed',
                                      args=[self.mk_1.id]))
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        # self.assertEqual(len(parsed['entries']),4)
        self.assertEqual(parsed['entries'][0]['link'], self.domain +
                         self.vote.get_absolute_url())
        self.assertEqual(parsed['entries'][1]['link'], self.domain +
                         self.meeting_1.get_absolute_url())
        self.assertEqual(parsed['entries'][2]['link'], self.domain +
                         self.meeting_2.get_absolute_url())
        self.assertEqual(parsed['entries'][3]['link'], self.domain +
                         self.bill_1.get_absolute_url())
    def testMemberActivityFeedWithVerbProposed(self):
        """Filtering by 'proposed' only shows mk_1's bill proposal."""
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_1.id}),{'verbs':'proposed'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),1)
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_2.id}),{'verbs':'proposed'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),0)
    def testMemberActivityFeedWithVerbAttended(self):
        """Filtering by 'attended' shows mk_1's two meetings, none for mk_2."""
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_1.id}),{'verbs':'attended'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),2)
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_2.id}),{'verbs':'attended'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),0)
    def testMemberActivityFeedWithVerbJoined(self):
        """No 'joined' activity exists in the fixture."""
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_1.id}),{'verbs':'joined'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),0)
    def testMemberActivityFeedWithVerbPosted(self):
        """No 'posted' activity exists in the fixture."""
        res = self.client.get(reverse('member-activity-feed',
                                      kwargs={'object_id': self.mk_1.id}),{'verbs':'posted'})
        self.assertEqual(res.status_code,200)
        parsed = feedparser.parse(res.content)
        self.assertEqual(len(parsed['entries']),0)
    def tearDown(self):
        self.party_1.delete()
        self.party_2.delete()
        self.mk_1.delete()
        self.mk_2.delete()
        self.jacob.delete()
class MemberBacklinksViewsTest(TestCase):
    """Tests for Pingback (XML-RPC) and TrackBack (POST) endpoints on
    member pages. mk_1 has backlinks enabled, mk_2 does not."""
    # Route requests through the test-only URLconf that mounts the
    # pingback/trackback servers.
    urls = 'mks.server_urls'
    def setUp(self):
        self.party_1 = Party.objects.create(name='party 1')
        self.party_2 = Party.objects.create(name='party 2')
        self.mk_1 = Member.objects.create(name='mk_1',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1,
                                          backlinks_enabled=True)
        self.mk_2 = Member.objects.create(name='mk_2',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1,
                                          backlinks_enabled = False)
        self.jacob = User.objects.create_user('jacob', 'jacob@jacobian.org',
                                              'JKM')
        self.mk_1.save()
        self.mk_2.save()
        self.committee_1 = Committee.objects.create(name='c1')
        self.meeting_1 = self.committee_1.meetings.create(date=datetime.date.today()-datetime.timedelta(1),
                                 protocol_text='jacob:\nI am a perfectionist\nadrian:\nI have a deadline')
        self.meeting_2 = self.committee_1.meetings.create(date=datetime.date.today()-datetime.timedelta(2),
                                 protocol_text='adrian:\nYou are a perfectionist\njacob:\nYou have a deadline')
        self.law = Law.objects.create(title='law 1')
        self.pp = PrivateProposal.objects.create(title='private proposal 1', date=datetime.date.today()-datetime.timedelta(3))
        self.pp.proposers.add(self.mk_1)
        self.bill_1 = Bill.objects.create(stage='1', title='bill 1', law=self.law)
        self.bill_1.proposals.add(self.pp)
        self.bill_1.proposers.add(self.mk_1)
        self.meeting_1.mks_attended.add(self.mk_1)
        self.meeting_1.save()
        self.meeting_2.mks_attended.add(self.mk_1)
        self.meeting_2.save()
        self.vote = Vote.objects.create(title='vote 1',time=datetime.datetime.now())
        self.vote_action = VoteAction.objects.create(member=self.mk_1, vote=self.vote, type='for', party=self.mk_1.current_party)
        # SERVER_NAME must match the URLs used by the mock source documents.
        self.client = Client(SERVER_NAME='example.com')
        self.xmlrpc_client = TestClientServerProxy('/pingback/')
        self.PINGABLE_MEMBER_ID = str(self.mk_1.id)
        self.NON_PINGABLE_MEMBER_ID = str(self.mk_2.id)
    def trackbackPOSTRequest(self, path, params):
        """POST urlencoded *params* to *path* with the TrackBack content type."""
        return self.client.post(path, urlencode(params), content_type=TRACKBACK_CONTENT_TYPE)
    def assertTrackBackErrorResponse(self, response, msg):
        # TrackBack errors are signalled inside the XML body, not via status.
        if response.content.find('<error>1</error>') == -1:
            raise self.failureException, msg
    '''
    def testTrackBackRDFTemplateTag(self):
        t = template.Template("{% load trackback_tags %}{% trackback_rdf object_url object_title trackback_url True %}")
        c = template.Context({'trackback_url': '/trackback/member/'+self.PINGABLE_MEMBER_ID+'/',
                              'object_url': self.pingableTargetUrl,
                              'object_title': 'Pingable Test Entry'})
        rendered = t.render(c)
        link_re = re.compile(r'dc:identifier="(?P<link>[^"]+)"')
        match = link_re.search(rendered)
        self.assertTrue(bool(match), 'TrackBack RDF not rendered')
        self.assertEquals(match.groups('link')[0], self.pingableTargetUrl,
                          'TrackBack RDF did not contain a valid target URI')
        ping_re = re.compile(r'trackback:ping="(?P<link>[^"]+)"')
        match = ping_re.search(rendered)
        self.assertTrue(bool(match), 'TrackBack RDF not rendered')
        self.assertEquals(match.groups('link')[0], '/trackback/member/'+self.PINGABLE_MEMBER_ID+'/',
                          'TrackBack RDF did not contain a TrackBack server URI')
    '''
    def testPingNonLinkingSourceURI(self):
        """Fault 17: the source document does not link to the target."""
        self.assertRaises(Fault,
                          self.xmlrpc_client.pingback.ping,
                          'http://example.com/bad-source-document/',
                          'http://example.com/member/'+PINGABLE_MEMBER_ID+'/')
        try:
            self.xmlrpc_client.pingback.ping('http://example.com/bad-source-document/',
                                             'http://example.com/member/'+PINGABLE_MEMBER_ID+'/')
        except Fault, f:
            self.assertEquals(f.faultCode,
                              17,
                              'Server did not return "source URI does not link" response')
    def testDisallowedMethod(self):
        """GET on the pingback endpoint must return 405."""
        response = self.client.get('/pingback/')
        self.assertEquals(response.status_code,
                          405,
                          'Server returned incorrect status code for disallowed HTTP method')
    def testNonExistentRPCMethod(self):
        self.assertRaises(Fault, self.xmlrpc_client.foo)
    def testBadPostData(self):
        """Non-XML-RPC POST bodies produce an unparsable (fault) response."""
        post_data = urlencode({'sourceURI': 'http://example.com/good-source-document/',
                               'targetURI': 'http://example.com/member/'+PINGABLE_MEMBER_ID+'/'})
        response = self.client.post('/pingback/', post_data, TRACKBACK_CONTENT_TYPE)
        self.assertRaises(Fault,
                          loads,
                          response.content)
    def testPingNonExistentTargetURI(self):
        """Fault 32: the target resource does not exist."""
        self.assertRaises(Fault,
                          self.xmlrpc_client.pingback.ping,
                          'http://example.com/member/non-existent-resource/',
                          'http://example.com/member/non-existent-resource')
        try:
            self.xmlrpc_client.pingback.ping('http://example.com/member/non-existent-resource/',
                                             'http://example.com/member/non-existent-resource')
        except Fault, f:
            self.assertEquals(f.faultCode,
                              32,
                              'Server did not return "target does not exist" error')
    def testPingAlreadyRegistered(self):
        """Fault 48: pinging the same source/target pair twice."""
        self.xmlrpc_client.pingback.ping('http://example.com/another-good-source-document/',
                                         'http://example.com/member/'+PINGABLE_MEMBER_ID+'/')
        self.assertRaises(Fault,
                          self.xmlrpc_client.pingback.ping,
                          'http://example.com/another-good-source-document/',
                          'http://example.com/member/'+PINGABLE_MEMBER_ID+'/')
        try:
            self.xmlrpc_client.pingback.ping('http://example.com/another-good-source-document/',
                                             'http://example.com/member/'+PINGABLE_MEMBER_ID+'/')
        except Fault, f:
            self.assertEqual(f.faultCode,
                             48,
                             'Server did not return "ping already registered" error')
    def testPingbackLinkTemplateTag(self):
        """{% pingback_link %} renders an absolute <link rel="pingback"> tag."""
        t = template.Template("{% load pingback_tags %}{% pingback_link pingback_path %}")
        c = template.Context({'pingback_path': '/pingback/'})
        rendered = t.render(c)
        link_re = re.compile(r'<link rel="pingback" href="([^"]+)" ?/?>')
        match = link_re.search(rendered)
        self.assertTrue(bool(match), 'Pingback link tag did not render')
        self.assertEquals(match.groups(0)[0], 'http://example.com/pingback/',
                          'Pingback link tag rendered incorrectly')
    def testPingNonPingableTargetURI(self):
        """Fault 33: member exists but has backlinks disabled."""
        self.assertRaises(Fault,
                          self.xmlrpc_client.pingback.ping,
                          'http://example.com/member/non-existent-resource/',
                          'http://example.com/member/'+str(self.NON_PINGABLE_MEMBER_ID)+'/')
        try:
            self.xmlrpc_client.pingback.ping('http://example.com/member/non-existent-resource/',
                                             'http://example.com/member/'+str(self.NON_PINGABLE_MEMBER_ID)+'/')
        except Fault, f:
            self.assertEquals(f.faultCode,
                              33,
                              'Server did not return "target not pingable" error')
    def testPingSourceURILinks(self):
        """A valid ping is registered as an InboundBacklink row."""
        r = self.xmlrpc_client.pingback.ping('http://example.com/good-source-document/',
                                             'http://example.com/member/'+self.PINGABLE_MEMBER_ID+'/')
        self.assertEquals(r,
                          "Ping from http://example.com/good-source-document/ to http://example.com/member/1/ registered",
                          "Failed registering ping")
        registered_ping = InboundBacklink.objects.get(source_url='http://example.com/good-source-document/',
                                                      target_url='http://example.com/member/'+self.PINGABLE_MEMBER_ID+'/')
        self.assertEquals(str(registered_ping.target_object.id),
                          PINGABLE_MEMBER_ID,
                          'Server did not return "target not pingable" error')
    def testDisallowedTrackbackMethod(self):
        """GET on a trackback endpoint must return 405."""
        response = self.client.get('/trackback/member/'+PINGABLE_MEMBER_ID+'/')
        self.assertEquals(response.status_code,
                          405,
                          'Server returned incorrect status code for disallowed HTTP method')
    def testPingNoURLParameter(self):
        params = {'title': 'Example', 'excerpt': 'Example'}
        response = self.trackbackPOSTRequest('/trackback/member/'+self.PINGABLE_MEMBER_ID+'/',
                                             params)
        self.assertTrackBackErrorResponse(response,
                                          'Server did not return error response'
                                          'for ping with no URL parameter')
    def testPingBadURLParameter(self):
        params = {'url': 'bad url'}
        response = self.trackbackPOSTRequest('http://example.com/trackback/member/'+self.PINGABLE_MEMBER_ID+'/',
                                             params)
        self.assertTrackBackErrorResponse(response,
                                          'Server did not return error response for ping with bad URL parameter')
    def testPingNonExistentTarget(self):
        params = {'url': 'http://example.com/good-source-document/'}
        response = self.trackbackPOSTRequest('/trackback/member/5000/',
                                             params)
        self.assertTrackBackErrorResponse(response,
                                          'Server did not return error response for ping against non-existent resource')
    def testPingNonPingableTarget(self):
        # NOTE(review): the source url points at the pingable member's page
        # while the target is the non-pingable member — looks intentional
        # (any valid source), but confirm against the trackback server mock.
        params = {'url': 'http://example.com/member/'+PINGABLE_MEMBER_ID+'/'}
        response = self.trackbackPOSTRequest('/trackback/member/'+self.NON_PINGABLE_MEMBER_ID+'/',
                                             params)
        self.assertTrackBackErrorResponse(response,
                                          'Server did not return error response for ping against non-pingable resource')
    def testPingSuccess(self):
        """A valid TrackBack ping stores title and excerpt verbatim."""
        title = 'Backlinks Test - Test Good Source Document'
        excerpt = 'This is a summary of the good source document'
        params = {'url': 'http://example.com/good-source-document/', 'title': title, 'excerpt': excerpt}
        track_target = '/trackback/member/'+self.PINGABLE_MEMBER_ID+'/'
        response = self.trackbackPOSTRequest(track_target,
                                             params)
        self.assertTrue(response.content.find('<error>0</error>') > -1,
                        'Server did not return success response for a valid ping request')
        registered_ping = InboundBacklink.objects.get(source_url='http://example.com/good-source-document/',
                                                      target_url='http://example.com'+self.mk_1.get_absolute_url())
        self.assertEquals(registered_ping.title,
                          title,
                          'Server did not use title from ping request when registering')
        self.assertEquals(registered_ping.excerpt,
                          excerpt,
                          'Server did not use excerpt from ping request when registering')
    def tearDown(self):
        self.party_1.delete()
        self.party_2.delete()
        self.mk_1.delete()
        self.mk_2.delete()
        self.jacob.delete()
class MemberAPITests(ResourceTestCase):
    """Tests for the tastypie v2 member API: filtering by name, alias
    resolution and aggregate statistics fields."""
    def setUp(self):
        super(MemberAPITests, self).setUp()
        d = datetime.date.today()
        self.knesset = Knesset.objects.create(
            number=1,
            start_date=d-datetime.timedelta(10))
        # Pin the cached "current knesset" so the API resolves against
        # the fixture; reset in tearDown.
        KnessetManager._current_knesset = self.knesset
        self.party_1 = Party.objects.create(name='party 1',
                                            knesset=self.knesset)
        # Ten fake MMM (research-center) documents authored by mk_1.
        matches = [ {
            "entity_id": 10012 + i,
            "docid": "m00079",
            "entity_name": "bbbb",
            "entity_type": "COMM",
            "url": "http://knesset.gov.il/mmm/data/pdf/m00079.pdf" + str(i),
            "title": "aaaaaa" + str(i),
            "authors": [
                "mk_1"
            ],
            "pub_date": "2000-01-01",
            "session_date": None,
            "heading": "bbbb",
        } for i in xrange(10)]
        for match in matches:
            match['date'] = datetime.datetime.strptime(match['pub_date'], '%Y-%m-%d').date()
        self.mmm_docs = [Document.objects.create(
            url = match['url'],
            title = match['title'],
            publication_date = match['pub_date'],
            author_names = match['authors'],
        ) for match in matches]
        self.mk_1 = Member.objects.create(name='mk_1',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1,
                                          backlinks_enabled=True,
                                          bills_stats_first = 2,
                                          bills_stats_proposed = 5,
                                          average_weekly_presence_hours = 3.141)
        for mmm_doc in self.mmm_docs:
            mmm_doc.req_mks = [self.mk_1, ]
        # An alias that should resolve to the same member in API queries.
        PersonAlias.objects.create(name="mk_1_alias",
                                   person=Person.objects.get(mk=self.mk_1))
    def testSimpleGet(self):
        """Filtering by exact name returns exactly one member."""
        res1 = self.api_client.get('/api/v2/member/', data={'name': 'mk_1'})
        self.assertValidJSONResponse(res1)
        ret = self.deserialize(res1)
        self.assertEqual(ret['meta']['total_count'], 1)
    def testAliases(self):
        """Querying by alias returns the same payload as the real name."""
        res1 = self.api_client.get('/api/v2/member/', data={'name': 'mk_1'}, format='json')
        self.assertValidJSONResponse(res1)
        res2 = self.api_client.get('/api/v2/member/', data={'name': 'mk_1_alias'}, format='json')
        self.assertValidJSONResponse(res2)
        self.assertEqual(self.deserialize(res1), self.deserialize(res2))
    def testMemberList(self):
        """Statistics fields are serialized with the expected values."""
        res1 = self.api_client.get('/api/v2/member/', format = 'json')
        self.assertEqual(res1.status_code, 200)
        data = json.loads(res1.content)
        self.assertEqual(len(data['objects']), 1)
        rmks = data['objects'][0]
        self.assertEqual(rmks['mmms_count'], 10)
        self.assertEqual(rmks['bills_stats_first'], 2)
        self.assertEqual(rmks['bills_stats_proposed'], 5)
        self.assertEqual(rmks['average_weekly_presence_hours'], 3.141)
    def tearDown(self):
        for mmm_doc in self.mmm_docs:
            mmm_doc.delete()
        self.mk_1.delete()
        KnessetManager._current_knesset = None
class MemberModelsTests(TestCase):
    """Unit tests for Member model helpers."""
    def testNames(self):
        """``names`` lists the canonical name, then any alt-names."""
        member = Member(name='test member')
        self.assertEqual(member.names, ['test member'])
        member.save()
        MemberAltname(member=member,name='test2').save()
        self.assertEqual(member.names, ['test member','test2'])
from agendas.models import Agenda, AgendaVote
class MKAgendasTest(TestCase):
    """Tests for agenda score computation per member and for the v2
    member-agendas API endpoint."""
    def setUp(self):
        self.knesset = Knesset.objects.create(
            number=1,
            start_date=datetime.date(2010, 1, 1))
        self.party_1 = Party.objects.create(
            name='party 1',
            number_of_seats=1,
            knesset=self.knesset)
        # Three MKs: mk_1 votes 'for', mk_2 votes 'against', mk_3 never votes.
        self.mk_1 = Member.objects.create(name='mk_1',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1)
        self.mk_2 = Member.objects.create(name='mk_2',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1)
        self.mk_3 = Member.objects.create(name='mk_3',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1)
        # agenda_3 is private and must not appear in any member's values.
        self.agenda_1 = Agenda.objects.create(name='agenda 1',
                                              description='a bloody good agenda 1',
                                              public_owner_name='Dr. Jacob',
                                              is_public=True)
        self.agenda_2 = Agenda.objects.create(name='agenda 2',
                                              description='a bloody good agenda 2',
                                              public_owner_name='Greenpeace',
                                              is_public=True)
        self.agenda_3 = Agenda.objects.create(name='agenda 3',
                                              description='a bloody good agenda 3',
                                              public_owner_name='Hidden One',
                                              is_public=False)
        self.vote_1 = Vote.objects.create(title='vote 1',time=datetime.datetime.now())
        self.vote_2 = Vote.objects.create(title='vote 2',time=datetime.datetime.now())
        self.voteactions = [ VoteAction.objects.create(vote=self.vote_1,
                             member=self.mk_1, type='for', party=self.mk_1.current_party),
                             VoteAction.objects.create(vote=self.vote_2,
                             member=self.mk_1, type='for', party=self.mk_1.current_party),
                             VoteAction.objects.create(vote=self.vote_1,
                             member=self.mk_2, type='against', party=self.mk_2.current_party),
                             VoteAction.objects.create(vote=self.vote_2,
                             member=self.mk_2, type='against', party=self.mk_2.current_party)
                             ]
        self.agendavotes = [AgendaVote.objects.create(agenda=self.agenda_1,
                                                      vote=self.vote_1,
                                                      score=-1,
                                                      reasoning="there's got to be a reason 1"),
                            AgendaVote.objects.create(agenda=self.agenda_2,
                                                      vote=self.vote_2,
                                                      score=0.5,
                                                      reasoning="there's got to be a reason 2"),
                            AgendaVote.objects.create(agenda=self.agenda_1,
                                                      vote=self.vote_2,
                                                      score=0.5,
                                                      reasoning="there's got to be a reason 3"),
                            ]
        self.domain = 'http://' + Site.objects.get_current().domain
    def testMemberValues(self):
        """Agenda scores/ranks mirror each other for opposite voters;
        a member with no votes gets no agenda values."""
        agenda_values1 = self.mk_1.get_agendas_values()
        self.assertEqual(len(agenda_values1), 2)
        agenda_values2 = self.mk_2.get_agendas_values()
        self.assertEqual(len(agenda_values2), 2)
        self.assertEqual(agenda_values1,
                         {1: {'numvotes': 2, 'rank': 2, 'score': -33.33, 'volume': 100.0},
                          2: {'numvotes': 1, 'rank': 1, 'score': 100.0, 'volume': 100.0}})
        self.assertEqual(agenda_values2,
                         {1: {'numvotes': 2, 'rank': 1, 'score': 33.33, 'volume': 100.0},
                          2: {'numvotes': 1, 'rank': 2, 'score': -100.0, 'volume': 100.0}})
        agenda_values = self.mk_3.get_agendas_values()
        self.assertFalse(agenda_values)
    def testAPIv2(self):
        """Member detail exposes party info and a working agendas_uri."""
        res = self.client.get('/api/v2/member/%s/?format=json' % self.mk_1.id)
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.content)
        self.assertEqual(data['name'], 'mk_1')
        self.assertEqual(data['party_name'], self.party_1.name)
        self.assertEqual(data['party_url'], self.party_1.get_absolute_url())
        agendas_uri = data['agendas_uri']
        expected_agendas_uri = '/api/v2/member-agendas/%s/' % self.mk_1.id
        self.assertEqual(agendas_uri, expected_agendas_uri, "Wrong agendas URI returned for member")
        res2 = self.client.get(expected_agendas_uri+'?format=json')
        agendas = json.loads(res2.content)
        self.assertEqual(agendas['agendas'], [
            {'id': 1, 'owner': 'Dr. Jacob', 'absolute_url': '/agenda/1/',
             'score': -33.33, 'name': 'agenda 1', 'rank': 2,
             'min': -33.33, 'max': 33.33,
             'party_min': -33.33, 'party_max': 33.33,
             },
            {'id': 2, 'owner': 'Greenpeace', 'absolute_url': '/agenda/2/',
             'score': 100.0, 'name': 'agenda 2', 'rank': 1,
             'min': -100.0, 'max': 100.0,
             'party_min': -100.0, 'party_max': 100.0,
             }])
    def tearDown(self):
        for av in self.agendavotes:
            av.delete()
        for va in self.voteactions:
            va.delete()
        self.vote_1.delete()
        self.vote_2.delete()
        self.mk_1.delete()
        self.mk_2.delete()
        self.party_1.delete()
        self.agenda_1.delete()
        self.agenda_2.delete()
        self.agenda_3.delete()
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle local file: links.
"""
import re
import os
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import urllib
try:
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
from datetime import datetime
from . import urlbase, get_index_html
from .. import log, LOG_CHECK, fileutil, mimeutil, LinkCheckerError, url as urlutil
from ..bookmarks import firefox
from .const import WARN_FILE_MISSING_SLASH, WARN_FILE_SYSTEM_PATH
def get_files (dirname):
    """Yield directory entries of *dirname* in os.listdir() order.

    Only regular files and directories are yielded (symlinks are
    skipped); directory names get a trailing slash appended."""
    for name in os.listdir(dirname):
        full = os.path.join(dirname, name)
        if os.path.islink(full):
            continue
        if os.path.isdir(full):
            yield name + "/"
        elif os.path.isfile(full):
            yield name
def prepare_urlpath_for_nt (path):
    """
    URLs like 'file://server/path/' result in a path named '/server/path'.
    However urllib.url2pathname expects '////server/path'.

    Paths containing a '|' (drive-letter form, e.g. 'C|/dir') are
    returned unchanged.
    """
    if '|' in path:
        return path
    return "////" + path.lstrip("/")
def get_nt_filename (path):
    """Return case sensitive filename for NT path.

    Recursively resolves each path component to the casing reported by
    the filesystem, so URL-vs-filesystem case mismatches can be found.
    NOTE(review): os.path.splitunc() exists only on Windows builds and
    was removed in Python 3.12 -- confirm the supported runtime.
    """
    unc, rest = os.path.splitunc(path)
    head, tail = os.path.split(rest)
    if not tail:
        # Reached the drive/UNC root; nothing left to canonicalize.
        return path
    for fname in os.listdir(unc+head):
        if fname.lower() == tail.lower():
            # Resolve the parent first, then append the true casing.
            return os.path.join(get_nt_filename(unc+head), fname)
    log.error(LOG_CHECK, "could not find %r in %r", tail, head)
    return path
def get_os_filename (path):
    """Return filesystem path for given URL path.

    On Windows the path is first rewritten to the '////server/path'
    form expected by urllib.url2pathname.
    """
    if os.name == 'nt':
        path = prepare_urlpath_for_nt(path)
    res = urllib.url2pathname(fileutil.pathencode(path))
    if os.name == 'nt' and res.endswith(':') and len(res) == 2:
        # Work around http://bugs.python.org/issue11474
        # (bare drive letter 'C:' means CWD on that drive, not its root)
        res += os.sep
    return res
def is_absolute_path (path):
    """Check if given path is absolute. On Windows absolute paths start
    with a drive letter. On all other systems absolute paths start with
    a slash."""
    if os.name == 'nt':
        drive = re.match(r"[a-zA-Z]:", path)
        if drive:
            return True
        # Normalize backslashes so the slash test below applies.
        path = path.replace("\\", "/")
    return path.startswith("/")
class FileUrl (urlbase.UrlBase):
"""
Url link with file scheme.
"""
    def init (self, base_ref, base_url, parent_url, recursion_level,
              aggregate, line, column, page, name, url_encoding, extern):
        """Initialize the URL object and force the scheme to ``file``."""
        super(FileUrl, self).init(base_ref, base_url, parent_url,
                      recursion_level, aggregate, line, column, page, name, url_encoding, extern)
        self.scheme = u'file'
    def build_base_url(self):
        """The URL is normed according to the platform:
         - the base URL is made an absolute file:// URL
         - under Windows platform the drive specifier is normed
        """
        if self.base_url is None:
            return
        base_url = self.base_url
        if not (self.parent_url or self.base_ref or base_url.startswith("file:")):
            # A bare path given on the command line: expand ~ and make
            # it absolute relative to the current working directory.
            base_url = os.path.expanduser(base_url)
            if not is_absolute_path(base_url):
                try:
                    base_url = os.getcwd()+"/"+base_url
                except OSError as msg:
                    # occurs on stale remote filesystems (eg. NFS)
                    errmsg = _("Could not get current working directory: %(msg)s") % dict(msg=msg)
                    raise LinkCheckerError(errmsg)
            if os.path.isdir(base_url):
                base_url += "/"
            base_url = "file://"+base_url
        if os.name == "nt":
            base_url = base_url.replace("\\", "/")
            # transform c:/windows into /c|/windows
            base_url = re.sub("^file://(/?)([a-zA-Z]):", r"file:///\2|", base_url)
            # transform file://path into file:///path
            base_url = re.sub("^file://([^/])", r"file:///\1", base_url)
        self.base_url = unicode(base_url)
    def build_url (self):
        """
        Calls super.build_url() and adds a trailing slash to directories.
        Query and fragment parts are dropped, since they have no meaning
        for filesystem URLs.
        """
        self.build_base_url()
        if self.parent_url is not None:
            # URL joining with the parent URL only works if the query
            # of the base URL are removed first.
            # Otherwise the join function thinks the query is part of
            # the file name.
            from .urlbase import url_norm
            # norm base url - can raise UnicodeError from url.idna_encode()
            base_url, is_idn = url_norm(self.base_url, self.encoding)
            urlparts = list(urlparse.urlsplit(base_url))
            # ignore query part for filesystem urls
            urlparts[3] = ''
            self.base_url = urlutil.urlunsplit(urlparts)
        super(FileUrl, self).build_url()
        # ignore query and fragment url parts for filesystem urls
        self.urlparts[3] = self.urlparts[4] = ''
        if self.is_directory() and not self.urlparts[2].endswith('/'):
            # Directory URLs must end in a slash; warn so the author can
            # fix the link (redirect-like behavior in browsers hides this).
            self.add_warning(_("Added trailing slash to directory."),
                             tag=WARN_FILE_MISSING_SLASH)
            self.urlparts[2] += '/'
        self.url = urlutil.urlunsplit(self.urlparts)
    def add_size_info (self):
        """Get size of file content and modification time from filename path."""
        if self.is_directory():
            # Directory "size" would be the size of the generated
            # index.html listing, which is synthetic — skip it.
            return
        filename = self.get_os_filename()
        self.size = fileutil.get_size(filename)
        self.modified = datetime.utcfromtimestamp(fileutil.get_mtime(filename))
    def check_connection (self):
        """
        Try to open the local file. Under NT systems the case sensitivity
        is checked.

        Raises LinkCheckerError when a non-file parent links to a local
        file (remote pages must not reference local paths).
        """
        if (self.parent_url is not None and
           not self.parent_url.startswith(u"file:")):
            msg = _("local files are only checked without parent URL or when the parent URL is also a file")
            raise LinkCheckerError(msg)
        if self.is_directory():
            self.set_result(_("directory"))
        else:
            # urlopen() also covers file: URLs and raises on missing files.
            url = fileutil.pathencode(self.url)
            self.url_connection = urlopen(url)
            self.check_case_sensitivity()
    def check_case_sensitivity (self):
        """
        Check if url and windows path name match cases
        else there might be problems when copying such
        files on web servers that are case sensitive.

        No-op on non-Windows systems.
        """
        if os.name != 'nt':
            return
        path = self.get_os_filename()
        realpath = get_nt_filename(path)
        if path != realpath:
            self.add_warning(_("The URL path %(path)r is not the same as the "
                            "system path %(realpath)r. You should always use "
                            "the system path in URLs.") % \
                            {"path": path, "realpath": realpath},
                               tag=WARN_FILE_SYSTEM_PATH)
def read_content (self):
    """Return file content, or in case of directories a dummy HTML file
    with links to the files.

    @return: byte string with the (possibly generated) content
    """
    if self.is_directory():
        # generate a synthetic index page listing the directory entries
        data = get_index_html(get_files(self.get_os_filename()))
        if isinstance(data, unicode):
            # Python 2: downstream expects byte strings; characters
            # outside Latin-1 are dropped rather than raising
            data = data.encode("iso8859-1", "ignore")
    else:
        data = super(FileUrl, self).read_content()
    return data
def get_os_filename (self):
    """
    Construct os specific file path out of the file:// URL.
    @return: file name
    @rtype: string
    """
    # urlparts[2] is the path component of the split URL
    path_component = self.urlparts[2]
    return get_os_filename(path_component)
def get_temp_filename (self):
    """Get filename for content to parse.

    Local files are parsed in place, so this is just the OS path of the
    file itself; no temporary copy is created.
    """
    return self.get_os_filename()
def is_directory (self):
    """
    Tell whether this URL points at a real directory. Symbolic links
    to directories are deliberately excluded so that link loops in
    the filesystem are not followed.

    @return: True iff file is a directory and not a symlink
    @rtype: bool
    """
    path = self.get_os_filename()
    if os.path.islink(path):
        return False
    return os.path.isdir(path)
def is_parseable (self):
    """Check if content is parseable for recursion.
    @return: True if content is parseable
    @rtype: bool
    """
    # directories are parseable: a generated index page links the entries
    if self.is_directory():
        return True
    # NOTE(review): presumably matches Firefox bookmark database files
    # and requires sqlite support for parsing — confirm against the
    # firefox helper module
    if firefox.has_sqlite and firefox.extension.search(self.url):
        return True
    if self.content_type in self.ContentMimetypes:
        return True
    log.debug(LOG_CHECK, "File with content type %r is not parseable.", self.content_type)
    return False
def set_content_type (self):
    """Store the guessed content type of this URL in self.content_type,
    or the empty string when no URL is available."""
    if not self.url:
        self.content_type = u""
    else:
        # the guesser may read the content to refine its answer
        self.content_type = mimeutil.guess_mimetype(self.url, read=self.get_content)
def get_intern_pattern (self, url=None):
    """Get pattern for intern URL matching.
    @return non-empty regex pattern or None
    @rtype String or None
    """
    target = self.url if url is None else url
    if not target:
        return None
    if target.startswith('file://'):
        # strip the last path segment so that sibling files in the same
        # directory are treated as intern; index 6 is the second slash
        # of the 'file://' prefix, so only deeper slashes count
        slash = target.rindex('/')
        if slash > 6:
            target = target[:slash + 1]
    return re.escape(target)
def add_url (self, url, line=0, column=0, page=0, name=u"", base=None):
    """If a local webroot directory is configured, replace absolute URLs
    with it. After that queue the URL data for checking.

    @param url: the (possibly relative) URL found in the page
    @param line: line number of the link in the page
    @param column: column number of the link in the page
    @param page: page number of the link (for paged documents)
    @param name: link name/text
    @param base: base URL for relative resolution, if any
    """
    webroot = self.aggregate.config["localwebroot"]
    # only absolute paths are rewritten; relative URLs resolve normally
    if webroot and url and url.startswith(u"/"):
        url = webroot + url[1:]
        log.debug(LOG_CHECK, "Applied local webroot `%s' to `%s'.", webroot, url)
    super(FileUrl, self).add_url(url, line=line, column=column, page=page, name=name, base=base) | unknown | codeparrot/codeparrot-clean | | |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.index.processor;
import java.util.HashSet;
import java.util.Set;
import javax.lang.model.element.Element;
import javax.lang.model.element.ElementKind;
/**
* A {@link StereotypesProvider} implementation that provides the
* {@value #STEREOTYPE} stereotype for each package-info.
*
* @author Stephane Nicoll
* @since 5.0
*/
class PackageInfoStereotypesProvider implements StereotypesProvider {
public static final String STEREOTYPE = "package-info";
@Override
public Set<String> getStereotypes(Element element) {
	// Only package declarations carry the package-info stereotype;
	// every other element kind yields an empty, mutable set.
	Set<String> result = new HashSet<>();
	if (ElementKind.PACKAGE.equals(element.getKind())) {
		result.add(STEREOTYPE);
	}
	return result;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context-indexer/src/main/java/org/springframework/context/index/processor/PackageInfoStereotypesProvider.java |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test if the firing number of coincidences after inhibition equals spatial pooler
numActiveColumnsPerInhArea.
TODO: Fix this up to be more unit testy.
"""
import numpy
import unittest2 as unittest
from nupic.research.spatial_pooler import SpatialPooler
numpy.random.seed(100)
class InhibitionObjectTest(unittest.TestCase):
  """Checks that the number of active columns left after the spatial
  pooler's inhibition step equals numActiveColumnsPerInhArea.

  NOTE(review): the single test drives private SpatialPooler members
  (_computeOverlapsFP, _inhibitionObj, duty-cycle fields) that belong
  to the retired FDRCSpatial2 interface, hence the skip below.
  """

  @unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler."
                 "The new SP doesn't have explicit methods to get inhibition.")
  # TODO: See https://github.com/numenta/nupic/issues/2071
  def testInhibition(self):
    """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
    # Miscellaneous variables:
    # n, w: n, w of encoders
    # inputLen: Length of binary input
    # synPermConnected: Spatial pooler synPermConnected
    # synPermActiveInc: Spatial pooler synPermActiveInc
    # connectPct: Initial connect percentage of permanences
    # columnDimensions: Number of spatial pooler coincidences
    # numActiveColumnsPerInhArea: Spatial pooler numActiveColumnsPerInhArea
    # stimulusThreshold: Spatial pooler stimulusThreshold
    # spSeed: Spatial pooler for initial permanences
    # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
    # kDutyCycleFactor: kDutyCycleFactor for dutyCycleTieBreaker in
    #                   Inhibition
    # spVerbosity: Verbosity to print other sp initial parameters
    # testIter: Testing iterations
    n = 100
    w = 15
    inputLen = 300
    columnDimensions = 2048
    numActiveColumnsPerInhArea = 40
    stimulusThreshold = 0
    spSeed = 1956
    stimulusThresholdInh = 0.00001
    kDutyCycleFactor = 0.01
    spVerbosity = 0
    testIter = 100

    spTest = SpatialPooler(
        columnDimensions=(columnDimensions, 1),
        inputDimensions=(1, inputLen),
        potentialRadius=inputLen / 2,
        numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
        spVerbosity=spVerbosity,
        stimulusThreshold=stimulusThreshold,
        seed=spSeed
    )
    # seed master permanence/potential matrices by hand since the old
    # FDRCSpatial2 initialization path is exercised directly
    initialPermanence = spTest._initialPermanence()
    spTest._masterPotentialM, spTest._masterPermanenceM = (
        spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                       spTest._coincRFShape,
                                       spTest.potentialPct,
                                       initialPermanence,
                                       spTest.random))
    spTest._updateInhibitionObj()
    boostFactors = numpy.ones(columnDimensions)

    for i in range(testIter):
      spTest._iterNum = i
      # random binary input with roughly w active bits out of n
      input_ = numpy.zeros((1, inputLen))
      nonzero = numpy.random.random(inputLen)
      input_[0][numpy.where (nonzero < float(w)/float(n))] = 1

      # overlap step
      spTest._computeOverlapsFP(input_,
                                stimulusThreshold=spTest.stimulusThreshold)
      spTest._overlaps *= boostFactors
      onCellIndices = numpy.where(spTest._overlaps > 0)
      spTest._onCells.fill(0)
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells

      # update _dutyCycleBeforeInh (moving average over dutyCyclePeriod)
      spTest.dutyCyclePeriod = min(i + 1, 1000)
      spTest._dutyCycleBeforeInh = (
          (spTest.dutyCyclePeriod - 1) *
          spTest._dutyCycleBeforeInh +denseOn) / spTest.dutyCyclePeriod
      # tie breaker favors columns with a higher post-inhibition duty cycle
      dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
      dutyCycleTieBreaker *= kDutyCycleFactor

      # inhibition step
      numOn = spTest._inhibitionObj.compute(
          spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
          stimulusThresholdInh,  # stimulusThresholdInh
          max(spTest._overlaps)/1000,  # addToWinners
      )
      # update _dutyCycleAfterInh
      spTest._onCells.fill(0)
      onCellIndices = spTest._onCellIndices[0:numOn]
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells
      spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
                                    spTest._dutyCycleAfterInh + denseOn) /
                                   spTest.dutyCyclePeriod)

      # learning step
      spTest._adaptSynapses(onCellIndices, [], input_)

      # update boostFactor
      spTest._updateBoostFactors()
      boostFactors = spTest._firingBoostFactors

      # update dutyCycle and boost every 50 iterations
      if ((spTest._iterNum+1) % 50) == 0:
        spTest._updateInhibitionObj()
        spTest._updateMinDutyCycles(
            spTest._dutyCycleBeforeInh,
            spTest.minPctDutyCycleBeforeInh,
            spTest._minDutyCycleBeforeInh)
        spTest._updateMinDutyCycles(
            spTest._dutyCycleAfterInh,
            spTest.minPctDutyCycleAfterInh,
            spTest._minDutyCycleAfterInh)

      # test numOn and spTest.numActiveColumnsPerInhArea
      self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                       "Error at input %s, actual numOn are: %i, "
                       "numActivePerInhAre is: %s" % (
                           i, numOn, numActiveColumnsPerInhArea))
# Run the inhibition test suite when executed as a script.
if __name__=="__main__":
    unittest.main() | unknown | codeparrot/codeparrot-clean | | |
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"reflect"
"testing"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
)
// TestGetAddonPhaseFlags verifies the flag set exposed by each addon
// sub-phase: kube-proxy and coredns get their specific networking flags,
// while an unknown phase name falls back to the common flags only.
func TestGetAddonPhaseFlags(t *testing.T) {
	testCases := []struct {
		name          string
		expectedFlags []string
	}{
		{
			name: "all",
			expectedFlags: []string{
				options.CfgPath,
				options.KubeconfigPath,
				options.KubernetesVersion,
				options.ImageRepository,
				options.DryRun,
				options.APIServerAdvertiseAddress,
				options.ControlPlaneEndpoint,
				options.APIServerBindPort,
				options.NetworkingPodSubnet,
				options.FeatureGatesString,
				options.NetworkingDNSDomain,
				options.NetworkingServiceSubnet,
			},
		},
		{
			name: "kube-proxy",
			expectedFlags: []string{
				options.CfgPath,
				options.KubeconfigPath,
				options.KubernetesVersion,
				options.ImageRepository,
				options.DryRun,
				options.APIServerAdvertiseAddress,
				options.ControlPlaneEndpoint,
				options.APIServerBindPort,
				options.NetworkingPodSubnet,
			},
		},
		{
			name: "coredns",
			expectedFlags: []string{
				options.CfgPath,
				options.KubeconfigPath,
				options.KubernetesVersion,
				options.ImageRepository,
				options.DryRun,
				options.FeatureGatesString,
				options.NetworkingDNSDomain,
				options.NetworkingServiceSubnet,
			},
		},
		{
			name: "invalid_name",
			expectedFlags: []string{
				options.CfgPath,
				options.KubeconfigPath,
				options.KubernetesVersion,
				options.ImageRepository,
				options.DryRun,
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual := getAddonPhaseFlags(tc.name)
			if !reflect.DeepEqual(actual, tc.expectedFlags) {
				t.Errorf("phase init addons.getAddonPhaseFlags() = %v, want %v", actual, tc.expectedFlags)
			}
		})
	}
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kubeadm/app/cmd/phases/init/addons_test.go |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.