code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
import six
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _normalize_axis_tuple(axis, ndim):
ret = []
for ax in axis:
ret.append(ax % ndim)
return ret
def _moveaxis(a, source, destination, xp):
if hasattr(xp, 'moveaxis'):
return xp.moveaxis(a, source, destination)
if not all(isinstance(a, int) for a in source):
raise TypeError('int or tuple of int are required.')
if not all(isinstance(a, int) for a in destination):
raise TypeError('int or tuple of int are required.')
if len(source) != len(destination):
raise ValueError('Length of source and destination are '
'different.')
source = _normalize_axis_tuple(source, a.ndim)
destination = _normalize_axis_tuple(destination, a.ndim)
if len(set(source)) != len(source):
raise ValueError('duplicate value in source axis: ({})'.format(
', '.join(map(str, source))))
if len(set(destination)) != len(destination):
raise ValueError('duplicate value in destination axis: ({})'
.format(', '.join(map(str, destination))))
order = [n for n in six.moves.range(a.ndim) if n not in source]
for dest, src in sorted(six.moves.zip(destination, source)):
order.insert(dest, src)
result = a.transpose(order)
return result
class Moveaxis(function_node.FunctionNode):

    """Move axes of an array to new positions."""

    def __init__(self, source, destination):
        # Normalize scalar axes to 1-tuples so the rest of the class can
        # treat source/destination uniformly as sequences.
        if isinstance(source, int):
            self.source = (source,)
        else:
            self.source = source
        if isinstance(destination, int):
            self.destination = (destination,)
        else:
            self.destination = destination

    @staticmethod
    def _check_axes(axes, in_types):
        # Shared validation for ``source`` and ``destination`` (previously
        # duplicated): every axis, positive or negative, must index into
        # the input's ndim.
        if axes is None:
            return
        for axis in axes:
            if axis >= 0:
                type_check.expect(
                    axis < in_types[0].ndim,
                )
            else:
                type_check.expect(
                    -axis - 1 < in_types[0].ndim,
                )

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
        )
        self._check_axes(self.source, in_types)
        self._check_axes(self.destination, in_types)

    def forward(self, inputs):
        # No inputs need to be retained: backward only permutes gradients.
        self.retain_inputs(())
        self._in_ndim = inputs[0].ndim
        xp = backend.get_array_module(*inputs)
        return _moveaxis(inputs[0], self.source, self.destination, xp),

    def backward(self, indexes, gy):
        # The gradient of moveaxis is moveaxis with source and destination
        # swapped, undoing the forward permutation.
        return Moveaxis(self.destination, self.source).apply(gy)
def moveaxis(x, source, destination):
    """Move the source axes to the destination.

    This function transposes the input ``x`` by moving
    the axes ``source`` to the axes ``destination``.
    Other axes remain in their original order.

    See also :func:`chainer.functions.transpose`,
    :func:`chainer.functions.swapaxes`.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
        source (int or tuple of int):
            Original positions of the axes to move. These must be unique.
        destination (int or tuple of int):
            Destination positions for each of the original axes.
            These must also be unique.

    Returns:
        ~chainer.Variable: Variable whose axis is moved.

    .. admonition:: Example

        >>> x = np.zeros((2, 3, 4, 5), np.float32)
        >>> chainer.functions.moveaxis(x, 0, -1).shape
        (3, 4, 5, 2)
        >>> chainer.functions.moveaxis(x, (0, 3), (2, 0)).shape
        (5, 3, 2, 4)

    """
    # apply() returns a one-element tuple of Variables; unwrap it.
    return Moveaxis(source, destination).apply((x,))[0] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Python 2 script (xmlrpclib, `print >>` syntax): queries the PlanetLab
# Europe PLCAPI for the nodes of a slice and writes their IPs to a
# planetlab.cfg file in a nested-braces config format.
import xmlrpclib
import socket

plc_host='www.planet-lab.eu'
slice_name='certhple_purs1'
# NOTE(review): credentials below are placeholders; fill in before running.
auth = { 'AuthMethod' : 'password',
'Username' : '<planetlab user name>',
'AuthString' : '<planetlab user passwd>',
}
api_url="https://%s:443/PLCAPI/"%plc_host
plc_api = xmlrpclib.ServerProxy(api_url,allow_none=True)
# the slice's node ids
node_ids = plc_api.GetSlices(auth,slice_name,['node_ids'])[0]['node_ids']
# get hostname for these nodes
slice_nodes = plc_api.GetNodes(auth,node_ids,['hostname'])
# store in a file
f=open('planetlab.cfg','w')
print >>f,'network = { \n nodes = (';
slsize = len(slice_nodes)
for node in slice_nodes:
    slsize = slsize - 1
    if(slsize == 0):
        # Last node: its entry gets no trailing comma.
        print 'For host %s' %(node)
        print >>f,' {\n testbed_ip = \"%s\";\n }'%(socket.gethostbyname_ex(node['hostname'])[2][0])
    else:
        print 'For host %s' %(node)
        print >>f,' {\n testbed_ip = \"%s\";\n },'%(socket.gethostbyname_ex(node['hostname'])[2][0])
print >>f,' );\n};';
f.close() | unknown | codeparrot/codeparrot-clean | ||
<!--Copyright 2026 the HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
*This model was released on {release_date} and added to Hugging Face Transformers on 2026-01-27.*
# GLM-OCR
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
</div>
## Overview
[GLM-OCR](https://huggingface.co/zai-org/GLM-OCR) is a multimodal OCR (Optical Character Recognition) model designed for complex document understanding from [Z.ai](https://github.com/zai-org/GLM-OCR). The model combines a CogViT visual encoder (pre-trained on large-scale image-text data), a lightweight cross-modal connector with efficient token downsampling, and a GLM-0.5B language decoder.
Key features of GLM-OCR include:
- **Lightweight**: Only 0.9B parameters while achieving state-of-the-art performance (94.62 on OmniDocBench V1.5)
- **Multi-task**: Excels at text recognition, formula recognition, table recognition, and information extraction
- **Multi-modal**: Processes document images for text, formula, and table extraction
This model was contributed by the [zai-org](https://huggingface.co/zai-org) team.
The original code can be found [here](https://github.com/zai-org/GLM-OCR).
## Usage example
### Single image inference
```python
from transformers import AutoProcessor, GlmOcrForConditionalGeneration
import torch
model_id = "zai-org/GLM-OCR"
processor = AutoProcessor.from_pretrained(model_id)
model = GlmOcrForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
)
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"},
{"type": "text", "text": "Text Recognition:"},
],
}
]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
).to(model.device)
output = model.generate(**inputs, max_new_tokens=512)
print(processor.decode(output[0], skip_special_tokens=True))
```
### Batch inference
The model supports batching multiple images for efficient processing.
```python
from transformers import AutoProcessor, GlmOcrForConditionalGeneration
import torch
model_id = "zai-org/GLM-OCR"
processor = AutoProcessor.from_pretrained(model_id)
model = GlmOcrForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
)
# First document
message1 = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"},
{"type": "text", "text": "Text Recognition:"},
],
}
]
# Second document
message2 = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
{"type": "text", "text": "Text Recognition:"},
],
}
]
messages = [message1, message2]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output, skip_special_tokens=True))
```
### Flash Attention 2
GLM-OCR supports Flash Attention 2 for faster inference. First, install the latest version of Flash Attention:
```bash
pip install -U flash-attn --no-build-isolation
```
Then load the model with one of the supported kernels of the [kernels-community](https://huggingface.co/kernels-community):
```python
from transformers import GlmOcrForConditionalGeneration
import torch
model = GlmOcrForConditionalGeneration.from_pretrained(
"zai-org/GLM-OCR",
dtype=torch.bfloat16,
attn_implementation="kernels-community/flash-attn2", # other options: kernels-community/vllm-flash-attn3, kernels-community/paged-attention
device_map="auto",
)
```
## GlmOcrConfig
[[autodoc]] GlmOcrConfig
## GlmOcrVisionConfig
[[autodoc]] GlmOcrVisionConfig
## GlmOcrTextConfig
[[autodoc]] GlmOcrTextConfig
## GlmOcrVisionModel
[[autodoc]] GlmOcrVisionModel
- forward
## GlmOcrTextModel
[[autodoc]] GlmOcrTextModel
- forward
## GlmOcrModel
[[autodoc]] GlmOcrModel
- forward
## GlmOcrForConditionalGeneration
[[autodoc]] GlmOcrForConditionalGeneration
- forward | unknown | github | https://github.com/huggingface/transformers | docs/source/en/model_doc/glm_ocr.md |
# frozen_string_literal: true
require "cases/helper"
require "models/post"
require "models/author"
require "models/comment"
require "models/rating"
require "models/member"
require "models/member_type"
require "models/pirate"
require "models/treasure"
require "models/parrot"
require "models/hotel"
require "models/department"
# Coverage for has_many :through associations declared with disable_joins.
# Each test pairs a joined association with its join-free twin and asserts
# that both return the same records, while the query-count assertions pin
# the expected number of queries: 1 for the joined form, one per join step
# (2 or 3) for the disable_joins form.
class HasManyThroughDisableJoinsAssociationsTest < ActiveRecord::TestCase
  fixtures :posts, :authors, :comments, :pirates, :author_addresses

  def setup
    # Author -> posts -> comments, with polymorphic comment origins
    # (members) and comment ratings, used across the tests below.
    @author = authors(:mary)
    @post = @author.posts.create(title: "title", body: "body")
    @member_type = MemberType.create(name: "club")
    @member = Member.create(member_type: @member_type)
    @comment = @post.comments.create(body: "text", origin: @member)
    @post2 = @author.posts.create(title: "title", body: "body")
    @member2 = Member.create(member_type: @member_type)
    @comment2 = @post2.comments.create(body: "text", origin: @member2)
    @rating1 = @comment.ratings.create(value: 8)
    @rating2 = @comment.ratings.create(value: 9)
  end

  def test_counting_on_disable_joins_through
    assert_equal @author.comments.count, @author.no_joins_comments.count
    assert_queries_count(2) { @author.no_joins_comments.count }
    assert_queries_count(1) { @author.comments.count }
  end

  def test_counting_on_disable_joins_through_using_custom_foreign_key
    assert_equal @author.comments_with_foreign_key.count, @author.no_joins_comments_with_foreign_key.count
    assert_queries_count(2) { @author.no_joins_comments_with_foreign_key.count }
    assert_queries_count(1) { @author.comments_with_foreign_key.count }
  end

  def test_pluck_on_disable_joins_through
    assert_equal @author.comments.pluck(:id).sort, @author.no_joins_comments.pluck(:id).sort
    assert_queries_count(2) { @author.no_joins_comments.pluck(:id) }
    assert_queries_count(1) { @author.comments.pluck(:id) }
  end

  def test_pluck_on_disable_joins_through_using_custom_foreign_key
    assert_equal @author.comments_with_foreign_key.pluck(:id).sort, @author.no_joins_comments_with_foreign_key.pluck(:id).sort
    assert_queries_count(2) { @author.no_joins_comments_with_foreign_key.pluck(:id) }
    assert_queries_count(1) { @author.comments_with_foreign_key.pluck(:id) }
  end

  def test_fetching_on_disable_joins_through
    assert_equal @author.comments.first.id, @author.no_joins_comments.first.id
    assert_queries_count(2) { @author.no_joins_comments.first.id }
    assert_queries_count(1) { @author.comments.first.id }
  end

  def test_fetching_on_disable_joins_through_using_custom_foreign_key
    assert_equal @author.comments_with_foreign_key.first.id, @author.no_joins_comments_with_foreign_key.first.id
    assert_queries_count(2) { @author.no_joins_comments_with_foreign_key.first.id }
    assert_queries_count(1) { @author.comments_with_foreign_key.first.id }
  end

  def test_to_a_on_disable_joins_through
    assert_equal @author.comments.sort_by(&:id), @author.no_joins_comments.sort_by(&:id)
    # Reload so the query-count assertions below are not satisfied by caches.
    @author.reload
    assert_queries_count(2) { @author.no_joins_comments.to_a }
    assert_queries_count(1) { @author.comments.to_a }
  end

  def test_appending_on_disable_joins_through
    assert_difference(->() { @author.no_joins_comments.reload.size }) do
      @post.comments.create(body: "text")
    end
    assert_queries_count(2) { @author.no_joins_comments.reload.size }
    assert_queries_count(1) { @author.comments.reload.size }
  end

  def test_appending_on_disable_joins_through_using_custom_foreign_key
    assert_difference(->() { @author.no_joins_comments_with_foreign_key.reload.size }) do
      @post.comments.create(body: "text")
    end
    assert_queries_count(2) { @author.no_joins_comments_with_foreign_key.reload.size }
    assert_queries_count(1) { @author.comments_with_foreign_key.reload.size }
  end

  def test_empty_on_disable_joins_through
    empty_author = authors(:bob)
    assert_equal [], assert_queries_count(0) { empty_author.comments.all }
    assert_equal [], assert_queries_count(1) { empty_author.no_joins_comments.all }
  end

  def test_empty_on_disable_joins_through_using_custom_foreign_key
    empty_author = authors(:bob)
    assert_equal [], assert_queries_count(0) { empty_author.comments_with_foreign_key.all }
    assert_equal [], assert_queries_count(1) { empty_author.no_joins_comments_with_foreign_key.all }
  end

  # Double-through associations (author -> comments -> ratings): the
  # disable_joins form issues three queries, one per hop.
  def test_pluck_on_disable_joins_through_a_through
    rating_ids = Rating.where(comment: @comment).pluck(:id).sort
    assert_equal rating_ids, assert_queries_count(1) { @author.ratings.pluck(:id).sort }
    assert_equal rating_ids, assert_queries_count(3) { @author.no_joins_ratings.pluck(:id).sort }
  end

  def test_count_on_disable_joins_through_a_through
    ratings_count = Rating.where(comment: @comment).count
    assert_equal ratings_count, assert_queries_count(1) { @author.ratings.count }
    assert_equal ratings_count, assert_queries_count(3) { @author.no_joins_ratings.count }
  end

  def test_count_on_disable_joins_using_relation_with_scope
    assert_equal 2, assert_queries_count(1) { @author.good_ratings.count }
    assert_equal 2, assert_queries_count(3) { @author.no_joins_good_ratings.count }
  end

  def test_to_a_on_disable_joins_with_multiple_scopes
    assert_equal [@rating1, @rating2], assert_queries_count(1) { @author.good_ratings.to_a }
    assert_equal [@rating1, @rating2], assert_queries_count(3) { @author.no_joins_good_ratings.to_a }
  end

  def test_preloading_has_many_through_disable_joins
    assert_queries_count(3) { Author.all.preload(:good_ratings).map(&:good_ratings) }
    assert_queries_count(4) { Author.all.preload(:no_joins_good_ratings).map(&:good_ratings) }
  end

  def test_polymophic_disable_joins_through_counting
    assert_equal 2, assert_queries_count(1) { @author.ordered_members.count }
    assert_equal 2, assert_queries_count(3) { @author.no_joins_ordered_members.count }
  end

  def test_polymophic_disable_joins_through_ordering
    assert_equal [@member2, @member], assert_queries_count(1) { @author.ordered_members.to_a }
    assert_equal [@member2, @member], assert_queries_count(3) { @author.no_joins_ordered_members.to_a }
  end

  def test_polymorphic_disable_joins_through_reordering
    assert_equal [@member, @member2], assert_queries_count(1) { @author.ordered_members.reorder(id: :asc).to_a }
    assert_equal [@member, @member2], assert_queries_count(3) { @author.no_joins_ordered_members.reorder(id: :asc).to_a }
  end

  def test_polymorphic_disable_joins_through_ordered_scopes
    assert_equal [@member2, @member], assert_queries_count(1) { @author.ordered_members.unnamed.to_a }
    assert_equal [@member2, @member], assert_queries_count(3) { @author.no_joins_ordered_members.unnamed.to_a }
  end

  def test_polymorphic_disable_joins_through_ordered_chained_scopes
    member3 = Member.create(member_type: @member_type)
    member4 = Member.create(member_type: @member_type, name: "named")
    @post2.comments.create(body: "text", origin: member3)
    @post2.comments.create(body: "text", origin: member4)
    assert_equal [member3, @member2, @member], assert_queries_count(1) { @author.ordered_members.unnamed.with_member_type_id(@member_type.id).to_a }
    assert_equal [member3, @member2, @member], assert_queries_count(3) { @author.no_joins_ordered_members.unnamed.with_member_type_id(@member_type.id).to_a }
  end

  def test_polymorphic_disable_joins_through_ordered_scope_limits
    assert_equal [@member2], assert_queries_count(1) { @author.ordered_members.unnamed.limit(1).to_a }
    assert_equal [@member2], assert_queries_count(3) { @author.no_joins_ordered_members.unnamed.limit(1).to_a }
  end

  def test_polymorphic_disable_joins_through_ordered_scope_first
    assert_equal @member2, assert_queries_count(1) { @author.ordered_members.unnamed.first }
    assert_equal @member2, assert_queries_count(3) { @author.no_joins_ordered_members.unnamed.first }
  end

  def test_order_applied_in_double_join
    assert_equal [@member2, @member], assert_queries_count(1) { @author.members.to_a }
    assert_equal [@member2, @member], assert_queries_count(3) { @author.no_joins_members.to_a }
  end

  def test_first_and_scope_applied_in_double_join
    assert_equal @member2, assert_queries_count(1) { @author.members.unnamed.first }
    assert_equal @member2, assert_queries_count(3) { @author.no_joins_members.unnamed.first }
  end

  def test_first_and_scope_in_double_join_applies_order_in_memory
    # With disable_joins the final SQL carries no ORDER BY: ordering is
    # applied in memory, as this captured-SQL assertion checks.
    disable_joins_sql = capture_sql { @author.no_joins_members.unnamed.first }
    assert_no_match(/ORDER BY/, disable_joins_sql.last)
  end

  def test_limit_and_scope_applied_in_double_join
    assert_equal [@member2], assert_queries_count(1) { @author.members.unnamed.limit(1).to_a }
    assert_equal [@member2], assert_queries_count(3) { @author.no_joins_members.unnamed.limit(1) }
  end

  def test_limit_and_scope_in_double_join_applies_limit_in_memory
    # Likewise, LIMIT is applied in memory rather than in the final query.
    disable_joins_sql = capture_sql { @author.no_joins_members.unnamed.first }
    assert_no_match(/LIMIT 1/, disable_joins_sql.last)
  end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/associations/has_many_through_disable_joins_associations_test.rb
import cStringIO, zipfile
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
    """Compress the given KML string and return the resulting KMZ bytes."""
    zipped = cStringIO.StringIO()
    archive = zipfile.ZipFile(zipped, 'a', zipfile.ZIP_DEFLATED)
    # A KMZ is just a zip archive whose main document is named doc.kml.
    archive.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    archive.close()
    return zipped.getvalue()
def render_to_kml(*args, **kwargs):
    """Render the template arguments as a KML response (correct MIME type)."""
    rendered = loader.render_to_string(*args, **kwargs)
    return HttpResponse(rendered,
                        mimetype='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
    """Render the template, compress the KML content, and return it as a
    KMZ response (correct MIME type).
    """
    rendered = loader.render_to_string(*args, **kwargs)
    return HttpResponse(compress_kml(rendered),
                        mimetype='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
    "Renders the response using the MIME type for plain text."
    # Same rendering path as the KML helpers above, served as text/plain.
    return HttpResponse(loader.render_to_string(*args, **kwargs),
                        mimetype='text/plain') | unknown | codeparrot/codeparrot-clean | ||
"""Alignment with SNAP: http://snap.cs.berkeley.edu/
"""
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.ngsalign import novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    TODO: Use streaming with new development version of SNAP to feed into
    structural variation preparation de-duplication.
    """
    # SNAP's "paired" mode takes two fastq arguments; use an empty string
    # when there is no pair file so the command template stays valid.
    pair_file = pair_file if pair_file else ""
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    assert not data.get("align_split"), "Split alignments not supported with SNAP"
    snap = config_utils.get_program("snap", data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    # NOTE(review): `resources` is looked up but never used below — confirm
    # whether resource limits were meant to feed into the command.
    resources = config_utils.get_resources("snap", data["config"])
    rg_info = novoalign.get_rg_info(names)
    # BAM inputs carry their own pairing flag; fastq pairing is implied by
    # the presence of a pair file.
    is_paired = bam.is_paired(fastq_file) if fastq_file.endswith(".bam") else pair_file
    if not utils.file_exists(out_file):
        with postalign.tobam_cl(data, out_file, is_paired) as (tobam_cl, tx_out_file):
            cmd_name = "paired" if is_paired else "single"
            # The template is filled from locals() by name: renaming any of
            # the local variables above would silently break the command.
            cmd = ("{snap} {cmd_name} {index_dir} {fastq_file} {pair_file} "
                   "-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
            do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
    data["work_bam"] = out_file
    return data
def align_bam(bam_file, index_dir, names, align_dir, data):
    """Align from a BAM input by delegating to ``align`` with no pair file."""
    return align(bam_file, None, index_dir, names, align_dir, data)
# Optional galaxy location file. Falls back on remap_index_fn if not found.
galaxy_location_file = "snap_indices.loc"
def remap_index_fn(ref_file):
    """Map sequence references to snap reference directory, using standard layout.
    """
    # Standard layout: the snap index directory is a sibling of the
    # directory containing the reference file (<base>/<seq>/.. -> <base>/snap).
    snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap")
    assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir
    return snap_dir | unknown | codeparrot/codeparrot-clean | ||
import {browser, by, element} from 'protractor';
import {bootstrapClientApp, navigateTo, verifyNoBrowserErrors} from './util';
// E2E check that a click performed before client bootstrap takes effect
// after bootstrap (the element's text changes from "click not triggered"
// to "click triggered" once bootstrapClientApp() runs).
describe('App E2E Tests', () => {
  beforeEach(async () => {
    // Don't wait for Angular since it is not bootstrapped automatically.
    await browser.waitForAngularEnabled(false);

    // Load the page without waiting for Angular since it is not bootstrapped automatically.
    await navigateTo('');
  });

  afterEach(async () => {
    // Make sure there were no client side errors.
    await verifyNoBrowserErrors();
  });

  it('should reply click event', async () => {
    const divElement = element(by.css('#divElement'));
    expect(await divElement.getText()).toContain('click not triggered');

    // Trigger click (before the client app is bootstrapped)
    await divElement.click();

    // Bootstrap client application
    await bootstrapClientApp();

    expect(await divElement.getText()).toContain('click triggered');
  });
}); | typescript | github | https://github.com/angular/angular | integration/platform-server-hydration/e2e/src/app.e2e-spec.ts
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package forwarding
import (
"bufio"
"bytes"
"net/http"
"os"
"reflect"
"testing"
)
// Test_ForwardedRequest_GenerateParse exercises one full generate/parse
// round trip of a forwarded HTTP request.
func Test_ForwardedRequest_GenerateParse(t *testing.T) {
	testForwardedRequestGenerateParse(t)
}
// Benchmark the round trip with the JSON wire encoding selected via the
// VAULT_MESSAGE_TYPE environment variable; report average message size.
func Benchmark_ForwardedRequest_GenerateParse_JSON(b *testing.B) {
	os.Setenv("VAULT_MESSAGE_TYPE", "json")
	var totalSize, numRuns int64
	for i := 0; i < b.N; i++ {
		numRuns++
		totalSize += testForwardedRequestGenerateParse(b)
	}
	b.Logf("message size per op: %d", totalSize/numRuns)
}
// Benchmark the round trip with compressed JSON encoding; report average
// message size.
func Benchmark_ForwardedRequest_GenerateParse_JSON_Compressed(b *testing.B) {
	os.Setenv("VAULT_MESSAGE_TYPE", "json_compress")
	var totalSize, numRuns int64
	for i := 0; i < b.N; i++ {
		numRuns++
		totalSize += testForwardedRequestGenerateParse(b)
	}
	b.Logf("message size per op: %d", totalSize/numRuns)
}
// Benchmark the round trip with the proto3 wire encoding; report average
// message size.
func Benchmark_ForwardedRequest_GenerateParse_Proto3(b *testing.B) {
	os.Setenv("VAULT_MESSAGE_TYPE", "proto3")
	var totalSize, numRuns int64
	for i := 0; i < b.N; i++ {
		numRuns++
		totalSize += testForwardedRequestGenerateParse(b)
	}
	b.Logf("message size per op: %d", totalSize/numRuns)
}
// testForwardedRequestGenerateParse builds an HTTP request, wraps it with
// GenerateForwardedHTTPRequest, unwraps it with ParseForwardedHTTPRequest,
// and verifies the result matches the original request field by field.
// It returns the byte size of the wrapped request for benchmark reporting.
func testForwardedRequestGenerateParse(t testing.TB) int64 {
	bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": { "argle": "bargle", neet: 0 } }`))
	req, err := http.NewRequest("FOOBAR", "https://pushit.real.good:9281/snicketysnack?furbleburble=bloopetybloop", bodBuf)
	if err != nil {
		t.Fatal(err)
	}

	// We want to get the fields we would expect from an incoming request, so
	// we write it out and then read it again
	buf1 := bytes.NewBuffer(nil)
	err = req.Write(buf1)
	if err != nil {
		t.Fatal(err)
	}

	// Read it back in, parsing like a server
	bufr1 := bufio.NewReader(buf1)
	initialReq, err := http.ReadRequest(bufr1)
	if err != nil {
		t.Fatal(err)
	}

	// Generate the request with the forwarded request in the body
	req, err = GenerateForwardedHTTPRequest(initialReq, "https://bloopety.bloop:8201")
	if err != nil {
		t.Fatal(err)
	}

	// Perform another "round trip"
	buf2 := bytes.NewBuffer(nil)
	err = req.Write(buf2)
	if err != nil {
		t.Fatal(err)
	}
	// Size of the wrapped request on the wire, reported by the benchmarks.
	size := int64(buf2.Len())
	bufr2 := bufio.NewReader(buf2)
	intreq, err := http.ReadRequest(bufr2)
	if err != nil {
		t.Fatal(err)
	}

	// Now extract the forwarded request to generate a final request for processing
	finalReq, err := ParseForwardedHTTPRequest(intreq)
	if err != nil {
		t.Fatal(err)
	}

	switch {
	case initialReq.Method != finalReq.Method:
		t.Fatalf("bad method:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
	case initialReq.RemoteAddr != finalReq.RemoteAddr:
		t.Fatalf("bad remoteaddr:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
	case initialReq.Host != finalReq.Host:
		t.Fatalf("bad host:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
	case !reflect.DeepEqual(initialReq.URL, finalReq.URL):
		t.Fatalf("bad url:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq.URL, *finalReq.URL)
	case !reflect.DeepEqual(initialReq.Header, finalReq.Header):
		t.Fatalf("bad header:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
	default:
		// Compare bodies
		// NOTE(review): Seek error is discarded here — rewinding a
		// bytes.Reader to offset 0 should not fail, but confirm.
		bodBuf.Seek(0, 0)
		initBuf := bytes.NewBuffer(nil)
		_, err = initBuf.ReadFrom(bodBuf)
		if err != nil {
			t.Fatal(err)
		}
		finBuf := bytes.NewBuffer(nil)
		_, err = finBuf.ReadFrom(finalReq.Body)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(initBuf.Bytes(), finBuf.Bytes()) {
			t.Fatalf("badbody :\ninitialReq:\n%#v\nfinalReq:\n%#v\n", initBuf.Bytes(), finBuf.Bytes())
		}
	}
	return size
} | go | github | https://github.com/hashicorp/vault | helper/forwarding/util_test.go
/*
* Copyright 2020 Google LLC
*
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file or at
* https://developers.google.com/open-source/licenses/bsd
*/
#ifndef REFTABLE_WRITER_H
#define REFTABLE_WRITER_H
#include "reftable-record.h"
#include <stdint.h>
#include <unistd.h> /* ssize_t */
/* Writing single reftables */
/* reftable_write_options sets options for writing a single reftable. */
struct reftable_write_options {
/* boolean: do not pad out blocks to block size. */
unsigned unpadded : 1;
/* the blocksize. Should be less than 2^24. */
uint32_t block_size;
/* boolean: do not generate a SHA1 => ref index. */
unsigned skip_index_objects : 1;
/* how often to write complete keys in each block. */
uint16_t restart_interval;
/* 4-byte identifier ("sha1", "s256") of the hash.
* Defaults to SHA1 if unset
*/
enum reftable_hash hash_id;
/* Default mode for creating files. If unset, use 0666 (+umask) */
unsigned int default_permissions;
/* boolean: copy log messages exactly. If unset, check that the message
* is a single line, and add '\n' if missing.
*/
unsigned exact_log_message : 1;
/* boolean: Prevent auto-compaction of tables. */
unsigned disable_auto_compact : 1;
/*
* Geometric sequence factor used by auto-compaction to decide which
* tables to compact. Defaults to 2 if unset.
*/
uint8_t auto_compaction_factor;
/*
* The number of milliseconds to wait when trying to lock "tables.list".
* Note that this does not apply to locking individual tables, as these
* should only ever be locked when already holding the "tables.list"
* lock.
*
* Passing 0 will fail immediately when the file is locked, passing a
* negative value will cause us to block indefinitely.
*/
long lock_timeout_ms;
/*
* Optional callback used to fsync files to disk. Falls back to using
* fsync(3P) when unset.
*/
int (*fsync)(int fd);
/*
* Callback function to execute whenever the stack is being reloaded.
* This can be used e.g. to discard cached information that relies on
* the old stack's data. The payload data will be passed as argument to
* the callback.
*/
void (*on_reload)(void *payload);
void *on_reload_payload;
};
/* reftable_block_stats holds statistics for a single block type */
struct reftable_block_stats {
/* total number of entries written */
int entries;
/* total number of key restarts */
uint32_t restarts;
/* total number of blocks */
int blocks;
/* total number of index blocks */
int index_blocks;
/* depth of the index */
int max_index_level;
/* offset of the first block for this type */
uint64_t offset;
/* offset of the top level index block for this type, or 0 if not
* present */
uint64_t index_offset;
};
/* stats holds overall statistics for a single reftable */
struct reftable_stats {
/* total number of blocks written. */
int blocks;
/* stats for ref data */
struct reftable_block_stats ref_stats;
/* stats for the SHA1 to ref map. */
struct reftable_block_stats obj_stats;
/* stats for index blocks */
struct reftable_block_stats idx_stats;
/* stats for log blocks */
struct reftable_block_stats log_stats;
/* disambiguation length of shortened object IDs. */
int object_id_len;
};
struct reftable_writer;
/* Create a new writer. */
int reftable_writer_new(struct reftable_writer **out,
ssize_t (*writer_func)(void *, const void *, size_t),
int (*flush_func)(void *),
void *writer_arg, const struct reftable_write_options *opts);
/*
 * Set the range of update indices for the records we will add. When writing a
 * table into a stack, the min should be at least
 * reftable_stack_next_update_index(), or REFTABLE_API_ERROR is returned.
 *
 * For transactional updates to a stack, typically min==max, and the
 * update_index can be obtained by inspecting the stack. When converting an
 * existing ref database into a single reftable, this would be a range of
 * update-index timestamps.
 *
 * The function should be called before adding any records to the writer. If
 * not, it will fail with REFTABLE_API_ERROR.
 */
int reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
uint64_t max);
/*
  Add a reftable_ref_record. The record should have names that come after
  already added records.

  The update_index must be within the limits set by
  reftable_writer_set_limits(), or REFTABLE_API_ERROR is returned. It is a
  REFTABLE_API_ERROR error to write a ref record after a log record.
*/
int reftable_writer_add_ref(struct reftable_writer *w,
struct reftable_ref_record *ref);
/*
Convenience function to add multiple reftable_ref_records; the function sorts
the records before adding them, reordering the records array passed in.
*/
int reftable_writer_add_refs(struct reftable_writer *w,
struct reftable_ref_record *refs, size_t n);
/*
adds reftable_log_records. Log records are keyed by (refname, decreasing
update_index). The key for the record added must come after the already added
log records.
*/
int reftable_writer_add_log(struct reftable_writer *w,
struct reftable_log_record *log);
/*
Convenience function to add multiple reftable_log_records; the function sorts
the records before adding them, reordering records array passed in.
*/
int reftable_writer_add_logs(struct reftable_writer *w,
struct reftable_log_record *logs, size_t n);
/* reftable_writer_close finalizes the reftable. The writer is retained so
* statistics can be inspected. */
int reftable_writer_close(struct reftable_writer *w);
/* reftable_writer_stats returns the statistics on the reftable being written.

   This struct becomes invalid when the writer is freed.
*/
const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w);
/* reftable_writer_free deallocates memory for the writer */
void reftable_writer_free(struct reftable_writer *w);
#endif | c | github | https://github.com/git/git | reftable/reftable-writer.h |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import date
from graphql import (
GraphQLObjectType,
GraphQLField,
GraphQLArgument,
GraphQLList,
GraphQLString,
GraphQLInt,
GraphQLFloat,
GraphQLBoolean,
GraphQLNonNull
)
from api.graphql.common import SpotDateType, SpotDatetimeType, SpotIpType, create_spot_node_type, IngestSummaryType
import api.resources.flow as Flow
# GraphQL output type for a single ML-scored ("suspicious") netflow record.
# Resolvers read keys from the raw row dict produced by api.resources.flow;
# numeric fields fall back to 0 when the key is missing or falsy.
# Fix: "destionation" -> "destination" in the dstIp_isInternal description.
SuspiciousType = GraphQLObjectType(
    name='NetflowSuspiciousType',
    fields={
        'tstart': GraphQLField(
            type=SpotDatetimeType,
            description='Time the flow was received by the flow collector',
            resolver=lambda root, *_: root.get('tstart')
        ),
        'srcIp': GraphQLField(
            type=GraphQLString,
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        'dstIp': GraphQLField(
            type=GraphQLString,
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        'srcPort': GraphQLField(
            type=GraphQLInt,
            description='Source port',
            resolver=lambda root, *_: root.get('sport') or 0
        ),
        'dstPort': GraphQLField(
            type=GraphQLInt,
            description='Destination port',
            resolver=lambda root, *_: root.get('dport') or 0
        ),
        'protocol': GraphQLField(
            type=GraphQLString,
            description='IP protocol',
            resolver=lambda root, *_: root.get('proto')
        ),
        'inPkts': GraphQLField(
            type=GraphQLInt,
            description='Input packets',
            resolver=lambda root, *_: root.get('ipkt') or 0
        ),
        'inBytes': GraphQLField(
            type=GraphQLInt,
            description='Input bytes',
            resolver=lambda root, *_: root.get('ibyt') or 0
        ),
        'outPkts': GraphQLField(
            type=GraphQLInt,
            description='Output packets',
            resolver=lambda root, *_: root.get('opkt') or 0
        ),
        'outBytes': GraphQLField(
            type=GraphQLInt,
            description='Output bytes',
            resolver=lambda root, *_: root.get('obyt') or 0
        ),
        'score': GraphQLField(
            type=GraphQLFloat,
            description='Spot ML score',
            resolver=lambda root, *_: root.get('ml_score') or 0
        ),
        'rank': GraphQLField(
            type=GraphQLInt,
            description='Spot ML rank',
            resolver=lambda root, *_: root.get('rank') or 0
        ),
        'srcIp_isInternal': GraphQLField(
            type=GraphQLInt,
            description='Internal source IP address context flag',
            resolver=lambda root, *_: root.get('srcip_internal')
        ),
        'dstIp_isInternal': GraphQLField(
            type=GraphQLInt,
            description='Internal destination IP address context flag',
            resolver=lambda root, *_: root.get('dstip_internal')
        ),
        'srcIp_geoloc': GraphQLField(
            type=GraphQLString,
            description='Source IP geolocation',
            resolver=lambda root, *_: root.get('src_geoloc')
        ),
        'dstIp_geoloc': GraphQLField(
            type=GraphQLString,
            description='Destination IP geolocation',
            resolver=lambda root, *_: root.get('dst_geoloc')
        ),
        'srcIp_domain': GraphQLField(
            type=GraphQLString,
            description='Source IP domain',
            resolver=lambda root, *_: root.get('src_domain')
        ),
        'dstIp_domain': GraphQLField(
            type=GraphQLString,
            description='Destination IP domain',
            resolver=lambda root, *_: root.get('dst_domain')
        ),
        'srcIp_rep': GraphQLField(
            type=GraphQLString,
            description='Source IP reputation metadata',
            resolver=lambda root, *_: root.get('src_rep')
        ),
        'dstIp_rep': GraphQLField(
            type=GraphQLString,
            description='Destination IP reputation metadata',
            resolver=lambda root, *_: root.get('dst_rep')
        )
    }
)
# GraphQL output type for raw netflow records around a moment in time
# (the "edge details" drill-down between two IPs). Resolvers read keys
# from the raw row dict returned by api.resources.flow.
EdgeDetailsType = GraphQLObjectType(
    name='NetflowEdgeDetailsType',
    fields={
        'tstart': GraphQLField(
            type=SpotDatetimeType,
            description='Time the flow was received by the flow collector',
            resolver=lambda root, *_: root.get('tstart')
        ),
        'srcIp': GraphQLField(
            type=GraphQLString,
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        'dstIp': GraphQLField(
            type=GraphQLString,
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        # NOTE(review): ports are exposed as strings here but as ints in
        # NetflowSuspiciousType -- presumably intentional; verify.
        'srcPort': GraphQLField(
            type=GraphQLString,
            description='Source port',
            resolver=lambda root, *_: root.get('sport')
        ),
        'dstPort': GraphQLField(
            type=GraphQLString,
            description='Destination port',
            resolver=lambda root, *_: root.get('dport')
        ),
        'protocol': GraphQLField(
            type=GraphQLString,
            description='IP protocol',
            resolver=lambda root, *_: root.get('proto')
        ),
        'flags': GraphQLField(
            type=GraphQLString,
            description='TCP flags',
            resolver=lambda root, *_: root.get('flags')
        ),
        'tos': GraphQLField(
            type=GraphQLString,
            description='DSCP value',
            resolver=lambda root, *_: root.get('tos')
        ),
        'inBytes': GraphQLField(
            type=GraphQLInt,
            description='Input bytes',
            resolver=lambda root, *_: root.get('ibyt') or 0
        ),
        'inPkts': GraphQLField(
            type=GraphQLInt,
            description='Input packets',
            resolver=lambda root, *_: root.get('ipkt') or 0
        ),
        'inIface': GraphQLField(
            type=GraphQLString,
            description='SNMP input interface id index',
            resolver=lambda root, *_: root.get('input')
        ),
        'outIface': GraphQLField(
            type=GraphQLString,
            description='SNMP output interface id index',
            resolver=lambda root, *_: root.get('output')
        ),
        'routerIp': GraphQLField(
            type=GraphQLString,
            description='Reporting router IP address',
            resolver=lambda root, *_: root.get('rip')
        ),
        'outBytes': GraphQLField(
            type=GraphQLInt,
            description='Output bytes',
            resolver=lambda root, *_: root.get('obyt') or 0
        ),
        'outPkts': GraphQLField(
            type=GraphQLInt,
            description='Output packets',
            resolver=lambda root, *_: root.get('opkt') or 0
        )
    }
)
# GraphQL output type for the chord-diagram drill-down: traffic totals
# between the IP of interest and each peer IP.
IpConnectionDetailsType = GraphQLObjectType(
    name='NetflowIpConnectionDetailsType',
    fields={
        'srcIp': GraphQLField(
            type=GraphQLString,
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        'dstIp': GraphQLField(
            type=GraphQLString,
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        'inBytes': GraphQLField(
            type=GraphQLInt,
            description='Input bytes',
            resolver=lambda root, *_: root.get('ibyt') or 0
        ),
        'inPkts': GraphQLField(
            type=GraphQLInt,
            description='Input packets',
            resolver=lambda root, *_: root.get('ipkt') or 0
        )
    }
)
# GraphQL output type for a connection an analyst has scored.
# Fix: "Destionation" -> "Destination" in the dstPort description.
ScoredConnectionType = GraphQLObjectType(
    name='NetflowScoredConnectionType',
    fields={
        'tstart': GraphQLField(
            type=SpotDatetimeType,
            description='Time the flow was received by the flow collector',
            resolver=lambda root, *_: root.get('tstart')
        ),
        'srcIp': GraphQLField(
            type=SpotIpType,
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        # NOTE(review): this type reads 'srcport'/'dstport' while the other
        # netflow types read 'sport'/'dport' -- presumably the scored-results
        # table uses different column names; verify against Flow queries.
        'srcPort': GraphQLField(
            type=GraphQLInt,
            description='Source port',
            resolver=lambda root, *_: root.get('srcport') or 0
        ),
        'dstIp': GraphQLField(
            type=SpotIpType,
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        'dstPort': GraphQLField(
            type=GraphQLInt,
            description='Destination port',
            resolver=lambda root, *_: root.get('dstport') or 0
        ),
        'score': GraphQLField(
            type=GraphQLInt,
            description='Risk score value. 1->High, 2->Medium, 3->Low',
            resolver=lambda root, *_: root.get('score') or 0
        )
    }
)
# GraphQL output type for per-day aggregate statistics between a high-risk
# IP and a peer (the "expanded search" drill-down).
# Fixes: "tranferred" -> "transferred", "bwteen" -> "between" in descriptions.
ThreatDetailsType = GraphQLObjectType(
    name='NetflowThreatDetailsType',
    fields={
        'firstSeen': GraphQLField(
            type=SpotDatetimeType,
            description='First time two IPs were seen on a particular day of flow traffic data',
            resolver=lambda root, *_: root.get('firstseen')
        ),
        'lastSeen': GraphQLField(
            type=SpotDatetimeType,
            description='Last time two IPs were seen on a particular day of flow traffic data',
            resolver=lambda root, *_: root.get('lastseen')
        ),
        'srcIp': GraphQLField(
            type=SpotIpType,
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        'dstIp': GraphQLField(
            type=SpotIpType,
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        'srcPort': GraphQLField(
            type=GraphQLInt,
            description='Source port',
            resolver=lambda root, *_: root.get('sport')
        ),
        'dstPort': GraphQLField(
            type=GraphQLInt,
            description='Destination port',
            resolver=lambda root, *_: root.get('dport')
        ),
        'connections': GraphQLField(
            type=GraphQLInt,
            description='Number of connections on a particular day of flow traffic data',
            resolver=lambda root, *_: root.get('conns')
        ),
        'maxPkts': GraphQLField(
            type=GraphQLInt,
            description='Maximum number of packets transferred on a single connection',
            resolver=lambda root, *_: root.get('maxpkts')
        ),
        'avgPkts': GraphQLField(
            type=GraphQLInt,
            description='Average number of packets transferred between IPs',
            resolver=lambda root, *_: root.get('avgpkts')
        ),
        'maxBytes': GraphQLField(
            type=GraphQLInt,
            description='Maximum number of bytes transferred on a single connection',
            resolver=lambda root, *_: root.get('maxbyts')
        ),
        'avgBytes': GraphQLField(
            type=GraphQLInt,
            description='Average number of bytes transferred between IPs',
            resolver=lambda root, *_: root.get('avgbyts')
        )
    }
)
# GraphQL output type for an analyst comment attached to a high-risk IP
# (the story-board entries).
CommentType = GraphQLObjectType(
    name='NetflowCommentType',
    fields={
        'ip': GraphQLField(
            type=SpotIpType,
            description='High risk IP address',
            resolver=lambda root, *_: root.get('ip_threat')
        ),
        'title': GraphQLField(
            type=GraphQLString,
            description='Threat title',
            resolver=lambda root, *_: root.get('title')
        ),
        'text': GraphQLField(
            type=GraphQLString,
            description='Threat description',
            resolver=lambda root, *_: root.get('text')
        )
    }
)
# Container for threat listings: scored connections plus analyst comments.
# Both fields default the reference date to today when none is supplied.
ThreatsInformationType = GraphQLObjectType(
    name='NetflowThreatsType',
    fields={
        'list': GraphQLField(
            type=GraphQLList(ScoredConnectionType),
            description='List of suspicious IPs that have been scored',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference to retrieve the list of scored IPs. Defaults to today'
                )
            },
            resolver=lambda root, args, *
            _: Flow.get_scored_connections(date=args.get('date', date.today()))
        ),
        'comments': GraphQLField(
            type=GraphQLList(CommentType),
            description='A list of comments about threats',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference to retrieve the list of high risk comments. Defaults to today'
                )
            },
            resolver=lambda root, args, *
            _: Flow.story_board(date=args.get('date', date.today()))
        )
    }
)
# Recursive spot node type used by the incident-progression tree view.
IncidentProgressionNodeType = create_spot_node_type(
    'NetflowIncidentProgressionNodeType')
# Impact-analysis tree node: extends the generic spot node with a 'size'
# field carrying the connection count for the subtree.
ImpactAnalysisNodeType = create_spot_node_type('NetflowImpactAnalysisNodeType', {
    'size': GraphQLField(
        type=GraphQLInt,
        description='Number of inbound, outbound and two-way connections',
        resolver=lambda root, *_: root.get('size') or 0
    )
})
# GeoJSON-like geometry for one IP on the map view.
MapViewGeometryType = GraphQLObjectType(
    name='NetflowMapViewGeometryType',
    fields={
        'coordinates': GraphQLField(
            type=GraphQLList(GraphQLFloat),
            description='Geo latitude and longitude',
            resolver=lambda root, *_: root.get('coordinates')
        )
    }
)
# Non-geometric metadata for one IP on the map view.
MapViewPropertiesType = GraphQLObjectType(
    name='NetflowMapViewPropertiesType',
    fields={
        'ip': GraphQLField(
            type=SpotIpType,
            description='IP',
            resolver=lambda root, *_: root.get('ip')
        ),
        'location': GraphQLField(
            type=GraphQLString,
            description='Name of the IP\'s location',
            resolver=lambda root, *_: root.get('location')
        ),
        'type': GraphQLField(
            type=GraphQLInt,
            description='Property type',
            resolver=lambda root, *_: root.get('type')
        )
    }
)
# One map marker: geometry plus properties.
MapViewIpType = GraphQLObjectType(
    name='NetflowMapViewIpType',
    fields={
        'geometry': GraphQLField(
            type=MapViewGeometryType,
            description='Geolocalization information',
            resolver=lambda root, *_: root.get('geometry')
        ),
        'properties': GraphQLField(
            type=MapViewPropertiesType,
            description='Metadata',
            resolver=lambda root, *_: root.get('properties')
        )
    }
)
# Top-level map view payload: source and destination marker lists.
MapViewType = GraphQLObjectType(
    name='NetflowMapViewType',
    fields={
        'srcIps': GraphQLField(
            type=GraphQLList(MapViewIpType),
            description='A list of source IPs',
            resolver=lambda root, *_: root.get('sourceips', [])
        ),
        'dstIps': GraphQLField(
            type=GraphQLList(MapViewIpType),
            description='A list of destination IPs',
            resolver=lambda root, *_: root.get('destips', [])
        )
    }
)
# GraphQL output type for one clustered connection interval on the threat
# timeline. All fields are non-null: timeline rows always carry them.
# Fixes: "tranferred" -> "transferred" in pkts/bytes descriptions.
TimelineType = GraphQLObjectType(
    name='NetflowTimelineType',
    fields={
        'tstart': GraphQLField(
            type=GraphQLNonNull(SpotDatetimeType),
            description='Connection\'s start time',
            resolver=lambda root, *_: root.get('tstart')
        ),
        'tend': GraphQLField(
            type=GraphQLNonNull(SpotDatetimeType),
            description='Connection\'s end time',
            resolver=lambda root, *_: root.get('tend')
        ),
        'srcIp': GraphQLField(
            type=GraphQLNonNull(SpotIpType),
            description='Source IP address',
            resolver=lambda root, *_: root.get('srcip')
        ),
        'dstIp': GraphQLField(
            type=GraphQLNonNull(SpotIpType),
            description='Destination IP address',
            resolver=lambda root, *_: root.get('dstip')
        ),
        'protocol': GraphQLField(
            type=GraphQLNonNull(GraphQLString),
            description='Connection\'s protocol',
            resolver=lambda root, *_: root.get('proto')
        ),
        'srcPort': GraphQLField(
            type=GraphQLNonNull(GraphQLInt),
            description='Source port',
            resolver=lambda root, *_: root.get('sport')
        ),
        'dstPort': GraphQLField(
            type=GraphQLNonNull(GraphQLInt),
            description='Destination port',
            resolver=lambda root, *_: root.get('dport')
        ),
        'pkts': GraphQLField(
            type=GraphQLNonNull(GraphQLInt),
            description='Packets transferred between IPs',
            resolver=lambda root, *_: root.get('ipkt')
        ),
        'bytes': GraphQLField(
            type=GraphQLNonNull(GraphQLInt),
            description='Bytes transferred between IPs',
            resolver=lambda root, *_: root.get('ibyt')
        )
    }
)
# Root object for the single-threat drill-down views. Every field takes a
# reference date (defaulting to today) plus the suspicious IP, and delegates
# to the matching api.resources.flow query helper.
# Fixes: "high rist" -> "high risk", "Gelocalization" -> "Geolocalization",
# "Suspicious Ip" -> "Suspicious IP"; awkward autopep8 lambda line-splits
# normalized for readability (behavior unchanged).
ThreatInformationType = GraphQLObjectType(
    name='NetflowThreatInformation',
    fields={
        'details': GraphQLField(
            type=GraphQLList(ThreatDetailsType),
            description='Detailed information about a high risk IP',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for high risk IP information. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Suspicious IP'
                )
            },
            resolver=lambda root, args, *_: Flow.expanded_search(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'incidentProgression': GraphQLField(
            type=IncidentProgressionNodeType,
            description='Details for the type of connections that conform the activity related to the threat',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for incident progression information. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Suspicious IP'
                )
            },
            resolver=lambda root, args, *_: Flow.incident_progression(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'impactAnalysis': GraphQLField(
            type=ImpactAnalysisNodeType,
            description='Contains the number of inbound, outbound and two-way connections found related to the suspicious IP',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for impact analysis information. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Suspicious IP'
                )
            },
            resolver=lambda root, args, *_: Flow.impact_analysis(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'geoLocalization': GraphQLField(
            type=MapViewType,
            description='Geolocalization info about the IPs related to this threat',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for geo localization information. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Suspicious IP'
                )
            },
            resolver=lambda root, args, *_: Flow.sc_geo(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'timeline': GraphQLField(
            type=GraphQLList(TimelineType),
            description='Lists \'clusters\' of inbound connections to the IP, grouped by time; showing an overall idea of the times during the day with the most activity',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for time line information. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Suspicious IP'
                )
            },
            resolver=lambda root, args, *_: Flow.time_line(
                date=args.get('date', date.today()), ip=args.get('ip'))
        )
    }
)
# Root query type for the netflow pipeline. 'threats'/'threat' resolve to an
# empty dict so their sub-field resolvers run against the raw args.
# Fixes: "suspicous" -> "suspicious"; awkward autopep8 lambda line-splits
# normalized for readability (behavior unchanged).
QueryType = GraphQLObjectType(
    name='NetflowQueryType',
    fields={
        'suspicious': GraphQLField(
            type=GraphQLList(SuspiciousType),
            description='Flow suspicious connections',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as a reference for suspicious connections. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=SpotIpType,
                    description='IP of interest'
                )
            },
            resolver=lambda root, args, *_: Flow.suspicious_connections(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'edgeDetails': GraphQLField(
            type=GraphQLList(EdgeDetailsType),
            description='Flow activity between two IPs around a particular moment in time',
            args={
                'tstart': GraphQLArgument(
                    type=GraphQLNonNull(SpotDatetimeType),
                    description='Time of interest'
                ),
                'srcIp': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Source IP address'
                ),
                'dstIp': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='Destination IP address'
                )
            },
            resolver=lambda root, args, *_: Flow.details(
                date=args.get('tstart'),
                src_ip=args.get('srcIp'),
                dst_ip=args.get('dstIp'))
        ),
        'ipDetails': GraphQLField(
            type=GraphQLList(IpConnectionDetailsType),
            description='Flow activity details in between IP of interest and other suspicious IPs',
            args={
                'date': GraphQLArgument(
                    type=SpotDateType,
                    description='A date to use as reference for IP network activity details. Defaults to today'
                ),
                'ip': GraphQLArgument(
                    type=GraphQLNonNull(SpotIpType),
                    description='IP address of interest'
                )
            },
            resolver=lambda root, args, *_: Flow.chord_details(
                date=args.get('date', date.today()), ip=args.get('ip'))
        ),
        'threats': GraphQLField(
            type=ThreatsInformationType,
            description='Advanced information about threats',
            resolver=lambda *_: {}
        ),
        'threat': GraphQLField(
            type=ThreatInformationType,
            description='Advanced information about a single threat',
            resolver=lambda *_: {}
        ),
        'ingestSummary': GraphQLField(
            type=GraphQLList(IngestSummaryType),
            description='Summary of ingested flows in range',
            args={
                'startDate': GraphQLArgument(
                    type=GraphQLNonNull(SpotDateType),
                    description='Start date'
                ),
                'endDate': GraphQLArgument(
                    type=GraphQLNonNull(SpotDateType),
                    description='End date'
                )
            },
            resolver=lambda root, args, *_: Flow.ingest_summary(
                start_date=args.get('startDate'), end_date=args.get('endDate'))
        )
    }
)
TYPES = [] | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "library-panel-repeat-options-test",
"labels": {
"test": "library-panel-repeat"
}
},
"spec": {
"annotations": [],
"cursorSync": "Off",
"description": "Testing library panel repeat options migration from v1beta1 to v2alpha1",
"editable": true,
"elements": {
"panel-1": {
"kind": "LibraryPanel",
"spec": {
"id": 1,
"title": "Library Panel with Horizontal Repeat",
"libraryPanel": {
"name": "Library Panel with Horizontal Repeat",
"uid": "lib-panel-repeat-h"
}
}
},
"panel-2": {
"kind": "LibraryPanel",
"spec": {
"id": 2,
"title": "Library Panel with Vertical Repeat",
"libraryPanel": {
"name": "Library Panel with Vertical Repeat",
"uid": "lib-panel-repeat-v"
}
}
},
"panel-3": {
"kind": "LibraryPanel",
"spec": {
"id": 3,
"title": "Library Panel Instance Override",
"libraryPanel": {
"name": "Library Panel with Horizontal Repeat",
"uid": "lib-panel-repeat-h"
}
}
},
"panel-4": {
"kind": "LibraryPanel",
"spec": {
"id": 4,
"title": "Library Panel without Repeat",
"libraryPanel": {
"name": "Library Panel without Repeat",
"uid": "lib-panel-no-repeat"
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-1"
},
"repeat": {
"mode": "variable",
"value": "server",
"direction": "h",
"maxPerRow": 3
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 6,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-2"
},
"repeat": {
"mode": "variable",
"value": "datacenter",
"direction": "v"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 8,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-3"
},
"repeat": {
"mode": "variable",
"value": "instance-var",
"direction": "v",
"maxPerRow": 5
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 12,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"test",
"library-panels",
"repeat"
],
"timeSettings": {
"timezone": "browser",
"from": "now-1h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Library Panel Repeat Options Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/v1beta1.library-panel-repeat-options.v2alpha1.json |
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class MporaIE(InfoExtractor):
    # Extractor for MPORA action-sports videos (mpora.com / mpora.de).
    _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
    IE_NAME = 'MPORA'
    _TEST = {
        'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de',
        'file': 'AAdo8okx4wiz.mp4',
        'md5': 'a7a228473eedd3be741397cf452932eb',
        'info_dict': {
            'title': 'Katy Curd - Winter in the Forest',
            'duration': 416,
            'uploader': 'Peter Newman Media',
        },
    }
    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        webpage = self._download_webpage(url, video_id)
        # The page embeds the player configuration as a JS object literal
        # passed to FM.Player(); pull it out and parse it as JSON.
        data_json = self._search_regex(
            r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json')
        data = json.loads(data_json)
        uploader = data['info_overlay'].get('username')
        # duration appears to be reported in milliseconds -- TODO confirm.
        duration = data['video']['duration'] // 1000
        # assumes an 'sd' encoding entry is always present -- TODO confirm.
        thumbnail = data['video']['encodings']['sd']['poster']
        title = data['info_overlay']['title']
        formats = []
        for encoding_id, edata in data['video']['encodings'].items():
            for src in edata['sources']:
                # Width is encoded in the file name suffix (e.g. ..._640.mp4).
                # NOTE(review): the third positional argument of _search_regex
                # is the field name; passing False here looks unintentional --
                # verify against InfoExtractor._search_regex's signature.
                width_str = self._search_regex(
                    r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                    False, default=None)
                vcodec = src['type'].partition('/')[2]
                formats.append({
                    'format_id': encoding_id + '-' + vcodec,
                    'url': src['src'],
                    'vcodec': vcodec,
                    'width': int_or_none(width_str),
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'uploader': uploader,
            'duration': duration,
            'thumbnail': thumbnail,
        }
from __future__ import annotations
import asyncio
import contextlib
import logging
import pprint
import signal
import warnings
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, TypeVar
from twisted.internet.defer import Deferred, DeferredList, inlineCallbacks
from scrapy import Spider
from scrapy.addons import AddonManager
from scrapy.core.engine import ExecutionEngine
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings, overridden_settings
from scrapy.signalmanager import SignalManager
from scrapy.spiderloader import SpiderLoaderProtocol, get_spider_loader
from scrapy.utils.defer import deferred_from_coro
from scrapy.utils.log import (
configure_logging,
get_scrapy_root_handler,
install_scrapy_root_handler,
log_reactor_info,
log_scrapy_info,
)
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.reactor import (
_asyncio_reactor_path,
install_reactor,
is_asyncio_reactor_installed,
is_reactor_installed,
set_asyncio_event_loop,
verify_installed_asyncio_event_loop,
verify_installed_reactor,
)
from scrapy.utils.reactorless import install_reactor_import_hook
if TYPE_CHECKING:
from collections.abc import Awaitable, Generator, Iterable
from scrapy.logformatter import LogFormatter
from scrapy.statscollectors import StatsCollector
from scrapy.utils.request import RequestFingerprinterProtocol
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
class Crawler:
    """Owns the per-crawl state for one spider: settings, signals, add-ons,
    extensions, stats and the execution engine.

    A ``Crawler`` is single-use: ``crawl()`` / ``crawl_async()`` refuse to
    run more than once on the same instance.
    """

    def __init__(
        self,
        spidercls: type[Spider],
        settings: dict[str, Any] | Settings | None = None,
        init_reactor: bool = False,
    ):
        if isinstance(spidercls, Spider):
            raise ValueError("The spidercls argument must be a class, not an object")

        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)

        self.spidercls: type[Spider] = spidercls
        # Work on a copy so per-spider custom settings never mutate the
        # caller's Settings object.
        self.settings: Settings = settings.copy()
        self.spidercls.update_settings(self.settings)
        self._update_root_log_handler()

        self.addons: AddonManager = AddonManager(self)
        self.signals: SignalManager = SignalManager(self)

        self._init_reactor: bool = init_reactor
        self.crawling: bool = False
        self._started: bool = False

        # Components below are populated later by _apply_settings() / crawl().
        self.extensions: ExtensionManager | None = None
        self.stats: StatsCollector | None = None
        self.logformatter: LogFormatter | None = None
        self.request_fingerprinter: RequestFingerprinterProtocol | None = None
        self.spider: Spider | None = None
        self.engine: ExecutionEngine | None = None

    def _update_root_log_handler(self) -> None:
        if get_scrapy_root_handler() is not None:
            # scrapy root handler already installed: update it with new settings
            install_scrapy_root_handler(self.settings)

    def _apply_settings(self) -> None:
        # Idempotent: settings are frozen at the end, and frozen settings
        # mean the per-crawl components were already built.
        if self.settings.frozen:
            return

        self.addons.load_settings(self.settings)
        self.stats = load_object(self.settings["STATS_CLASS"])(self)

        lf_cls: type[LogFormatter] = load_object(self.settings["LOG_FORMATTER"])
        self.logformatter = lf_cls.from_crawler(self)

        self.request_fingerprinter = build_from_crawler(
            load_object(self.settings["REQUEST_FINGERPRINTER_CLASS"]),
            self,
        )

        use_reactor = self.settings.getbool("TWISTED_ENABLED")
        if use_reactor:
            reactor_class: str = self.settings["TWISTED_REACTOR"]
            event_loop: str = self.settings["ASYNCIO_EVENT_LOOP"]
            if self._init_reactor:
                # this needs to be done after the spider settings are merged,
                # but before something imports twisted.internet.reactor
                if reactor_class:
                    install_reactor(reactor_class, event_loop)
                else:
                    from twisted.internet import reactor  # noqa: F401
            if reactor_class:
                verify_installed_reactor(reactor_class)
                if is_asyncio_reactor_installed() and event_loop:
                    verify_installed_asyncio_event_loop(event_loop)
            if self._init_reactor or reactor_class:
                log_reactor_info()
        else:
            logger.debug("Not using a Twisted reactor")
            self._apply_reactorless_default_settings()

        self.extensions = ExtensionManager.from_crawler(self)
        # Freeze so later code cannot change settings after components were
        # built from them.
        self.settings.freeze()

        d = dict(overridden_settings(self.settings))
        logger.info(
            "Overridden settings:\n%(settings)s", {"settings": pprint.pformat(d)}
        )

    def _apply_reactorless_default_settings(self) -> None:
        """Change some setting defaults when not using a Twisted reactor.

        Some settings need different defaults when using and not using a
        reactor, but as we can't put this logic into default_settings.py we
        change them here when the reactor is not used.
        """
        self.settings.set("TELNETCONSOLE_ENABLED", False, priority="default")

    # Cannot use @deferred_f_from_coro_f because that relies on the reactor
    # being installed already, which is done within _apply_settings(), inside
    # this method.
    @inlineCallbacks
    def crawl(self, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:
        """Start the crawler by instantiating its spider class with the given
        *args* and *kwargs* arguments, while setting the execution engine in
        motion. Should be called only once.

        Return a deferred that is fired when the crawl is finished.
        """
        if self.crawling:
            raise RuntimeError("Crawling already taking place")
        if self._started:
            raise RuntimeError(
                "Cannot run Crawler.crawl() more than once on the same instance."
            )
        self.crawling = self._started = True

        try:
            self.spider = self._create_spider(*args, **kwargs)
            self._apply_settings()
            self._update_root_log_handler()
            self.engine = self._create_engine()
            yield deferred_from_coro(self.engine.open_spider_async())
            yield deferred_from_coro(self.engine.start_async())
        except Exception:
            self.crawling = False
            # Best-effort engine shutdown before propagating the error.
            if self.engine is not None:
                yield deferred_from_coro(self.engine.close_async())
            raise

    async def crawl_async(self, *args: Any, **kwargs: Any) -> None:
        """Start the crawler by instantiating its spider class with the given
        *args* and *kwargs* arguments, while setting the execution engine in
        motion. Should be called only once.

        .. versionadded:: 2.14

        Complete when the crawl is finished.
        """
        if self.crawling:
            raise RuntimeError("Crawling already taking place")
        if self._started:
            raise RuntimeError(
                "Cannot run Crawler.crawl_async() more than once on the same instance."
            )
        self.crawling = self._started = True

        try:
            self.spider = self._create_spider(*args, **kwargs)
            self._apply_settings()
            self._update_root_log_handler()
            self.engine = self._create_engine()
            await self.engine.open_spider_async()
            await self.engine.start_async()
        except Exception:
            self.crawling = False
            # Best-effort engine shutdown before propagating the error.
            if self.engine is not None:
                await self.engine.close_async()
            raise

    def _create_spider(self, *args: Any, **kwargs: Any) -> Spider:
        return self.spidercls.from_crawler(self, *args, **kwargs)

    def _create_engine(self) -> ExecutionEngine:
        # The engine calls back into stop_async() when it finishes on its own.
        return ExecutionEngine(self, lambda _: self.stop_async())

    def stop(self) -> Deferred[None]:
        """Start a graceful stop of the crawler and return a deferred that is
        fired when the crawler is stopped."""
        warnings.warn(
            "Crawler.stop() is deprecated, use stop_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.stop_async())

    async def stop_async(self) -> None:
        """Start a graceful stop of the crawler and complete when the crawler is stopped.

        .. versionadded:: 2.14
        """
        if self.crawling:
            self.crawling = False
            assert self.engine
            if self.engine.running:
                await self.engine.stop_async()

    @staticmethod
    def _get_component(
        component_class: type[_T], components: Iterable[Any]
    ) -> _T | None:
        # Linear scan; returns the first component that is an instance of
        # the requested class (or a subclass), or None.
        for component in components:
            if isinstance(component, component_class):
                return component
        return None

    def get_addon(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of an :ref:`add-on <topics-addons>` of
        the specified class or a subclass, or ``None`` if none is found.

        .. versionadded:: 2.12
        """
        return self._get_component(cls, self.addons.addons)

    def get_downloader_middleware(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`downloader middleware
        <topics-downloader-middleware>` of the specified class or a subclass,
        or ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_downloader_middleware() can only be called after "
                "the crawl engine has been created."
            )
        return self._get_component(cls, self.engine.downloader.middleware.middlewares)

    def get_extension(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of an :ref:`extension
        <topics-extensions>` of the specified class or a subclass,
        or ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the extension manager has been
        created, e.g. at signals :signal:`engine_started` or
        :signal:`spider_opened`.
        """
        if not self.extensions:
            raise RuntimeError(
                "Crawler.get_extension() can only be called after the "
                "extension manager has been created."
            )
        return self._get_component(cls, self.extensions.middlewares)

    def get_item_pipeline(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`item pipeline
        <topics-item-pipeline>` of the specified class or a subclass, or
        ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_item_pipeline() can only be called after the "
                "crawl engine has been created."
            )
        return self._get_component(cls, self.engine.scraper.itemproc.middlewares)

    def get_spider_middleware(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`spider middleware
        <topics-spider-middleware>` of the specified class or a subclass, or
        ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_spider_middleware() can only be called after the "
                "crawl engine has been created."
            )
        return self._get_component(cls, self.engine.scraper.spidermw.middlewares)
class CrawlerRunnerBase(ABC):
    """Shared machinery for crawler runners: settings normalization, spider
    loading, and bookkeeping of the crawlers currently managed."""

    def __init__(self, settings: dict[str, Any] | Settings | None = None):
        # Accept a plain dict (or nothing) and promote it to a Settings object.
        if settings is None or isinstance(settings, dict):
            settings = Settings(settings)
        # Add-ons may adjust settings before any Crawler exists.
        AddonManager.load_pre_crawler_settings(settings)
        self.settings: Settings = settings
        self.spider_loader: SpiderLoaderProtocol = get_spider_loader(settings)
        self._crawlers: set[Crawler] = set()
        self.bootstrap_failed = False

    @property
    def crawlers(self) -> set[Crawler]:
        """Set of :class:`crawlers <scrapy.crawler.Crawler>` started by
        :meth:`crawl` and managed by this class."""
        return self._crawlers

    def create_crawler(
        self, crawler_or_spidercls: type[Spider] | str | Crawler
    ) -> Crawler:
        """
        Return a :class:`~scrapy.crawler.Crawler` object.

        * If ``crawler_or_spidercls`` is a Crawler, it is returned as-is.
        * If ``crawler_or_spidercls`` is a Spider subclass, a new Crawler
          is constructed for it.
        * If ``crawler_or_spidercls`` is a string, this function finds
          a spider with this name in a Scrapy project (using spider loader),
          then creates a Crawler instance for it.
        """
        # An existing Crawler passes through untouched; a spider *instance*
        # (as opposed to a class) is always an error.
        if isinstance(crawler_or_spidercls, Crawler):
            return crawler_or_spidercls
        if isinstance(crawler_or_spidercls, Spider):
            raise ValueError(
                "The crawler_or_spidercls argument cannot be a spider object, "
                "it must be a spider class (or a Crawler object)"
            )
        return self._create_crawler(crawler_or_spidercls)

    def _create_crawler(self, spidercls: str | type[Spider]) -> Crawler:
        # Spider names are resolved through the project's spider loader.
        if isinstance(spidercls, str):
            spidercls = self.spider_loader.load(spidercls)
        return Crawler(spidercls, self.settings)

    @abstractmethod
    def crawl(
        self,
        crawler_or_spidercls: type[Spider] | str | Crawler,
        *args: Any,
        **kwargs: Any,
    ) -> Awaitable[None]:
        raise NotImplementedError
class CrawlerRunner(CrawlerRunnerBase):
    """
    This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already setup :mod:`~twisted.internet.reactor`.
    The CrawlerRunner object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    This class provides Deferred-based APIs. Use :class:`AsyncCrawlerRunner`
    for modern coroutine APIs.
    """
    def __init__(self, settings: dict[str, Any] | Settings | None = None):
        super().__init__(settings)
        # This runner is Deferred-based, so it cannot operate with Twisted
        # support switched off.
        if not self.settings.getbool("TWISTED_ENABLED"):
            raise RuntimeError(
                f"{type(self).__name__} doesn't support TWISTED_ENABLED=False."
            )
        # Deferreds of the crawls currently in progress; consumed by join().
        self._active: set[Deferred[None]] = set()
    def crawl(
        self,
        crawler_or_spidercls: type[Spider] | str | Crawler,
        *args: Any,
        **kwargs: Any,
    ) -> Deferred[None]:
        """
        Run a crawler with the provided arguments.
        It will call the given Crawler's :meth:`~Crawler.crawl` method, while
        keeping track of it so it can be stopped later.
        If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
        instance, this method will try to create one using this parameter as
        the spider class given to it.
        Returns a deferred that is fired when the crawling is finished.
        :param crawler_or_spidercls: already created crawler, or a spider class
            or spider's name inside the project to create it
        :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
            :class:`~scrapy.spiders.Spider` subclass or string
        :param args: arguments to initialize the spider
        :param kwargs: keyword arguments to initialize the spider
        """
        # Spider *instances* are rejected up front; only classes, spider
        # names, or Crawler objects are accepted.
        if isinstance(crawler_or_spidercls, Spider):
            raise ValueError(
                "The crawler_or_spidercls argument cannot be a spider object, "
                "it must be a spider class (or a Crawler object)"
            )
        crawler = self.create_crawler(crawler_or_spidercls)
        return self._crawl(crawler, *args, **kwargs)
    @inlineCallbacks
    def _crawl(
        self, crawler: Crawler, *args: Any, **kwargs: Any
    ) -> Generator[Deferred[Any], Any, None]:
        # Track both the crawler and its Deferred for the crawl's lifetime,
        # untracking them whether the crawl succeeds or fails.
        self.crawlers.add(crawler)
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)
        try:
            yield d
        finally:
            self.crawlers.discard(crawler)
            self._active.discard(d)
            # If the crawler never acquired a spider instance, bootstrap
            # failed; record it for callers that inspect the flag afterwards.
            self.bootstrap_failed |= not getattr(crawler, "spider", None)
    def stop(self) -> Deferred[Any]:
        """
        Stops simultaneously all the crawling jobs taking place.
        Returns a deferred that is fired when they all have ended.
        """
        # stop_async() is a coroutine; wrap each call in a Deferred so they
        # can be aggregated by DeferredList.
        return DeferredList(deferred_from_coro(c.stop_async()) for c in self.crawlers)
    @inlineCallbacks
    def join(self) -> Generator[Deferred[Any], Any, None]:
        """
        join()
        Returns a deferred that is fired when all managed :attr:`crawlers` have
        completed their executions.
        """
        # Loop: new crawls may have been scheduled while waiting on the
        # current batch of active Deferreds.
        while self._active:
            yield DeferredList(self._active)
class AsyncCrawlerRunner(CrawlerRunnerBase):
    """
    This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already setup :mod:`~twisted.internet.reactor`.
    The AsyncCrawlerRunner object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    This class provides coroutine APIs. It requires
    :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
    """
    def __init__(self, settings: dict[str, Any] | Settings | None = None):
        super().__init__(settings)
        # asyncio Tasks of the crawls currently in progress; used by join().
        self._active: set[asyncio.Task[None]] = set()
    def crawl(
        self,
        crawler_or_spidercls: type[Spider] | str | Crawler,
        *args: Any,
        **kwargs: Any,
    ) -> asyncio.Task[None]:
        """
        Run a crawler with the provided arguments.
        It will call the given Crawler's :meth:`~Crawler.crawl` method, while
        keeping track of it so it can be stopped later.
        If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
        instance, this method will try to create one using this parameter as
        the spider class given to it.
        Returns a :class:`~asyncio.Task` object which completes when the
        crawling is finished.
        :param crawler_or_spidercls: already created crawler, or a spider class
            or spider's name inside the project to create it
        :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
            :class:`~scrapy.spiders.Spider` subclass or string
        :param args: arguments to initialize the spider
        :param kwargs: keyword arguments to initialize the spider
        """
        if isinstance(crawler_or_spidercls, Spider):
            raise ValueError(
                "The crawler_or_spidercls argument cannot be a spider object, "
                "it must be a spider class (or a Crawler object)"
            )
        # With Twisted enabled the installed reactor must be the asyncio one;
        # with Twisted disabled no reactor may be installed at all.
        if self.settings.getbool("TWISTED_ENABLED"):
            if not is_asyncio_reactor_installed():
                raise RuntimeError(
                    f"When TWISTED_ENABLED is True, {type(self).__name__} "
                    f"requires that the installed Twisted reactor is "
                    f'"twisted.internet.asyncioreactor.AsyncioSelectorReactor".'
                )
        elif is_reactor_installed():
            raise RuntimeError(
                "TWISTED_ENABLED is False but a Twisted reactor is installed."
            )
        crawler = self.create_crawler(crawler_or_spidercls)
        return self._crawl(crawler, *args, **kwargs)
    def _crawl(self, crawler: Crawler, *args: Any, **kwargs: Any) -> asyncio.Task[None]:
        # At this point the asyncio loop has been installed either by the user
        # or by AsyncCrawlerProcess (but it isn't running yet, so no asyncio.create_task()).
        loop = asyncio.get_event_loop()
        self.crawlers.add(crawler)
        task = loop.create_task(crawler.crawl_async(*args, **kwargs))
        self._active.add(task)
        def _done(_: asyncio.Task[None]) -> None:
            # Untrack the crawler and its task once the crawl finishes, and
            # record whether the crawler ever acquired a spider instance.
            self.crawlers.discard(crawler)
            self._active.discard(task)
            self.bootstrap_failed |= not getattr(crawler, "spider", None)
        task.add_done_callback(_done)
        return task
    async def stop(self) -> None:
        """
        Stops simultaneously all the crawling jobs taking place.
        Completes when they all have ended.
        """
        # asyncio.wait() rejects an empty collection, hence the guard.
        if self.crawlers:
            await asyncio.wait(
                [asyncio.create_task(c.stop_async()) for c in self.crawlers]
            )
    async def join(self) -> None:
        """
        Completes when all managed :attr:`crawlers` have completed their
        executions.
        """
        # Loop: new crawls may have been scheduled while awaiting the
        # current batch of active tasks.
        while self._active:
            await asyncio.wait(self._active)
class CrawlerProcessBase(CrawlerRunnerBase):
    """Common reactor setup and OS-signal plumbing shared by
    :class:`CrawlerProcess` and :class:`AsyncCrawlerProcess`."""
    def __init__(
        self,
        settings: dict[str, Any] | Settings | None = None,
        install_root_handler: bool = True,
    ):
        super().__init__(settings)
        # A *process* owns top-level logging, unlike the plain runners.
        configure_logging(self.settings, install_root_handler)
        log_scrapy_info(self.settings)
    @abstractmethod
    def start(
        self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
    ) -> None:
        raise NotImplementedError
    def _signal_shutdown(self, signum: int, _: Any) -> None:
        from twisted.internet import reactor
        # First signal: shut down gracefully; re-arm the handlers so a second
        # signal triggers a forced, unclean shutdown via _signal_kill.
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        logger.info(
            "Received %(signame)s, shutting down gracefully. Send again to force ",
            {"signame": signame},
        )
        # Signal handlers run outside the reactor thread.
        reactor.callFromThread(self._graceful_stop_reactor)
    def _signal_kill(self, signum: int, _: Any) -> None:
        from twisted.internet import reactor
        # Second signal: ignore any further signals and stop the reactor
        # without waiting for crawls to finish.
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        logger.info(
            "Received %(signame)s twice, forcing unclean shutdown", {"signame": signame}
        )
        reactor.callFromThread(self._stop_reactor)
    def _setup_reactor(self, install_signal_handlers: bool) -> None:
        from twisted.internet import reactor
        # Install the configured DNS resolver on the reactor.
        resolver_class = load_object(self.settings["DNS_RESOLVER"])
        # We pass self, which is CrawlerProcess, instead of Crawler here,
        # which works because the default resolvers only use crawler.settings.
        resolver = build_from_crawler(resolver_class, self, reactor=reactor) # type: ignore[arg-type]
        resolver.install_on_reactor()
        tp = reactor.getThreadPool()
        tp.adjustPoolsize(maxthreads=self.settings.getint("REACTOR_THREADPOOL_MAXSIZE"))
        # Ensure in-flight crawls are stopped before the reactor shuts down.
        reactor.addSystemEventTrigger("before", "shutdown", self._stop_dfd)
        if install_signal_handlers:
            # Install Scrapy's own signal handlers once the reactor runs.
            reactor.addSystemEventTrigger(
                "after", "startup", install_shutdown_handlers, self._signal_shutdown
            )
    @abstractmethod
    def _stop_dfd(self) -> Deferred[Any]:
        raise NotImplementedError
    @inlineCallbacks
    def _graceful_stop_reactor(self) -> Generator[Deferred[Any], Any, None]:
        # Stop all crawlers first; stop the reactor even if that fails.
        try:
            yield self._stop_dfd()
        finally:
            self._stop_reactor()
    def _stop_reactor(self, _: Any = None) -> None:
        from twisted.internet import reactor
        # raised if already stopped or in shutdown stage
        with contextlib.suppress(RuntimeError):
            reactor.stop()
class CrawlerProcess(CrawlerProcessBase, CrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.
    This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
    for starting a :mod:`~twisted.internet.reactor` and handling shutdown
    signals, like the keyboard interrupt command Ctrl-C. It also configures
    top-level logging.
    This utility should be a better fit than
    :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
    :mod:`~twisted.internet.reactor` within your application.
    The CrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.
    :param install_root_handler: whether to install root logging handler
        (default: True)
    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    This class provides Deferred-based APIs. Use :class:`AsyncCrawlerProcess`
    for modern coroutine APIs.
    """
    def __init__(
        self,
        settings: dict[str, Any] | Settings | None = None,
        install_root_handler: bool = True,
    ):
        super().__init__(settings, install_root_handler)
        # Tracks whether a Crawler has already been asked to install the
        # reactor; only the first crawler created does so.
        self._initialized_reactor: bool = False
        logger.debug("Using CrawlerProcess")
    def _create_crawler(self, spidercls: type[Spider] | str) -> Crawler:
        # Resolve spider names through the project's spider loader.
        if isinstance(spidercls, str):
            spidercls = self.spider_loader.load(spidercls)
        # Only the first crawler initializes the reactor.
        init_reactor = not self._initialized_reactor
        self._initialized_reactor = True
        return Crawler(spidercls, self.settings, init_reactor=init_reactor)
    def _stop_dfd(self) -> Deferred[Any]:
        # stop() already returns a Deferred (inherited from CrawlerRunner).
        return self.stop()
    def start(
        self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
    ) -> None:
        """
        This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
        size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
        based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
        If ``stop_after_crawl`` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.
        :param bool stop_after_crawl: stop or not the reactor when all
            crawlers have finished
        :param bool install_signal_handlers: whether to install the OS signal
            handlers from Twisted and Scrapy (default: True)
        """
        from twisted.internet import reactor
        if stop_after_crawl:
            d = self.join()
            # Don't start the reactor if the deferreds are already fired
            if d.called:
                return
            d.addBoth(self._stop_reactor)
        self._setup_reactor(install_signal_handlers)
        reactor.run(installSignalHandlers=install_signal_handlers) # blocking call
class AsyncCrawlerProcess(CrawlerProcessBase, AsyncCrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.
    This class extends :class:`~scrapy.crawler.AsyncCrawlerRunner` by adding support
    for starting a :mod:`~twisted.internet.reactor` and handling shutdown
    signals, like the keyboard interrupt command Ctrl-C. It also configures
    top-level logging.
    This utility should be a better fit than
    :class:`~scrapy.crawler.AsyncCrawlerRunner` if you aren't running another
    :mod:`~twisted.internet.reactor` within your application.
    The AsyncCrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.
    :param install_root_handler: whether to install root logging handler
        (default: True)
    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    This class provides coroutine APIs. It requires
    :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
    """
    def __init__(
        self,
        settings: dict[str, Any] | Settings | None = None,
        install_root_handler: bool = True,
    ):
        super().__init__(settings, install_root_handler)
        logger.debug("Using AsyncCrawlerProcess")
        # We want the asyncio event loop to be installed early, so that it's
        # always the correct one. And as we do that, we can also install the
        # reactor here.
        # The ASYNCIO_EVENT_LOOP setting cannot be overridden by add-ons and
        # spiders when using AsyncCrawlerProcess.
        loop_path = self.settings["ASYNCIO_EVENT_LOOP"]
        if not self.settings.getbool("TWISTED_ENABLED"):
            if is_reactor_installed():
                raise RuntimeError(
                    "TWISTED_ENABLED is False but a Twisted reactor is installed."
                )
            set_asyncio_event_loop(loop_path)
            # Guard against later code importing/installing a reactor.
            install_reactor_import_hook()
        elif is_reactor_installed():
            # The user could install a reactor before this class is instantiated.
            # We need to make sure the reactor is the correct one and the loop
            # type matches the setting.
            verify_installed_reactor(_asyncio_reactor_path)
            if loop_path:
                verify_installed_asyncio_event_loop(loop_path)
        else:
            install_reactor(_asyncio_reactor_path, loop_path)
        self._initialized_reactor = True
    def _stop_dfd(self) -> Deferred[Any]:
        # Wrap the coroutine-based stop() for the Twisted shutdown trigger.
        return deferred_from_coro(self.stop())
    def start(
        self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
    ) -> None:
        """
        This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
        size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
        based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
        If ``stop_after_crawl`` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.
        :param bool stop_after_crawl: stop or not the reactor when all
            crawlers have finished
        :param bool install_signal_handlers: whether to install the OS signal
            handlers from Twisted and Scrapy (default: True)
        """
        # Dispatch on TWISTED_ENABLED: pure-asyncio loop vs Twisted reactor.
        if not self.settings.getbool("TWISTED_ENABLED"):
            self._start_asyncio(stop_after_crawl, install_signal_handlers)
        else:
            self._start_twisted(stop_after_crawl, install_signal_handlers)
    def _start_asyncio(
        self, stop_after_crawl: bool, install_signal_handlers: bool
    ) -> None:
        # Very basic and will need multiple improvements.
        # TODO https://docs.python.org/3/library/asyncio-runner.html#handling-keyboard-interruption
        # TODO various exception handling
        # TODO consider asyncio.run()
        # NOTE(review): install_signal_handlers is currently unused on this
        # path (see the TODOs above).
        loop = asyncio.get_event_loop()
        if stop_after_crawl:
            join_task = loop.create_task(self.join())
            join_task.add_done_callback(lambda _: loop.stop())
        try:
            loop.run_forever() # blocking call
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()
    def _start_twisted(
        self, stop_after_crawl: bool, install_signal_handlers: bool
    ) -> None:
        from twisted.internet import reactor
        if stop_after_crawl:
            loop = asyncio.get_event_loop()
            join_task = loop.create_task(self.join())
            # _stop_reactor accepts (and ignores) the finished task argument.
            join_task.add_done_callback(self._stop_reactor)
        self._setup_reactor(install_signal_handlers)
        reactor.run(installSignalHandlers=install_signal_handlers) # blocking call
# -*- coding: utf-8 -*-
"""
Asset management plugin for Pelican
===================================
This plugin allows you to use the `webassets`_ module to manage assets such as
CSS and JS files.
The ASSET_URL is set to a relative url to honor Pelican's RELATIVE_URLS
setting. This requires the use of SITEURL in the templates::
<link rel="stylesheet" href="{{ SITEURL }}/{{ ASSET_URL }}">
.. _webassets: https://webassets.readthedocs.org/
"""
from __future__ import unicode_literals
import os
import logging
from pelican import signals
# Module-level logger for this plugin.
logger = logging.getLogger(__name__)
# webassets is an optional dependency: when it is missing, register() below
# logs a warning and leaves the plugin inactive.
try:
    import webassets
    from webassets import Environment
    from webassets.ext.jinja2 import AssetsExtension
except ImportError:
    webassets = None
def add_jinja2_ext(pelican):
    """Register the webassets Jinja2 extension with Pelican's settings."""
    extensions = pelican.settings['JINJA_EXTENSIONS']
    extensions.append(AssetsExtension)
def create_assets_env(generator):
    """Define the assets environment and pass it to the generator.

    The environment's output directory is ``THEME_STATIC_DIR`` inside the
    generator's output path; asset URLs are generated relative to
    ``THEME_STATIC_DIR``, so templates must prefix them with ``SITEURL``.
    Optional settings honored: ``ASSET_CONFIG`` (key/value pairs for the
    webassets config), ``ASSET_BUNDLES`` (named bundles) and
    ``ASSET_SOURCE_PATHS`` (extra load paths under the theme directory).
    """
    theme_static_dir = generator.settings['THEME_STATIC_DIR']
    assets_src = os.path.join(generator.output_path, theme_static_dir)
    generator.env.assets_environment = Environment(
        assets_src, theme_static_dir)
    if 'ASSET_CONFIG' in generator.settings:
        # Each entry is a (key, value) pair for webassets' config mapping.
        for key, value in generator.settings['ASSET_CONFIG']:
            generator.env.assets_environment.config[key] = value
    if 'ASSET_BUNDLES' in generator.settings:
        for name, args, kwargs in generator.settings['ASSET_BUNDLES']:
            generator.env.assets_environment.register(name, *args, **kwargs)
    # Compare numeric levels directly instead of round-tripping the level
    # through logging.getLevelName(); equivalent, but clearer and cheaper.
    if logger.getEffectiveLevel() == logging.DEBUG:
        generator.env.assets_environment.debug = True
    if 'ASSET_SOURCE_PATHS' in generator.settings:
        # the default load path gets overridden if additional paths are
        # specified, add it back
        generator.env.assets_environment.append_path(assets_src)
        for path in generator.settings['ASSET_SOURCE_PATHS']:
            full_path = os.path.join(generator.theme, path)
            generator.env.assets_environment.append_path(full_path)
def register():
    """Plugin registration."""
    # Without webassets the plugin cannot operate; warn and bail out.
    if not webassets:
        logger.warning('`assets` failed to load dependency `webassets`.'
                       '`assets` plugin not loaded.')
        return
    signals.initialized.connect(add_jinja2_ext)
    signals.generator_init.connect(create_assets_env)
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.http
/**
 * A message either from the client or the server,
 * that has [headers] associated.
 *
 * This is the read-only view shared by both sides of an HTTP exchange;
 * see [HttpMessageBuilder] for the mutable counterpart.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HttpMessage)
 */
public interface HttpMessage {
    /**
     * Message [Headers].
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HttpMessage.headers)
     */
    public val headers: Headers
}
/**
 * A builder message either for the client or the server,
 * that has a [headers] builder associated.
 *
 * Mutable counterpart of [HttpMessage]: exposes a [HeadersBuilder]
 * instead of read-only [Headers].
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HttpMessageBuilder)
 */
public interface HttpMessageBuilder {
    /**
     * MessageBuilder [HeadersBuilder].
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HttpMessageBuilder.headers)
     */
    public val headers: HeadersBuilder
}
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/eventcal.py
# Event Calendar widget
# author: Andy Robinson
__version__=''' $Id: eventcal.py 3345 2008-12-12 17:55:22Z damian $ '''
__doc__="""This file is a
"""
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.graphics.shapes import Line, Rect, Polygon, Drawing, Group, String, Circle, Wedge
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics import renderPDF
class EventCalendar(Widget):
    """Draws a one-day conference timetable: a time column plus one column
    per track, with each talk drawn as a labelled rectangle whose height is
    proportional to its duration.
    NOTE(review): legacy Python 2 source (``<>`` operator used below)."""
    def __init__(self):
        # Public layout attributes (drawing-space coordinates).
        self.x = 0
        self.y = 0
        self.width = 300
        self.height = 150
        self.timeColWidth = None # if declared, use it; otherwise auto-size.
        self.trackRowHeight = 20
        self.data = [] # list of Event objects
        self.trackNames = None
        self.startTime = None #displays ALL data on day if not set
        self.endTime = None # displays ALL data on day if not set
        self.day = 0
        # we will keep any internal geometry variables
        # here. These are computed by computeSize(),
        # which is the first thing done when drawing.
        self._talksVisible = [] # subset of data which will get plotted, cache
        self._startTime = None
        self._endTime = None
        self._trackCount = 0
        self._colWidths = []
        self._colLeftEdges = [] # left edge of each column
    def computeSize(self):
        "Called at start of draw. Sets various column widths"
        self._talksVisible = self.getRelevantTalks(self.data)
        self._trackCount = len(self.getAllTracks())
        self.computeStartAndEndTimes()
        self._colLeftEdges = [self.x]
        if self.timeColWidth is None:
            # Auto-size: share the width equally between the time column and
            # the track columns. (Python 2: '/' on two ints floor-divides.)
            w = self.width / (1 + self._trackCount)
            self._colWidths = [w] * (1+ self._trackCount)
            for i in range(self._trackCount):
                self._colLeftEdges.append(self._colLeftEdges[-1] + w)
        else:
            # Fixed time column; remaining width is shared between tracks.
            self._colWidths = [self.timeColWidth]
            w = (self.width - self.timeColWidth) / self._trackCount
            for i in range(self._trackCount):
                self._colWidths.append(w)
                self._colLeftEdges.append(self._colLeftEdges[-1] + w)
    def computeStartAndEndTimes(self):
        "Work out first and last times to display"
        # Explicit startTime/endTime win; otherwise scan the visible talks
        # for the earliest start and the latest end.
        if self.startTime:
            self._startTime = self.startTime
        else:
            for (title, speaker, trackId, day, start, duration) in self._talksVisible:
                if self._startTime is None: #first one
                    self._startTime = start
                else:
                    if start < self._startTime:
                        self._startTime = start
        if self.endTime:
            self._endTime = self.endTime
        else:
            for (title, speaker, trackId, day, start, duration) in self._talksVisible:
                if self._endTime is None: #first one
                    self._endTime = start + duration
                else:
                    if start + duration > self._endTime:
                        self._endTime = start + duration
    def getAllTracks(self):
        # Distinct, sorted track ids across all data; None (events spanning
        # every track) is excluded.
        tracks = []
        for (title, speaker, trackId, day, hours, duration) in self.data:
            if trackId is not None:
                if trackId not in tracks:
                    tracks.append(trackId)
        tracks.sort()
        return tracks
    def getRelevantTalks(self, talkList):
        "Scans for tracks actually used"
        used = []
        for talk in talkList:
            (title, speaker, trackId, day, hours, duration) = talk
            assert trackId <> 0, "trackId must be None or 1,2,3... zero not allowed!"
            if day == self.day:
                # Keep talks that overlap the [startTime, endTime] window.
                if (((self.startTime is None) or ((hours + duration) >= self.startTime))
                    and ((self.endTime is None) or (hours <= self.endTime))):
                    used.append(talk)
        return used
    def scaleTime(self, theTime):
        "Return y-value corresponding to times given"
        axisHeight = self.height - self.trackRowHeight
        # compute fraction between 0 and 1, 0 is at start of period
        proportionUp = ((theTime - self._startTime) / (self._endTime - self._startTime))
        # Later times map to lower y values: the time axis runs downwards.
        y = self.y + axisHeight - (axisHeight * proportionUp)
        return y
    def getTalkRect(self, startTime, duration, trackId, text):
        "Return shapes for a specific talk"
        g = Group()
        y_bottom = self.scaleTime(startTime + duration)
        y_top = self.scaleTime(startTime)
        y_height = y_top - y_bottom
        if trackId is None:
            #spans all columns
            x = self._colLeftEdges[1]
            width = self.width - self._colWidths[0]
        else:
            #trackId is 1-based and these arrays have the margin info in column
            #zero, so no need to add 1
            x = self._colLeftEdges[trackId]
            width = self._colWidths[trackId]
        # Centered label inside the talk rectangle.
        lab = Label()
        lab.setText(text)
        lab.setOrigin(x + 0.5*width, y_bottom+0.5*y_height)
        lab.boxAnchor = 'c'
        lab.width = width
        lab.height = y_height
        lab.fontSize = 6
        r = Rect(x, y_bottom, width, y_height, fillColor=colors.cyan)
        g.add(r)
        g.add(lab)
        #now for a label
        # would expect to color-code and add text
        return g
    def draw(self):
        self.computeSize()
        g = Group()
        # time column
        g.add(Rect(self.x, self.y, self._colWidths[0], self.height - self.trackRowHeight, fillColor=colors.cornsilk))
        # track headers
        x = self.x + self._colWidths[0]
        y = self.y + self.height - self.trackRowHeight
        for trk in range(self._trackCount):
            wid = self._colWidths[trk+1]
            r = Rect(x, y, wid, self.trackRowHeight, fillColor=colors.yellow)
            # NOTE(review): headers are labelled 0-based ('Track 0'...) while
            # trackId in the data is 1-based — confirm this is intended.
            s = String(x + 0.5*wid, y, 'Track %d' % trk, align='middle')
            g.add(r)
            g.add(s)
            x = x + wid
        # one labelled rectangle per visible talk
        for talk in self._talksVisible:
            (title, speaker, trackId, day, start, duration) = talk
            r = self.getTalkRect(start, duration, trackId, title + '\n' + speaker)
            g.add(r)
        return g
def test():
    "Make a conference event for day 1 of UP Python 2003"
    d = Drawing(400,200)
    cal = EventCalendar()
    cal.x = 50
    cal.y = 25
    cal.data = [
        # these might be better as objects instead of tuples, since I
        # predict a large number of "optionsl" variables to affect
        # formatting in future.
        #title, speaker, track id, day, start time (hrs), duration (hrs)
        # track ID is 1-based not zero-based!
        ('Keynote: Why design another programming language?', 'Guido van Rossum', None, 1, 9.0, 1.0),
        ('Siena Web Service Architecture', 'Marc-Andre Lemburg', 1, 1, 10.5, 1.5),
        ('Extreme Programming in Python', 'Chris Withers', 2, 1, 10.5, 1.5),
        ('Pattern Experiences in C++', 'Mark Radford', 3, 1, 10.5, 1.5),
        ('What is the Type of std::toupper()', 'Gabriel Dos Reis', 4, 1, 10.5, 1.5),
        ('Linguistic Variables: Clear Thinking with Fuzzy Logic ', 'Walter Banks', 5, 1, 10.5, 1.5),
        ('lunch, short presentations, vendor presentations', '', None, 1, 12.0, 2.0),
        ("CORBA? Isn't that obsolete", 'Duncan Grisby', 1, 1, 14.0, 1.5),
        ("Python Design Patterns", 'Duncan Booth', 2, 1, 14.0, 1.5),
        ("Inside Security Checks and Safe Exceptions", 'Brandon Bray', 3, 1, 14.0, 1.5),
        ("Studying at a Distance", 'Panel Discussion, Panel to include Alan Lenton & Francis Glassborow', 4, 1, 14.0, 1.5),
        ("Coding Standards - Given the ANSI C Standard why do I still need a coding Standard", 'Randy Marques', 5, 1, 14.0, 1.5),
        ("RESTful Python", 'Hamish Lawson', 1, 1, 16.0, 1.5),
        ("Parsing made easier - a radical old idea", 'Andrew Koenig', 2, 1, 16.0, 1.5),
        ("C++ & Multimethods", 'Julian Smith', 3, 1, 16.0, 1.5),
        ("C++ Threading", 'Kevlin Henney', 4, 1, 16.0, 1.5),
        ("The Organisation Strikes Back", 'Alan Griffiths & Sarah Lees', 5, 1, 16.0, 1.5),
        ('Birds of a Feather meeting', '', None, 1, 17.5, 2.0),
        ('Keynote: In the Spirit of C', 'Greg Colvin', None, 2, 9.0, 1.0),
        ('The Infinite Filing Cabinet - object storage in Python', 'Jacob Hallen', 1, 2, 10.5, 1.5),
        ('Introduction to Python and Jython for C++ and Java Programmers', 'Alex Martelli', 2, 2, 10.5, 1.5),
        ('Template metaprogramming in Haskell', 'Simon Peyton Jones', 3, 2, 10.5, 1.5),
        ('Plenty People Programming: C++ Programming in a Group, Workshop with a difference', 'Nico Josuttis', 4, 2, 10.5, 1.5),
        ('Design and Implementation of the Boost Graph Library', 'Jeremy Siek', 5, 2, 10.5, 1.5),
        ('lunch, short presentations, vendor presentations', '', None, 2, 12.0, 2.0),
        ("Building GUI Applications with PythonCard and PyCrust", 'Andy Todd', 1, 2, 14.0, 1.5),
        ("Integrating Python, C and C++", 'Duncan Booth', 2, 2, 14.0, 1.5),
        ("Secrets and Pitfalls of Templates", 'Nicolai Josuttis & David Vandevoorde', 3, 2, 14.0, 1.5),
        ("Being a Mentor", 'Panel Discussion, Panel to include Alan Lenton & Francis Glassborow', 4, 2, 14.0, 1.5),
        ("The Embedded C Extensions to C", 'Willem Wakker', 5, 2, 14.0, 1.5),
        ("Lightning Talks", 'Paul Brian', 1, 2, 16.0, 1.5),
        ("Scripting Java Applications with Jython", 'Anthony Eden', 2, 2, 16.0, 1.5),
        ("Metaprogramming and the Boost Metaprogramming Library", 'David Abrahams', 3, 2, 16.0, 1.5),
        ("A Common Vendor ABI for C++ -- GCC's why, what and not", 'Nathan Sidwell & Gabriel Dos Reis', 4, 2, 16.0, 1.5),
        ("The Timing and Cost of Choices", 'Hubert Matthews', 5, 2, 16.0, 1.5),
        ('Birds of a Feather meeting', '', None, 2, 17.5, 2.0),
        ('Keynote: The Cost of C & C++ Compatibility', 'Andy Koenig', None, 3, 9.0, 1.0),
        ('Prying Eyes: Generic Observer Implementations in C++', 'Andrei Alexandrescu', 1, 2, 10.5, 1.5),
        ('The Roadmap to Generative Programming With C++', 'Ulrich Eisenecker', 2, 2, 10.5, 1.5),
        ('Design Patterns in C++ and C# for the Common Language Runtime', 'Brandon Bray', 3, 2, 10.5, 1.5),
        ('Extreme Hour (XH): (workshop) - Jutta Eckstein and Nico Josuttis', 'Jutta Ecstein', 4, 2, 10.5, 1.5),
        ('The Lambda Library : Unnamed Functions for C++', 'Jaako Jarvi', 5, 2, 10.5, 1.5),
        ('lunch, short presentations, vendor presentations', '', None, 3, 12.0, 2.0),
        ('Reflective Metaprogramming', 'Daveed Vandevoorde', 1, 3, 14.0, 1.5),
        ('Advanced Template Issues and Solutions (double session)', 'Herb Sutter',2, 3, 14.0, 3),
        ('Concurrent Programming in Java (double session)', 'Angelika Langer', 3, 3, 14.0, 3),
        ('What can MISRA-C (2nd Edition) do for us?', 'Chris Hills', 4, 3, 14.0, 1.5),
        ('C++ Metaprogramming Concepts and Results', 'Walter E Brown', 5, 3, 14.0, 1.5),
        ('Binding C++ to Python with the Boost Python Library', 'David Abrahams', 1, 3, 16.0, 1.5),
        ('Using Aspect Oriented Programming for Enterprise Application Integration', 'Arno Schmidmeier', 4, 3, 16.0, 1.5),
        ('Defective C++', 'Marc Paterno', 5, 3, 16.0, 1.5),
        ("Speakers' Banquet & Birds of a Feather meeting", '', None, 3, 17.5, 2.0),
        ('Keynote: The Internet, Software and Computers - A Report Card', 'Alan Lenton', None, 4, 9.0, 1.0),
        ('Multi-Platform Software Development; Lessons from the Boost libraries', 'Beman Dawes', 1, 5, 10.5, 1.5),
        ('The Stability of the C++ ABI', 'Steve Clamage', 2, 5, 10.5, 1.5),
        ('Generic Build Support - A Pragmatic Approach to the Software Build Process', 'Randy Marques', 3, 5, 10.5, 1.5),
        ('How to Handle Project Managers: a survival guide', 'Barb Byro', 4, 5, 10.5, 1.5),
        ('lunch, ACCU AGM', '', None, 5, 12.0, 2.0),
        ('Sauce: An OO recursive descent parser; its design and implementation.', 'Jon Jagger', 1, 5, 14.0, 1.5),
        ('GNIRTS ESAC REWOL - Bringing the UNIX filters to the C++ iostream library.', 'JC van Winkel', 2, 5, 14.0, 1.5),
        ('Pattern Writing: Live and Direct', 'Frank Buschmann & Kevlin Henney', 3, 5, 14.0, 3.0),
        ('The Future of Programming Languages - A Goldfish Bowl', 'Francis Glassborow and friends', 3, 5, 14.0, 1.5),
        ('Honey, I Shrunk the Threads: Compile-time checked multithreaded transactions in C++', 'Andrei Alexandrescu', 1, 5, 16.0, 1.5),
        ('Fun and Functionality with Functors', 'Lois Goldthwaite', 2, 5, 16.0, 1.5),
        ('Agile Enough?', 'Alan Griffiths', 4, 5, 16.0, 1.5),
        ("Conference Closure: A brief plenary session", '', None, 5, 17.5, 0.5),
        ]
    #return cal
    # Only entries whose day field matches cal.day (1) will be rendered.
    cal.day = 1
    d.add(cal)
    # Render the drawing to file in each requested format.
    for format in ['pdf']:#,'gif','png']:
        out = d.asString(format)
        open('eventcal.%s' % format, 'wb').write(out)
        print 'saved eventcal.%s' % format
# Script entry point: builds the sample conference calendar and writes
# eventcal.pdf (this file uses Python 2 print statements elsewhere, so it
# runs under Python 2 only).
if __name__=='__main__':
    test()
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AuroraDNS DNS Driver
"""
import base64
import json
import hmac
import datetime
from hashlib import sha256
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.common.types import LibcloudError
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError, RecordDoesNotExistError
# Hostname of the AuroraDNS REST API endpoint.
API_HOST = 'api.auroradns.eu'

# Default TTL required by libcloud, but doesn't do anything in AuroraDNS
DEFAULT_ZONE_TTL = 3600
# AuroraDNS does not expose a zone type; every zone is reported as 'master'.
DEFAULT_ZONE_TYPE = 'master'

# Keys that callers may pass in a record's 'extra' dict and that are
# forwarded verbatim to the record create/update API payload.
VALID_RECORD_PARAMS_EXTRA = ['ttl', 'prio', 'health_check_id', 'disabled']
class AuroraDNSHealthCheckType(object):
    """Enumeration of the probe types an AuroraDNS health check can use."""

    HTTP = 'HTTP'
    HTTPS = 'HTTPS'
    TCP = 'TCP'
class HealthCheckError(LibcloudError):
    """Raised when an AuroraDNS health-check API call fails."""

    error_type = 'HealthCheckError'

    def __init__(self, value, driver, health_check_id):
        # Remember which health check the failure concerns before
        # delegating value/driver storage to LibcloudError.
        self.health_check_id = health_check_id
        super(HealthCheckError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        details = (self.error_type, repr(self.driver),
                   self.health_check_id, self.value)
        return '<%s in %s, health_check_id=%s, value=%s>' % details
class HealthCheckDoesNotExistError(HealthCheckError):
    """Raised when the referenced health check cannot be found (HTTP 404)."""
    error_type = 'HealthCheckDoesNotExistError'
class AuroraDNSHealthCheck(object):
    """
    AuroraDNS Healthcheck resource.
    """

    def __init__(self, id, type, hostname, ipaddress, port, interval, path,
                 threshold, health, enabled, zone, driver, extra=None):
        """
        :param id: Healthcheck id
        :type id: ``str``

        :param type: The type of the health check
        :type type: :class:`AuroraDNSHealthCheckType`

        :param hostname: Hostname or FQDN of the target
        :type hostname: ``str``

        :param ipaddress: IPv4 or IPv6 address of the target
        :type ipaddress: ``str``

        :param port: The port on the target to monitor
        :type port: ``int``

        :param interval: The interval of the health check
        :type interval: ``int``

        :param path: The path to monitor on the target
        :type path: ``str``

        :param threshold: The threshold of before marking a check as failed
        :type threshold: ``int``

        :param health: The current health of the health check
        :type health: ``bool``

        :param enabled: If the health check is currently enabled
        :type enabled: ``bool``

        :param zone: Zone instance.
        :type zone: :class:`Zone`

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        self.id = str(id) if id else None
        self.type = type
        self.hostname = hostname
        self.ipaddress = ipaddress
        # port may legitimately be unset; keep it None so callers (and
        # __repr__) can distinguish "no port" from a real value.
        self.port = int(port) if port else None
        self.interval = int(interval)
        self.path = path
        self.threshold = int(threshold)
        self.health = bool(health)
        self.enabled = bool(enabled)
        self.zone = zone
        self.driver = driver
        self.extra = extra or {}

    def update(self, type=None, hostname=None, ipaddress=None, port=None,
               interval=None, path=None, threshold=None, enabled=None,
               extra=None):
        """Update this health check via the driver; only non-None arguments
        are changed. Returns the refreshed :class:`AuroraDNSHealthCheck`."""
        return self.driver.ex_update_healthcheck(healthcheck=self, type=type,
                                                 hostname=hostname,
                                                 ipaddress=ipaddress,
                                                 port=port, path=path,
                                                 interval=interval,
                                                 threshold=threshold,
                                                 enabled=enabled, extra=extra)

    def delete(self):
        """Delete this health check via the driver; returns ``True``."""
        return self.driver.ex_delete_healthcheck(healthcheck=self)

    def __repr__(self):
        # Bug fix: port (like id) can be None, and '%d' % None raises
        # TypeError. '%s' renders integers identically and handles None.
        return ('<AuroraDNSHealthCheck: zone=%s, id=%s, type=%s, hostname=%s, '
                'ipaddress=%s, port=%s, interval=%d, health=%s, provider=%s'
                '...>' %
                (self.zone.id, self.id, self.type, self.hostname,
                 self.ipaddress, self.port, self.interval, self.health,
                 self.driver.name))
class AuroraDNSResponse(JsonResponse):
    """HTTP response wrapper that maps AuroraDNS errors onto libcloud
    exception classes."""

    def success(self):
        # The API only uses 200/201/202 for successful calls.
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Raise the libcloud exception matching this error response.

        The connection context (set by the driver via set_context) tells
        us which resource the request was about, so 404/409 can raise the
        resource-specific exception carrying the right id.
        """
        status = int(self.status)
        error = {'driver': self, 'value': ''}
        if status == httplib.UNAUTHORIZED:
            error['value'] = 'Authentication failed'
            raise InvalidCredsError(**error)
        elif status == httplib.FORBIDDEN:
            error['value'] = 'Authorization failed'
            # Bug fix: ProviderError takes 'http_code' (see the
            # BAD_REQUEST branch below); 'http_status' was an unexpected
            # keyword and raised TypeError instead of ProviderError.
            error['http_code'] = status
            raise ProviderError(**error)
        elif status == httplib.NOT_FOUND:
            context = self.connection.context
            if context['resource'] == 'zone':
                error['zone_id'] = context['id']
                raise ZoneDoesNotExistError(**error)
            elif context['resource'] == 'record':
                error['record_id'] = context['id']
                raise RecordDoesNotExistError(**error)
            elif context['resource'] == 'healthcheck':
                error['health_check_id'] = context['id']
                raise HealthCheckDoesNotExistError(**error)
        elif status == httplib.CONFLICT:
            context = self.connection.context
            if context['resource'] == 'zone':
                error['zone_id'] = context['id']
                raise ZoneAlreadyExistsError(**error)
        elif status == httplib.BAD_REQUEST:
            context = self.connection.context
            body = self.parse_body()
            raise ProviderError(value=body['errormsg'],
                                http_code=status, driver=self)
class AuroraDNSConnection(ConnectionUserAndKey):
    """Connection that signs every request with the AuroraDNSv1 HMAC
    scheme (HMAC-SHA256 over method + url + timestamp)."""

    host = API_HOST
    responseCls = AuroraDNSResponse

    def calculate_auth_signature(self, secret_key, method, url, timestamp):
        """Return the base64-encoded HMAC-SHA256 of method+url+timestamp,
        keyed with *secret_key*."""
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key),
                     b(method) + b(url) + b(timestamp),
                     digestmod=sha256).digest()
        )
        return b64_hmac.decode('utf-8')

    def gen_auth_header(self, api_key, secret_key, method, url, timestamp):
        """Build the ``AuroraDNSv1 <base64(api_key:signature)>``
        Authorization header value."""
        signature = self.calculate_auth_signature(secret_key, method, url,
                                                  timestamp)
        auth_b64 = base64.b64encode(b('%s:%s' % (api_key, signature)))
        return 'AuroraDNSv1 %s' % (auth_b64.decode('utf-8'))

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        """Perform a signed request against the AuroraDNS API.

        NOTE(review): for POST/PUT any caller-supplied *headers* are
        replaced (not merged) by the JSON Content-Type header below —
        presumably intentional since callers only pass JSON bodies, but
        worth confirming.
        """
        if not headers:
            headers = {}
        if not params:
            params = {}
        if method in ("POST", "PUT"):
            headers = {'Content-Type': 'application/json; charset=UTF-8'}
        t = datetime.datetime.utcnow()
        # Compact ISO-8601 UTC timestamp; also folded into the signature.
        timestamp = t.strftime('%Y%m%dT%H%M%SZ')
        headers['X-AuroraDNS-Date'] = timestamp
        headers['Authorization'] = self.gen_auth_header(self.user_id, self.key,
                                                        method, action,
                                                        timestamp)
        return super(AuroraDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)
class AuroraDNSDriver(DNSDriver):
    """libcloud DNS driver for the AuroraDNS service."""

    name = 'AuroraDNS'
    website = 'https://www.pcextreme.nl/en/aurora/dns'
    connectionCls = AuroraDNSConnection

    # libcloud RecordType -> record type string understood by the API.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.SOA: 'SOA',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
        RecordType.DS: 'DS',
        RecordType.PTR: 'PTR',
        RecordType.SSHFP: 'SSHFP',
        RecordType.TLSA: 'TLSA'
    }

    # AuroraDNSHealthCheckType -> health check type string for the API.
    HEALTHCHECK_TYPE_MAP = {
        AuroraDNSHealthCheckType.HTTP: 'HTTP',
        AuroraDNSHealthCheckType.HTTPS: 'HTTPS',
        AuroraDNSHealthCheckType.TCP: 'TCP'
    }

    def iterate_zones(self):
        """Yield every zone in the account as a :class:`Zone`."""
        res = self.connection.request('/zones')
        for zone in res.parse_body():
            yield self.__res_to_zone(zone)

    def iterate_records(self, zone):
        """Yield every record in *zone* as a :class:`Record`."""
        # Context lets parse_error() raise ZoneDoesNotExistError on 404.
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/records' % zone.id)
        for record in res.parse_body():
            yield self.__res_to_record(zone, record)

    def get_zone(self, zone_id):
        """Return the :class:`Zone` with the given id."""
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        res = self.connection.request('/zones/%s' % zone_id)
        zone = res.parse_body()
        return self.__res_to_zone(zone)

    def get_record(self, zone_id, record_id):
        """Return a single :class:`Record` from a zone."""
        self.connection.set_context({'resource': 'record', 'id': record_id})
        res = self.connection.request('/zones/%s/records/%s' % (zone_id,
                                                                record_id))
        record = res.parse_body()
        # Second API call: the returned Record must reference its Zone.
        zone = self.get_zone(zone_id)
        return self.__res_to_record(zone, record)

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """Create a new zone named *domain*.

        NOTE: 'type', 'ttl' and 'extra' exist only for interface
        compatibility with DNSDriver; the API request sends just the name.
        """
        self.connection.set_context({'resource': 'zone', 'id': domain})
        res = self.connection.request('/zones', method='POST',
                                      data=json.dumps({'name': domain}))
        zone = res.parse_body()
        return self.__res_to_zone(zone)

    def create_record(self, name, zone, type, data, extra=None):
        """Create a record in *zone*; *extra* may carry the keys listed
        in VALID_RECORD_PARAMS_EXTRA (ttl, prio, ...)."""
        if name is None:
            # The API represents the zone apex as an empty record name.
            name = ""
        rdata = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'content': data
        }
        rdata = self.__merge_extra_data(rdata, extra)
        if 'ttl' not in rdata:
            rdata['ttl'] = DEFAULT_ZONE_TTL
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/records' % zone.id,
                                      method='POST',
                                      data=json.dumps(rdata))
        record = res.parse_body()
        return self.__res_to_record(zone, record)

    def delete_zone(self, zone):
        """Delete *zone*; returns ``True`` (errors raise via parse_error)."""
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.request('/zones/%s' % zone.id, method='DELETE')
        return True

    def delete_record(self, record):
        """Delete *record*; returns ``True`` (errors raise via parse_error)."""
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.request('/zones/%s/records/%s' % (record.zone.id,
                                                          record.id),
                                method='DELETE')
        return True

    def list_record_types(self):
        """Return the list of supported libcloud record types."""
        types = []
        for record_type in self.RECORD_TYPE_MAP.keys():
            types.append(record_type)
        return types

    def update_record(self, record, name, type, data, extra=None):
        """Update *record*; only non-None arguments are sent to the API."""
        rdata = {}
        if name is not None:
            rdata['name'] = name
        if type is not None:
            rdata['type'] = self.RECORD_TYPE_MAP[type]
        if data is not None:
            rdata['content'] = data
        rdata = self.__merge_extra_data(rdata, extra)
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.request('/zones/%s/records/%s' % (record.zone.id,
                                                          record.id),
                                method='PUT',
                                data=json.dumps(rdata))
        # Re-fetch so the returned Record reflects the server-side state.
        return self.get_record(record.zone.id, record.id)

    def ex_list_healthchecks(self, zone):
        """
        List all Health Checks in a zone.

        :param zone: Zone to list health checks for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`AuroraDNSHealthCheck`
        """
        healthchecks = []
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/health_checks' % zone.id)
        for healthcheck in res.parse_body():
            healthchecks.append(self.__res_to_healthcheck(zone, healthcheck))
        return healthchecks

    def ex_get_healthcheck(self, zone, health_check_id):
        """
        Get a single Health Check from a zone

        :param zone: Zone in which the health check is
        :type zone: :class:`Zone`

        :param health_check_id: ID of the required health check
        :type health_check_id: ``str``

        :return: :class:`AuroraDNSHealthCheck`
        """
        self.connection.set_context({'resource': 'healthcheck',
                                     'id': health_check_id})
        res = self.connection.request('/zones/%s/health_checks/%s'
                                      % (zone.id, health_check_id))
        check = res.parse_body()
        return self.__res_to_healthcheck(zone, check)

    def ex_create_healthcheck(self, zone, type, hostname, port, path,
                              interval, threshold, ipaddress=None,
                              enabled=True, extra=None):
        """
        Create a new Health Check in a zone

        :param zone: Zone in which the health check should be created
        :type zone: :class:`Zone`

        :param type: The type of health check to be created
        :type type: :class:`AuroraDNSHealthCheckType`

        :param hostname: The hostname of the target to monitor
        :type hostname: ``str``

        :param port: The port of the target to monitor. E.g. 80 for HTTP
        :type port: ``int``

        :param path: The path of the target to monitor. Only used by HTTP
                     at this moment. Usually this is simple /.
        :type path: ``str``

        :param interval: The interval of checks. 10, 30 or 60 seconds.
        :type interval: ``int``

        :param threshold: The threshold of failures before the healthcheck is
                          marked as failed.
        :type threshold: ``int``

        :param ipaddress: (optional) The IP Address of the target to monitor.
                          You can pass a empty string if this is not required.
        :type ipaddress: ``str``

        :param enabled: (optional) If this healthcheck is enabled to run
        :type enabled: ``bool``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :return: :class:`AuroraDNSHealthCheck`
        """
        cdata = {
            'type': self.HEALTHCHECK_TYPE_MAP[type],
            'hostname': hostname,
            'ipaddress': ipaddress,
            'port': int(port),
            'interval': int(interval),
            'path': path,
            'threshold': int(threshold),
            'enabled': enabled
        }
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        res = self.connection.request('/zones/%s/health_checks' % zone.id,
                                      method='POST',
                                      data=json.dumps(cdata))
        healthcheck = res.parse_body()
        return self.__res_to_healthcheck(zone, healthcheck)

    def ex_update_healthcheck(self, healthcheck, type=None,
                              hostname=None, ipaddress=None, port=None,
                              path=None, interval=None, threshold=None,
                              enabled=None, extra=None):
        """
        Update an existing Health Check

        :param healthcheck: The healthcheck which has to be updated
        :type healthcheck: :class:`AuroraDNSHealthCheck`

        :param type: (optional) The type of health check to be created
        :type type: :class:`AuroraDNSHealthCheckType`

        :param hostname: (optional) The hostname of the target to monitor
        :type hostname: ``str``

        :param ipaddress: (optional) The IP Address of the target to monitor.
                          You can pass a empty string if this is not required.
        :type ipaddress: ``str``

        :param port: (optional) The port of the target to monitor. E.g. 80
                     for HTTP
        :type port: ``int``

        :param path: (optional) The path of the target to monitor.
                     Only used by HTTP at this moment. Usually just '/'.
        :type path: ``str``

        :param interval: (optional) The interval of checks.
                         10, 30 or 60 seconds.
        :type interval: ``int``

        :param threshold: (optional) The threshold of failures before the
                          healthcheck is marked as failed.
        :type threshold: ``int``

        :param enabled: (optional) If this healthcheck is enabled to run
        :type enabled: ``bool``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :return: :class:`AuroraDNSHealthCheck`
        """
        cdata = {}
        if type is not None:
            cdata['type'] = self.HEALTHCHECK_TYPE_MAP[type]
        if hostname is not None:
            cdata['hostname'] = hostname
        if ipaddress is not None:
            # An explicit empty string means "clear the address".
            if len(ipaddress) == 0:
                cdata['ipaddress'] = None
            else:
                cdata['ipaddress'] = ipaddress
        if port is not None:
            cdata['port'] = int(port)
        if path is not None:
            cdata['path'] = path
        if interval is not None:
            cdata['interval'] = int(interval)
        if threshold is not None:
            # NOTE(review): unlike port/interval this is not coerced with
            # int() — confirm whether that asymmetry is intentional.
            cdata['threshold'] = threshold
        if enabled is not None:
            cdata['enabled'] = bool(enabled)
        self.connection.set_context({'resource': 'healthcheck',
                                     'id': healthcheck.id})
        self.connection.request('/zones/%s/health_checks/%s'
                                % (healthcheck.zone.id,
                                   healthcheck.id),
                                method='PUT',
                                data=json.dumps(cdata))
        return self.ex_get_healthcheck(healthcheck.zone,
                                       healthcheck.id)

    def ex_delete_healthcheck(self, healthcheck):
        """
        Remove an existing Health Check

        :param healthcheck: The healthcheck which has to be removed
        :type healthcheck: :class:`AuroraDNSHealthCheck`
        """
        self.connection.set_context({'resource': 'healthcheck',
                                     'id': healthcheck.id})
        self.connection.request('/zones/%s/health_checks/%s'
                                % (healthcheck.zone.id,
                                   healthcheck.id),
                                method='DELETE')
        return True

    def __res_to_record(self, zone, record):
        """Convert an API record dict into a libcloud :class:`Record`."""
        if len(record['name']) == 0:
            # Empty name marks the zone apex; expose it as None.
            name = None
        else:
            name = record['name']
        extra = {}
        extra['created'] = record['created']
        extra['modified'] = record['modified']
        extra['disabled'] = record['disabled']
        extra['ttl'] = record['ttl']
        extra['priority'] = record['prio']
        return Record(id=record['id'], name=name,
                      type=record['type'],
                      data=record['content'], zone=zone,
                      driver=self.connection.driver, ttl=record['ttl'],
                      extra=extra)

    def __res_to_zone(self, zone):
        """Convert an API zone dict into a libcloud :class:`Zone`."""
        return Zone(id=zone['id'], domain=zone['name'],
                    type=DEFAULT_ZONE_TYPE,
                    ttl=DEFAULT_ZONE_TTL, driver=self.connection.driver,
                    extra={'created': zone['created'],
                           'servers': zone['servers'],
                           'account_id': zone['account_id'],
                           'cluster_id': zone['cluster_id']})

    def __res_to_healthcheck(self, zone, healthcheck):
        """Convert an API health-check dict into an
        :class:`AuroraDNSHealthCheck`."""
        return AuroraDNSHealthCheck(id=healthcheck['id'],
                                    type=healthcheck['type'],
                                    hostname=healthcheck['hostname'],
                                    ipaddress=healthcheck['ipaddress'],
                                    health=healthcheck['health'],
                                    threshold=healthcheck['threshold'],
                                    path=healthcheck['path'],
                                    interval=healthcheck['interval'],
                                    port=healthcheck['port'],
                                    enabled=healthcheck['enabled'],
                                    zone=zone, driver=self.connection.driver)

    def __merge_extra_data(self, rdata, extra):
        """Copy whitelisted keys from *extra* into the request payload."""
        if extra is not None:
            for param in VALID_RECORD_PARAMS_EXTRA:
                if param in extra:
                    rdata[param] = extra[param]
        return rdata
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configuration of Docker stack Docs."""
from __future__ import annotations
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import logging
import os
from typing import Any
from docs.utils.conf_constants import (
AIRFLOW_FAVICON_PATH,
AIRFLOW_REPO_ROOT_PATH,
AUTOAPI_OPTIONS,
BASIC_AUTOAPI_IGNORE_PATTERNS,
BASIC_SPHINX_EXTENSIONS,
CHART_DOC_PATH,
SMARTQUOTES_EXCLUDES,
SPELLING_WORDLIST_PATH,
SPHINX_DESIGN_STATIC_PATH,
SUPPRESS_WARNINGS,
filter_autoapi_ignore_entries,
get_autodoc_mock_imports,
get_html_context,
get_html_sidebars,
get_html_theme_options,
get_intersphinx_mapping,
get_rst_epilogue,
)
import airflow
PACKAGE_NAME = "docker-stack"
DOCKER_STACK_DOCS_PATH = AIRFLOW_REPO_ROOT_PATH / "docker-stack-docs"

PACKAGE_VERSION: str = "stable"

# Adds to environment variables for easy access from other plugins like
# airflow_intersphinx. (A duplicated identical assignment of this variable
# was removed here.)
os.environ["AIRFLOW_PACKAGE_NAME"] = PACKAGE_NAME

# Hack that allows a piece of the code to behave differently while the
# docs are being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ["BUILDING_AIRFLOW_DOCS"] = "TRUE"

# Used to generate rst_epilog and other post-generation substitutions.
global_substitutions = {
    "version": PACKAGE_VERSION,
    "airflow-version": airflow.__version__,
    "experimental": "This is an :ref:`experimental feature <experimental>`.",
}
# == Sphinx configuration ======================================================

# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

# General information about the project.
project = PACKAGE_NAME

# The version info for the project you're documenting.
version = PACKAGE_VERSION
# The full version, including alpha/beta/rc tags.
release = PACKAGE_VERSION

# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html

rst_epilog = get_rst_epilogue(PACKAGE_VERSION, False)

smartquotes_excludes = SMARTQUOTES_EXCLUDES

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = BASIC_SPHINX_EXTENSIONS
extensions.extend(["extra_files_with_substitutions"])

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: list[str] = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]

# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True

# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_airflow_theme"

html_title = f"{PACKAGE_NAME} Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""

# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = AIRFLOW_FAVICON_PATH.as_posix()

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [SPHINX_DESIGN_STATIC_PATH.as_posix()]

# Generated HTML files in which links should be substituted post-build.
manual_substitutions_in_generated_html = ["build.html", "index.html"]

html_css_files = ["custom.css"]

# -- Theme configuration -------------------------------------------------------
# Custom sidebar templates, maps document names to template names.
html_sidebars = get_html_sidebars(PACKAGE_VERSION)

# If false, no index is generated.
html_use_index = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False

html_theme_options: dict[str, Any] = get_html_theme_options()

conf_py_path = "/docker-stack-docs/"

# A dictionary of values to pass into the template engine's context for all pages.
html_context = get_html_context(conf_py_path)

# == Extensions configuration ==================================================

# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html

# This value contains a list of modules to be mocked up. This is useful when
# some external dependencies are not met at build time and break the building
# process.
autodoc_mock_imports = get_autodoc_mock_imports()

# The default options for autodoc directives. They are applied to all autodoc
# directives automatically.
autodoc_default_options = {"show-inheritance": True, "members": True}

autodoc_typehints = "description"
autodoc_typehints_description_target = "documented"
autodoc_typehints_format = "short"

# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html

# This config value contains names of other projects that should
# be linked to in this documentation.
# Inventories are only downloaded once by docs/exts/docs_build/fetch_inventories.py.
intersphinx_mapping = get_intersphinx_mapping()

# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/viewcode.html

# If this is True, viewcode extension will emit viewcode-follow-imported event
# to resolve the name of the module by other extensions. The default is True.
viewcode_follow_imported_members = True

# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html

# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
# NOTE(review): this points at CHART_DOC_PATH even though this conf is for
# the docker-stack package — confirm this is intentional.
autoapi_dirs: list[str] = [CHART_DOC_PATH.as_posix()]

# A list of patterns to ignore when finding files
autoapi_ignore = BASIC_AUTOAPI_IGNORE_PATTERNS

autoapi_log = logging.getLogger("sphinx.autoapi.mappers.base")
autoapi_log.addFilter(filter_autoapi_ignore_entries)

# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True

# Relative path to output the AutoAPI files into. This can also be used to
# place the generated documentation anywhere in your documentation hierarchy.
autoapi_root = "_api"

# Whether to insert the generated documentation into the TOC tree. If this is
# False, the default AutoAPI index page is not generated and you will need to
# include the generated documentation in a TOC tree entry yourself.
autoapi_add_toctree_entry = False

# By default autoapi will include private members -- we don't want that!
autoapi_options = AUTOAPI_OPTIONS

suppress_warnings = SUPPRESS_WARNINGS

# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath(".")

# -- Options for ext.redirects -------------------------------------------------
redirects_file = "redirects.txt"

# -- Options for sphinxcontrib-spelling ----------------------------------------
spelling_word_list_filename = [SPELLING_WORDLIST_PATH.as_posix()]
spelling_exclude_patterns = ["changelog.rst"]
spelling_ignore_contributor_names = False
spelling_ignore_importable_modules = True

graphviz_output_format = "svg"
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/flowcontrol/v1beta2"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
)
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3.
func Convert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *v1beta2.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
	// Forward the received conversion scope (was nil) so any nested
	// conversions performed by the generated code keep working.
	if err := autoConvert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in, out, s); err != nil {
		return err
	}
	// The generated converter cannot map the renamed field; do it by hand.
	out.NominalConcurrencyShares = in.AssuredConcurrencyShares
	return nil
}
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3.
func Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *v1beta2.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in, out, nil); err != nil {
return err
}
out.AssuredConcurrencyShares = in.NominalConcurrencyShares
return nil
} | go | github | https://github.com/kubernetes/kubernetes | pkg/apis/flowcontrol/v1beta2/conversion.go |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import re
import sys
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
  """Returns the names of every fetchable op output in the default graph."""
  graph = ops.get_default_graph()
  return [
      t.name
      for op in graph.get_operations()
      for t in op.outputs
      if graph.is_fetchable(t)
  ]
def all_feedables():
  """Returns every feedable op-input tensor in the default graph."""
  graph = ops.get_default_graph()
  return [
      t
      for op in graph.get_operations()
      for t in op.inputs
      if graph.is_feedable(t)
  ]
def opt_cfg(do_constant_folding=True):
  """Returns a ConfigProto with soft placement and L1 graph optimizations."""
  optimizer_options = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L1,
      do_function_inlining=True,
      do_constant_folding=do_constant_folding)
  graph_options = config_pb2.GraphOptions(optimizer_options=optimizer_options)
  return config_pb2.ConfigProto(
      allow_soft_placement=True, graph_options=graph_options)
def isum(s, maximum_iterations=None):
  """Builds a while_loop that adds the loop index to `s` while the index < 10.

  Args:
    s: a scalar tensor accumulator; each iteration adds the current index.
    maximum_iterations: optional cap forwarded to `while_loop`.

  Returns:
    The final accumulator tensor produced by the loop.
  """
  i = constant_op.constant(0, name="i")
  c = lambda i, s: math_ops.less(i, 10)
  b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
  _, r_s = control_flow_ops.while_loop(
      c, b, [i, s], maximum_iterations=maximum_iterations)
  return r_s
def enqueue_print_op(s):
  """Enqueues an op that prints a message to be captured in the test."""
  # The prefix must stay in sync with filter_test_messages below.
  return logging_ops.print_v2("ControlFlowOpsTest: " + s)
def filter_test_messages(s):
  """Returns a list of messages printed by enqueue_print_op."""
  prefix = "ControlFlowOpsTest: "
  messages = []
  for line in s.split("\n"):
    if line.startswith(prefix):
      messages.append(line[len(prefix):])
  return messages
def tf_function_in_tf2(f):
  """Wraps `f` in a tf.function only when TF2 behavior is enabled."""
  if tf2.enabled():
    # In TF1 do not wrap with tf.function so that we can test the v1 control
    # flow code path.
    return def_function.function(f)
  return f
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase, parameterized.TestCase):
@test_util.run_v1_only("b/120545219")
def testRefIdentity(self):
  """Tests that _Identity on a ref variable still observes a later assign."""
  with self.cached_session():
    v = variables.VariableV1(7)
    v = control_flow_ops._Identity(v)
    op = state_ops.assign(v, 9)
    v2 = control_flow_ops.with_dependencies([op], v)
    self.assertTrue(isinstance(v2, ops.Tensor))
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(9, self.evaluate(v2))
@test_util.run_v1_only("b/120545219")
def testRefEnter(self):
  """Tests assigning through a constant Enter of a ref variable."""
  with self.cached_session():
    v = variables.VariableV1(7)
    enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
    nine = constant_op.constant(9)
    enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
    op = state_ops.assign(enter_v, enter_nine)
    v2 = control_flow_ops.with_dependencies([op], enter_v)
    v3 = control_flow_ops.exit(v2)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(9, self.evaluate(v3))
@test_util.run_v1_only("b/120545219")
def testRefSwitch(self):
  """Tests assigning through the true output of a ref Switch."""
  with self.cached_session():
    v = variables.VariableV1(7)
    p = constant_op.constant(True)
    v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p)  # pylint: disable=protected-access
    v2 = state_ops.assign(v1[1], 9)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
  """Tests a multiply between two Enter ops followed by Exit."""
  with self.cached_session():
    data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
    five = constant_op.constant(5)
    enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
    mul_op = math_ops.multiply(enter_data, enter_five)
    exit_op = control_flow_ops.exit(mul_op)
    result = self.evaluate(exit_op)
  self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_deprecated_v1
def testEnterShapePropagation(self):
  """Tests that Enter propagates shape only when is_constant=True."""
  with self.cached_session():
    v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
    # If is_constant=True, the shape information should be propagated.
    enter_v_constant = gen_control_flow_ops.enter(
        v, "frame1", is_constant=True)
    self.assertEqual(enter_v_constant.shape, [2])
    # Otherwise, the shape should be unknown.
    enter_v_non_constant = gen_control_flow_ops.enter(
        v, "frame2", is_constant=False)
    self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self):
  """Tests switch/merge round-trip on an IndexedSlices value."""
  with self.cached_session():
    values = constant_op.constant([1, 2, 3, 4, 5, 6])
    indices = constant_op.constant([0, 2, 4, 6, 8, 10])
    data = ops.IndexedSlices(values, indices)
    pred = ops.convert_to_tensor(True)
    switch_op = control_flow_ops.switch(data, pred)
    merge_op = control_flow_ops.merge(switch_op)[0]
    val = merge_op.values
    ind = merge_op.indices
  self.assertAllEqual(np.arange(1, 7), val)
  self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self):
  """Tests that fetching the dead branch of a Switch raises an error."""
  with self.cached_session():
    data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    ports = ops.convert_to_tensor(True, name="ports")
    switch_op = control_flow_ops.switch(data, ports)
    # With a True predicate, output 0 (the false branch) is dead.
    dead_branch = array_ops.identity(switch_op[0])
    with self.assertRaisesWithPredicateMatch(
        errors_impl.InvalidArgumentError,
        lambda e: "Retval[0] does not have value" in str(e)):
      self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self):
  """Tests switch/merge driven by a computed less-than predicate."""
  with self.cached_session():
    data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    zero = ops.convert_to_tensor(0)
    one = ops.convert_to_tensor(1)
    less_op = math_ops.less(zero, one)
    switch_op = control_flow_ops.switch(data, less_op)
    merge_op = control_flow_ops.merge(switch_op)[0]
    result = self.evaluate(merge_op)
  self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self):
  """Tests merge of an add (false branch) and identity (true branch)."""
  with self.cached_session():
    data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    ports = ops.convert_to_tensor(False, name="ports")
    switch_op = control_flow_ops.switch(data, ports)
    one = constant_op.constant(1)
    add_op = math_ops.add(switch_op[0], one)
    id_op = array_ops.identity(switch_op[1])
    merge_op = control_flow_ops.merge([add_op, id_op])[0]
    result = self.evaluate(merge_op)
  self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self):
  """Tests merge of an add (false branch) and multiply (true branch)."""
  with self.cached_session():
    data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    ports = ops.convert_to_tensor(True, name="ports")
    switch_op = control_flow_ops.switch(data, ports)
    one = constant_op.constant(1)
    add_op = math_ops.add(switch_op[0], one)
    five = constant_op.constant(5)
    mul_op = math_ops.multiply(switch_op[1], five)
    merge_op = control_flow_ops.merge([add_op, mul_op])[0]
    result = self.evaluate(merge_op)
  self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self):
  """Tests a hand-built loop whose condition is immediately False."""
  with self.cached_session():
    false = ops.convert_to_tensor(False)
    n = constant_op.constant(10)
    enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
    enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
    merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
    switch_n = control_flow_ops.switch(merge_n, enter_false)
    exit_n = control_flow_ops.exit(switch_n[0])
    next_n = control_flow_ops.next_iteration(switch_n[0])
    # Patch the merge's second input to close the loop back-edge.
    merge_n.op._update_input(1, next_n)
    result = self.evaluate(exit_n)
  self.assertAllEqual(10, result)
@test_util.run_deprecated_v1
def testLoop_1(self):
  """Tests a hand-built counting loop with the Merge placed on GPU."""
  with self.cached_session():
    zero = constant_op.constant(0)
    one = constant_op.constant(1)
    n = constant_op.constant(10)
    enter_i = gen_control_flow_ops.enter(zero, "foo", False)
    enter_one = gen_control_flow_ops.enter(one, "foo", True)
    enter_n = gen_control_flow_ops.enter(n, "foo", True)
    with ops.device(test.gpu_device_name()):
      merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
    less_op = math_ops.less(merge_i, enter_n)
    cond_op = control_flow_ops.loop_cond(less_op)
    switch_i = control_flow_ops.switch(merge_i, cond_op)
    add_i = math_ops.add(switch_i[1], enter_one)
    next_i = control_flow_ops.next_iteration(add_i)
    # Patch the merge's second input to close the loop back-edge.
    merge_i.op._update_input(1, next_i)
    exit_i = control_flow_ops.exit(switch_i[0])
    result = self.evaluate(exit_i)
  self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self):
  """Tests a hand-built counting loop with NextIteration placed on GPU."""
  with self.cached_session():
    zero = constant_op.constant(0)
    one = constant_op.constant(1)
    n = constant_op.constant(10)
    enter_i = gen_control_flow_ops.enter(zero, "foo", False)
    enter_one = gen_control_flow_ops.enter(one, "foo", True)
    enter_n = gen_control_flow_ops.enter(n, "foo", True)
    merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
    less_op = math_ops.less(merge_i, enter_n)
    cond_op = control_flow_ops.loop_cond(less_op)
    switch_i = control_flow_ops.switch(merge_i, cond_op)
    add_i = math_ops.add(switch_i[1], enter_one)
    with ops.device(test.gpu_device_name()):
      next_i = control_flow_ops.next_iteration(add_i)
    # Patch the merge's second input to close the loop back-edge.
    merge_i.op._update_input(1, next_i)
    exit_i = control_flow_ops.exit(switch_i[0])
    result = self.evaluate(exit_i)
  self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self):
  """Tests that mixing tensors from different frames is an error."""
  with self.cached_session():
    data = array_ops.placeholder(dtypes.float32, shape=[])
    enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
    enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
    res = math_ops.add(enter_1, enter_2)
    with self.assertRaisesOpError("has inputs from different frames"):
      res.eval(feed_dict={data: 1.0})
@test_util.run_deprecated_v1
def testCondBool(self):
  """Tests that a Python bool predicate to cond raises a TypeError."""
  values = constant_op.constant(10)
  fn1 = lambda: math_ops.add(values, 1)
  fn2 = lambda: math_ops.subtract(values, 1)
  with self.assertRaisesRegex(TypeError, "must not be a Python bool"):
    _ = control_flow_ops.cond(False, fn1, fn2)
@test_util.run_deprecated_v1
def testCondInt(self):
  """Tests that the gradient of a cond over an int value is None."""
  p = array_ops.placeholder(dtypes.bool, shape=[])
  v = constant_op.constant(10)
  fn1 = lambda: math_ops.add(v, 1)
  fn2 = lambda: math_ops.subtract(v, 1)
  y = control_flow_ops.cond(p, fn1, fn2)
  grad = gradients_impl.gradients(y, [v])
  self.assertAllEqual([None], grad)
def testCondOutputShape(self):
  """Tests that cond's output keeps the scalar static shape."""
  x = constant_op.constant(1.0)
  b = control_flow_ops.cond(
      constant_op.constant(True), lambda: math_ops.square(x),
      lambda: math_ops.subtract(x, 1.))
  self.assertEqual(b.shape, tensor_shape.TensorShape([]))
@test_util.run_v1_only("b/120545219")
def testFetchable(self):
  """Tests that non-fetchable tensors inside cond raise on fetch."""
  with self.cached_session() as sess:
    x = array_ops.placeholder(dtypes.float32)
    control_flow_ops.cond(
        constant_op.constant(True), lambda: x + 2, lambda: x + 0)
    graph = ops.get_default_graph()
    for op in graph.get_operations():
      for t in op.inputs:
        if graph.is_fetchable(t.op):
          sess.run(t, feed_dict={x: 3})
        else:
          with self.assertRaisesRegex(ValueError,
                                      "has been marked as not fetchable"):
            sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self):
  """Tests that non-feedable tensors inside a while loop raise on feed."""
  with self.cached_session() as sess:
    c = constant_op.constant(2)
    i0 = constant_op.constant(0)
    r = control_flow_ops.while_loop(lambda i: i < 1000,
                                    lambda i: math_ops.square(c) + i, [i0])
    self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
    feedable_tensors = all_feedables()
    for t in feedable_tensors:
      sess.run(r, feed_dict={t: 3})
    graph = ops.get_default_graph()
    for op in graph.get_operations():
      for t in op.inputs:
        if t not in feedable_tensors and t.dtype is dtypes.int32:
          with self.assertRaisesRegex(ValueError, "may not be fed"):
            sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self):
  """Tests cond where both branches return IndexedSlices."""
  with self.cached_session():
    values = constant_op.constant([10])
    indices = constant_op.constant([0])
    x = ops.IndexedSlices(values, indices)
    pred = math_ops.less(1, 2)
    fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
    fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
    r = control_flow_ops.cond(pred, fn1, fn2)
    val = r.values
    ind = r.indices
  self.assertAllEqual([11], val)
  self.assertAllEqual([0], ind)
def testCondMismatchedIndexedSlices(self):
  """Tests that cond branches returning IndexedSlices vs. a plain Tensor fail.

  The true branch returns an IndexedSlices while the false branch returns a
  dense Tensor; cond must refuse to reconcile the mismatched 0-th outputs.
  """

  @def_function.function
  def foo():
    values = constant_op.constant([10])
    indices = constant_op.constant([0])
    x = ops.IndexedSlices(values, indices)
    with self.assertRaisesRegex(TypeError,
                                "Cannot reconcile tf.cond 0-th outputs"):
      # BUG FIX: the original call passed a stray trailing `indices`
      # positional argument to cond after false_fn, which was silently
      # consumed by cond's following parameter instead of being part of
      # either branch. Only pred/true_fn/false_fn are intended here.
      control_flow_ops.cond(
          constant_op.constant(True),
          lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices),
          lambda: math_ops.add(x.values, 1))

  foo()
def testCondSparseTensor(self):
  """Tests cond where both branches return SparseTensors."""
  values = constant_op.constant([2.0, 4.0], name="values")
  indices = constant_op.constant([[0], [3]],
                                 dtype=dtypes.int64,
                                 name="indices")
  shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
  x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
  pred = math_ops.less(1, 2)
  fn1 = lambda: sparse_tensor.SparseTensor(
      indices + 1, x.values + 1, dense_shape=shape)
  fn2 = lambda: sparse_tensor.SparseTensor(
      indices, x.values - 1, dense_shape=shape)
  r = control_flow_ops.cond(pred, fn1, fn2)
  self.assertAllEqual([3.0, 5.0], r.values)
  self.assertAllEqual([[1], [4]], r.indices)
  self.assertAllEqual(r.values.get_shape(), (2,))
def testCondRaggedTensor(self):
  """Tests cond where both branches return RaggedTensors."""
  rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
  pred = math_ops.less(1, 2)
  fn1 = lambda: array_ops.concat([rt + 2, [[100]]], axis=0)
  fn2 = lambda: rt[:2] - 2
  result = control_flow_ops.cond(pred, fn1, fn2)
  self.assertAllEqual([3, 4, 5, 6, 7, 8, 100], result.values)
  self.assertAllEqual([0, 2, 3, 6, 7], result.row_splits)
@test_util.run_v1_only("b/120545219")
def testCondResource(self):
  """Tests cond with a resource-variable predicate and an assigning branch."""
  with self.cached_session():
    rv = resource_variable_ops.ResourceVariable(True)
    self.evaluate(variables.global_variables_initializer())
    t = ops.convert_to_tensor(1.0)

    def case():
      assign = resource_variable_ops.assign_variable_op(rv.handle, False)
      with ops.control_dependencies([assign]):
        return array_ops.identity(t)

    self.assertEqual(
        1.0, self.evaluate(control_flow_ops.cond(rv, case, lambda: t)))
@test_util.run_deprecated_v1
def testCondResourceGradShape(self):
  """Tests that gradients through a cond of resource variables keep shape."""
  rv1 = resource_variable_ops.ResourceVariable([1.0, 2.0])
  rv2 = resource_variable_ops.ResourceVariable([3.0, 4.0])
  pred = constant_op.constant(True)
  result = control_flow_ops.cond(pred, lambda: rv1, lambda: rv2)
  grads = gradients_impl.gradients(result, [rv1, rv2])
  self.assertAllEqual(grads[0].shape.as_list(), [2])
  self.assertAllEqual(grads[1].shape.as_list(), [2])
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self):
  """Tests gradients through a cond whose branch uses map_fn (TensorArray)."""
  with self.cached_session() as sess:
    with ops.device(test.gpu_device_name()):
      pred = array_ops.placeholder(dtypes.bool, [])
      x = constant_op.constant([1.0, 2.0, 3.0])
      y = control_flow_ops.cond(
          pred, lambda: map_fn.map_fn(lambda z: z * 2.0, x),
          lambda: constant_op.constant([1.0, 1.0, 1.0]))
      g = gradients_impl.gradients(y, x)[0]
    self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0])
    self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self):
  """Tests cond with IndexedSlices whose indices have different dtypes."""
  with self.cached_session():
    values = constant_op.constant([10])
    i_32 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int32)
    i_64 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int64)
    x = ops.IndexedSlices(values, i_32)
    pred = math_ops.less(1, 2)
    fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
    fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
    r = control_flow_ops.cond(pred, fn1, fn2)
    val = r.values
    ind = r.indices
  self.assertAllEqual([11], val)
  self.assertAllEqual([0], ind)
  # The merged indices are widened to the larger of the two index dtypes.
  self.assertTrue(ind.dtype == np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
  """Tests that cond's Switch for a CPU-placed variable stays on CPU."""
  with self.session(use_gpu=True):
    with ops.device("/cpu:0"):
      v = variables.Variable(7.0)
    x = constant_op.constant(10.0)
    pred = math_ops.less(1.0, 2.0)
    fn1 = lambda: math_ops.add(v, 1.0)
    fn2 = lambda: math_ops.subtract(x, 1.0)
    r = control_flow_ops.cond(pred, fn1, fn2)
    for op in x.graph.get_operations():
      if op.name == "cond/Add/Switch":
        self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
  """Helper: runs a simple true-branch cond on the requested device."""
  with self.cached_session(use_gpu=use_gpu):
    x = constant_op.constant(10)
    pred = math_ops.less(1, 2)
    fn1 = lambda: math_ops.add(x, 1)
    fn2 = lambda: math_ops.subtract(x, 1)
    r = control_flow_ops.cond(pred, fn1, fn2)
    result = self.evaluate(r)
  self.assertAllEqual(11, result)
def testCond_1(self):
  """Tests a simple cond on CPU (GPU variant currently disabled)."""
  self._testCond_1(use_gpu=False)
  # TODO(b/116526896): Enable GPU tests.
  # self._testCond_1(use_gpu=True)
def testCond_2(self):
  """Tests cond taking the false branch."""
  with self.cached_session():
    x = constant_op.constant(10)
    r = control_flow_ops.cond(
        math_ops.less(1, 0), lambda: math_ops.add(x, 1),
        lambda: math_ops.subtract(x, 1))
    result = self.evaluate(r)
  self.assertAllEqual(9, result)
def testCond_3(self):
  """Tests a cond nested inside another cond's branch function."""
  with self.cached_session():
    x = constant_op.constant(10)
    pred = math_ops.less(1, 2)
    fn1 = lambda: math_ops.add(x, 1)
    fn2 = lambda: math_ops.subtract(x, 1)
    fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
    r = control_flow_ops.cond(pred, fn3, fn2)
    result = self.evaluate(r)
  self.assertAllEqual(12, result)
@test_util.run_in_graph_and_eager_modes
def testCondPruning(self):
  """Tests that unfetched cond branch side effects are pruned in graph mode."""
  v1 = variables.Variable(7)
  v2 = variables.Variable(7)
  v3 = variables.Variable(7)

  def f():
    age = constant_op.constant(3)
    max_age = constant_op.constant(2)
    pred = math_ops.greater(age, max_age)
    fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
    fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
    r = control_flow_ops.cond(pred, fn1, fn2)
    self.assertEqual(len(r), 2)
    return r[1]

  f_defun = eager_function.defun(f)

  if not context.executing_eagerly():
    with self.cached_session():
      self.evaluate(variables.global_variables_initializer())
      result = self.evaluate(f())
      self.assertEqual(True, result)
      # Only second cond result was fetched, so v1 assign shouldn't run.
      self.assertEqual(7, self.evaluate(v1))
      self.assertEqual(2, self.evaluate(v2))
      self.assertEqual(7, self.evaluate(v3))

  result = f_defun()
  self.assertEqual(True, self.evaluate(result))
  # Both v1 and v2 branch assignments should be run in defun.
  self.assertEqual(1, self.evaluate(v1))
  self.assertEqual(2, self.evaluate(v2))
  self.assertEqual(7, self.evaluate(v3))
def testCond_5(self):
  """Tests a Python loop repeatedly rebuilding conds over prior outputs."""
  with self.cached_session():
    alive = constant_op.constant(True, name="alive")
    count = constant_op.constant(0, name="count")

    def body(i):
      return control_flow_ops.cond(
          alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
          lambda: [alive, count])

    for i in range(10):
      alive, count = body(i)
    # count increments while alive; alive turns False once i reaches 3.
    self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self):
  """Tests cond mixing a constant branch with a variable branch."""
  with self.cached_session():
    v1 = variables.Variable([7])
    age = constant_op.constant(3)
    pred = math_ops.greater(age, 4)
    fn1 = lambda: age
    fn2 = lambda: v1
    r = control_flow_ops.cond(pred, fn1, fn2)
    self.evaluate(variables.global_variables_initializer())
    result = self.evaluate(r)
  self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
  """Tests cond with branches returning lists of two tensors."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
    fn2 = lambda: [y, y]
    r = control_flow_ops.cond(pred, fn1, fn2)
    self.assertAllEqual([11, 12], self.evaluate(r))
@parameterized.parameters(dtypes.float32, dtypes.float64)
@test_util.run_v1_only("Uses tf.gradients")
def testCondResourceGrad(self, dtype):
  """Tests the gradient through the variable branch of a cond."""
  init = constant_op.constant([7.], dtype=dtype)
  v1 = variables.Variable(init)
  age = constant_op.constant(3., dtype=dtype)
  pred = math_ops.greater(age, 4.)
  fn1 = lambda: age
  fn2 = lambda: v1
  r = control_flow_ops.cond(pred, fn1, fn2)
  grad = gradients_impl.gradients(r, v1)[0]
  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual(grad, [1.])
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCond_Device(self):
  """Tests that a cond built under a CPU device scope runs fully on CPU."""
  x = constant_op.constant(-10.)

  # True branch function defined outside of device scope
  def true_fn():
    return math_ops.exp(x)

  with ops.device("CPU:0"):
    r = control_flow_ops.cond(
        constant_op.constant(True), true_fn, lambda: 0.)
    self.assertIn("cpu", r.device.lower())

  with session.Session() as sess:
    options = config_pb2.RunOptions(output_partition_graphs=True)
    run_metadata = config_pb2.RunMetadata()
    sess.run(r, options=options, run_metadata=run_metadata)
    # We expect that everything runs on CPU, even if GPU is available.
    self.assertEqual(len(run_metadata.partition_graphs), 1)
def _count_matching_switch_nodes_on_device(self, run_metadata, device_str,
                                           dtype):
  """Returns the number of Switch nodes with type `dtype` on `device_str`.

  Expects exactly one partition graph whose nodes live on `device_str`.
  """
  # Returns the number of Switch nodes with type dtype placed on
  # `device_str`.
  device_graphs = [
      g for g in run_metadata.partition_graphs
      if device_str in g.node[0].device
  ]
  self.assertLen(device_graphs, 1)
  switch_nodes = [
      n for n in device_graphs[0].node
      if n.op == "Switch" and n.attr["T"].type == dtype.as_datatype_enum
  ]
  return len(switch_nodes)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputExplicitlyPlacedOnCPU(self):
  """Tests that a cond Switch is colocated with its CPU-pinned input."""
  x = array_ops.placeholder(dtypes.float32)

  # `arg` is used in the cond then branch so a Switch node is created for it.
  # We test that the Switch node gets placed on the same device as `arg`.
  # We force `arg` to be on CPU here.
  with ops.device("CPU:0"):
    arg = x + 10.

  def true_fn():
    with ops.device("CPU:0"):
      return arg + 1

  r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
  with session.Session() as sess:
    run_metadata = config_pb2.RunMetadata()
    options = config_pb2.RunOptions(output_partition_graphs=True)
    sess.run(
        r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
    self.assertLen(run_metadata.partition_graphs, 2)
    # Check that the Switch for `arg` gets placed on CPU.
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
                                                    dtypes.float32), 1)
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
                                                    dtypes.float32), 0)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputPlacedOnCPU(self):
  """Tests Switch colocation when the placer puts the input on CPU."""
  x = array_ops.placeholder(dtypes.float32)

  # `arg` is used in the cond then branch so a Switch node is created for it.
  # We test that the Switch node gets placed on the same device as `arg`.
  # Since arg is a dataset (and only has a CPU kernel), it gets placed on CPU
  # by placer.
  arg = dataset_ops.Dataset.range(8)

  def true_fn():
    return cardinality.cardinality(arg)

  r = control_flow_ops.cond(
      constant_op.constant(True), true_fn,
      lambda: constant_op.constant(0, dtypes.int64))
  with session.Session() as sess:
    run_metadata = config_pb2.RunMetadata()
    options = config_pb2.RunOptions(output_partition_graphs=True)
    sess.run(
        r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
    self.assertLen(run_metadata.partition_graphs, 2)
    # Check that the Switch for `arg` gets placed on CPU.
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
                                                    dtypes.variant), 1)
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
                                                    dtypes.variant), 0)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputOnGPU(self):
  """Tests that a cond Switch follows its input onto GPU."""
  x = array_ops.placeholder(dtypes.float32)

  # `arg` is used in the cond then branch so a Switch node is created for it.
  # We test that the Switch node gets placed on the same device as `arg`.
  # Note: `arg` gets placed on GPU by default by the placer.
  arg = x + 10.

  def true_fn():
    with ops.device("CPU:0"):
      return arg + 1

  r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
  with session.Session() as sess:
    run_metadata = config_pb2.RunMetadata()
    options = config_pb2.RunOptions(output_partition_graphs=True)
    sess.run(
        r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
    self.assertEqual(len(run_metadata.partition_graphs), 2)
    # Check that the Switch for `arg` gets placed on GPU.
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
                                                    dtypes.float32), 0)
    self.assertEqual(
        self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
                                                    dtypes.float32), 1)
def testCondAccessTrueBranchTensorInFalseBranchRaises(self):
  """Tests that a tensor created in true_fn cannot be read in false_fn."""

  @def_function.function
  def f():
    c = constant_op.constant(1.)
    inputs = {"c": c}

    def true_fn(inputs):
      # Mutates the shared dict, leaking a true-branch tensor to false_fn.
      inputs["c"] = array_ops.identity(inputs["c"], name="true_branch")
      return inputs["c"]

    def false_fn(inputs):
      return array_ops.identity(inputs["c"])

    pred = constant_op.constant(True)
    return control_flow_ops.cond(
        pred, lambda: true_fn(inputs), lambda: false_fn(inputs))

  # This was needed for backwards compatibility with TF2 Estimators which
  # rely on variable names.
  prefix = "cond/" if context.executing_eagerly() else ""

  with self.assertRaisesRegex(
      ValueError,
      "Tensor %strue_branch:0 in true_fn is accessed from false_fn." %
      prefix):
    f()
def testSwitchCaseAccessBranch1TensorInBranch4Raises(self):
  """Tests that a tensor created in one switch_case branch can't leak to another."""

  @def_function.function
  def f():
    c = constant_op.constant(1.)
    inputs = {"c": c}

    def br1_fn(inputs):
      # Mutates the shared dict, leaking a branch-1 tensor to branch 4.
      inputs["c"] = array_ops.identity(inputs["c"], name="br1_identity")
      return inputs["c"]

    def br4_fn(inputs):
      return array_ops.identity(inputs["c"])

    def other_fn():
      return array_ops.identity(c)

    return control_flow_ops.switch_case(
        constant_op.constant(2),
        [other_fn, lambda: br1_fn(inputs), other_fn, other_fn,
         lambda: br4_fn(inputs)])

  # This was needed for backwards compatibility with TF2 Estimators which
  # rely on variable names.
  prefix = "switch_case/indexed_case/" if context.executing_eagerly() else ""
  with self.assertRaisesRegex(
      ValueError, "Tensor %sbr1_identity:0 in branch 1 is "
      "accessed from branch 4." % prefix):
    f()
def testCondListOutput(self):
  """Tests that cond preserves list-structured branch outputs."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)]
    fn2 = lambda: [y, y]
    r = control_flow_ops.cond(pred, fn1, fn2)
    test_result = self.evaluate(r)
    self.assertListEqual([210, 210], test_result)
def testTupleOutput(self):
  """Tests that cond preserves tuple-structured branch outputs."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y))
    fn2 = lambda: (y, y)
    r = control_flow_ops.cond(pred, fn1, fn2)
    test_result = self.evaluate(r)
    self.assertTupleEqual((210, 210), test_result)
def testDictOutput(self):
  """Tests that cond preserves dict-structured branch outputs."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
    fn2 = lambda: {"a": y, "b": y}
    r = control_flow_ops.cond(pred, fn1, fn2)
    test_result = self.evaluate(r)
    self.assertDictEqual({"a": 210, "b": 210}, test_result)
def testEmbeddedListOutput(self):
  """Tests that cond with strict=True preserves nested list outputs."""
  x = constant_op.constant(10)
  y = constant_op.constant(200)
  pred = math_ops.less(1, 2)
  fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]]
  fn2 = lambda: [[y, y]]
  # Pass strict=True flag as cond_v2 allows for tensors to be
  # in nested output structures as singletons
  r = control_flow_ops.cond(pred, fn1, fn2, strict=True)
  test_result = self.evaluate(r)
  self.assertListEqual([[210, 210]], test_result)
def testEmbeddedTupleOutput(self):
  """Tests that cond preserves nested tuple outputs."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y)))
    fn2 = lambda: ((y, y))
    r = control_flow_ops.cond(pred, fn1, fn2)
    test_result = self.evaluate(r)
    self.assertTupleEqual(((210, 210)), test_result)
def testEmbeddedDictOutput(self):
  """Tests that cond preserves nested dict outputs."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: {"a": {"c": math_ops.add(x, y)},
                   "b": {"d": math_ops.add(x, y)}}
    fn2 = lambda: {"a": {"c": y},
                   "b": {"d": y}}
    r = control_flow_ops.cond(pred, fn1, fn2)
    test_result = self.evaluate(r)
    self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
@test_util.run_v1_only("b/120545219")
def testCheckNestedOutputStruct(self):
  """Tests that cond rejects branches with mismatched output structures."""
  with self.cached_session() as sess:
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
    fn2 = lambda: {"c": y, "d": y}
    # v1 and v2 control flow report the mismatch differently.
    v1_msg = "The two structures don't have the same nested structure"
    v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same "
              "number, type, and overall structure of return values.")
    with self.assertRaisesRegex(
        TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError,
        v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
      control_flow_ops.cond(pred, fn1, fn2)
@test_util.run_deprecated_v1
def testCondRef(self):
  """Tests cond with a ref-type variable in the true branch."""
  with self.cached_session():
    x = gen_state_ops.variable(
        shape=[1],
        dtype=dtypes.float32,
        name="x",
        container="",
        shared_name="")
    true_fn = lambda: x
    false_fn = lambda: constant_op.constant([2.0])
    r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
    self.assertAllEqual([2.0], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testCondWithControl(self):
  """Tests cond whose branch carries a control dependency on a placeholder."""
  with self.cached_session() as sess:
    control_holder = array_ops.placeholder(dtypes.float32, shape=())
    a = constant_op.constant(3)

    def true_branch():
      with ops.control_dependencies([control_holder]):
        _ = a + 1
      return a + 2

    r = control_flow_ops.cond(
        constant_op.constant(True), true_branch,
        lambda: constant_op.constant(1))
    result = sess.run(r, feed_dict={control_holder: 5.})
    self.assertEqual(5, result)
@test_util.run_v1_only("b/120545219")
def testUninitializedRefIdentity(self):
  """Tests that ref_identity accepts an uninitialized ref input."""
  with self.cached_session() as sess:
    v = gen_state_ops.variable(
        shape=[1],
        dtype=dtypes.float32,
        name="v",
        container="",
        shared_name="")
    inited = state_ops.is_variable_initialized(v)
    v_f, v_t = control_flow_ops.ref_switch(v, inited)
    # Both v_f and v_t are uninitialized references. However, an actual use
    # of the reference in the 'true' branch in the 'tf.identity' op will
    # not 'fire' when v is uninitialized, so this is a valid construction.
    # This test tests that ref_identity allows uninitialized ref as input
    # so that this construction is allowed.
    v_f_op = gen_array_ops.ref_identity(v_f)
    v_t_op = gen_array_ops.ref_identity(v_t)
    with ops.control_dependencies([v_f_op]):
      assign_v = state_ops.assign(v, [1.0])
    with ops.control_dependencies([v_t_op]):
      orig_v = array_ops.identity(v)
    merged_op = control_flow_ops.merge([assign_v, orig_v])
    self.assertAllEqual([1.0], self.evaluate(merged_op.output))
def testCondSwitchIdentity(self):
  """Tests that the dead-branch Assert is not triggered after optimization."""
  # Make sure the recv identity is not removed by optimization.
  with session.Session(config=opt_cfg()) as sess:
    pred = constant_op.constant(True)

    def fn1():
      return control_flow_ops.no_op()

    def fn2():
      return control_flow_ops.Assert(False, ["Wrong branch!!!"])

    r = control_flow_ops.cond(pred, fn1, fn2)
    self.evaluate(r)
def testCondRecvIdentity(self):
  """Same as above, but the dead branch lives on a different device."""
  # Make sure the switch identity is not removed by optimization.
  with session.Session(config=opt_cfg()) as sess:
    with ops.device(test.gpu_device_name()):
      pred = constant_op.constant(True)

    def fn1():
      return control_flow_ops.no_op()

    def fn2():
      with ops.device("/cpu:0"):
        return control_flow_ops.Assert(False, ["Wrong branch!!!"])

    r = control_flow_ops.cond(pred, fn1, fn2)
    self.evaluate(r)
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testDisableLoweringSwitchMerge(self):
  """Tests that the single-threaded executor skips cond lowering to Switch."""
  if test_util.is_gpu_available():
    self.skipTest(
        "Single threaded executor doesn't support partitioned graphs. "
        "Skipping GPU test.")
  # Make pred feedable to ensure we don't constant-fold it out.
  run_opts = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  run_metadata_no_lowering = config_pb2.RunMetadata()
  run_metadata_with_lowering = config_pb2.RunMetadata()

  config = opt_cfg(do_constant_folding=False)

  pred = array_ops.placeholder_with_default(
      constant_op.constant(True), shape=())
  r = control_flow_ops.cond(pred, lambda: True, lambda: False)

  with session.Session(config=config) as sess:
    r_value = sess.run(
        r, options=run_opts, run_metadata=run_metadata_with_lowering)
    self.assertEqual(r_value, True)

  # Use the single threaded executor, which disables control flow lowering.
  config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR"
  with session.Session(config=config) as sess:
    r_value = sess.run(
        r, options=run_opts, run_metadata=run_metadata_no_lowering)
    self.assertEqual(r_value, True)

  self.assertTrue(  # pylint: disable=g-complex-comprehension
      any("switch" in ns.node_name
          for dev_stat in run_metadata_with_lowering.step_stats.dev_stats
          for ns in dev_stat.node_stats))

  self.assertTrue(  # pylint: disable=g-complex-comprehension
      all("switch" not in ns.node_name
          for dev_stat in run_metadata_no_lowering.step_stats.dev_stats
          for ns in dev_stat.node_stats))
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self):
  """Gradient through a cond whose branches are both identity is 1."""
  with self.cached_session():
    x = constant_op.constant(10.0, name="x")
    pred = math_ops.less(1, 2)

    def true_branch():
      return array_ops.identity(x)

    def false_branch():
      return array_ops.identity(x)

    cond_out = control_flow_ops.cond(pred, true_branch, false_branch)
    dx = gradients_impl.gradients(cond_out, [x])[0]
    self.assertAllEqual(1.0, self.evaluate(dx))
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testCondComputeGradAfterSessRunFails(self):
  """Building a cond gradient after Session.run fails without intermediates.

  With control flow v2 and output_all_intermediates disabled, forward-pass
  intermediates are not exported, so constructing the gradient afterwards
  must raise with an actionable message.

  Fix: the last regex fragment was a non-raw string containing ``\\(`` — an
  invalid escape sequence (DeprecationWarning now, SyntaxError in future
  Python). It is now a raw string like the adjacent fragments.
  """
  with self.cached_session():
    x = constant_op.constant(10.0, name="x")
    pred = math_ops.less(1, 2)

    def true_fn():
      a = x * x
      return a * a

    def false_fn():
      return x * x

    r = control_flow_ops.cond(pred, true_fn, false_fn)
    # Forward value: (x*x)^2 = 10000 on the taken branch.
    self.assertAllEqual(r, 10000.)
    grad = gradients_impl.gradients(r, [x])[0]
    with self.assertRaisesRegex(
        errors_impl.InvalidArgumentError,
        r"Connecting to invalid output 1 of source node cond which has 1 "
        r"outputs. Try using "
        r"tf.compat.v1.experimental.output_all_intermediates\(True\)."):
      self.evaluate(grad)
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testCondComputeGradAfterSessRun(self):
  """With output_all_intermediates, gradients can be built after a run."""
  with self.cached_session():
    x = constant_op.constant(10.0, name="x")
    pred = math_ops.less(1, 2)

    def true_fn():
      a = x * x
      return a * a

    def false_fn():
      return x * x

    r = control_flow_ops.cond(pred, true_fn, false_fn)
    # Forward run first: (x*x)^2 = 10000.
    self.assertAllEqual(r, 10000.)
    grad = gradients_impl.gradients(r, [x])[0]
    # d(x^4)/dx = 4*x^3 = 4000 at x = 10.
    self.assertAllEqual(grad, 4000.)
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testNestedCondComputeGradAfterSessRun(self):
  """Gradient after Session.run also works when the cond is nested."""
  with self.cached_session():
    x = constant_op.constant(10.0, name="x")
    pred = math_ops.less(1, 2)

    def true_fn():

      def inner_true_fn():
        a = x * x
        return a * a

      def inner_false_fn():
        return x * x

      return control_flow_ops.cond(
          constant_op.constant(True), inner_true_fn, inner_false_fn)

    def false_fn():
      return x * x

    r = control_flow_ops.cond(pred, true_fn, false_fn)
    # Taken path is the inner true branch: (x*x)^2 = 10000.
    self.assertAllEqual(r, 10000.)
    grad = gradients_impl.gradients(r, [x])[0]
    # d(x^4)/dx = 4*x^3 = 4000 at x = 10.
    self.assertAllEqual(grad, 4000.)
@test_util.run_deprecated_v1
def testCondGrad_2(self):
  """Cond gradient equals the taken branch's multiplier."""
  with self.cached_session():
    c = array_ops.placeholder(dtypes.int32, shape=[])
    x = constant_op.constant(10.0)
    pred = math_ops.less(c, 2)

    def times_42():
      return math_ops.multiply(x, 42.0)

    def times_3():
      return math_ops.multiply(x, 3.0)

    cond_out = control_flow_ops.cond(pred, times_42, times_3)
    dx = gradients_impl.gradients(cond_out, [x])[0]
    # d(42*x)/dx when c < 2; d(3*x)/dx otherwise.
    self.assertAllEqual(42.0, dx.eval(feed_dict={c: 1}))
    self.assertAllEqual(3.0, dx.eval(feed_dict={c: 3}))
@test_util.disable_control_flow_v2(
    "b/110550782 (gradient w.r.t external variable)")
@test_util.run_deprecated_v1
def testCondGrad_3(self):
  """A cond branch may itself compute a gradient w.r.t. an external tensor."""
  with self.cached_session():
    c = array_ops.placeholder(dtypes.int32, shape=[])
    ox = constant_op.constant(10.0)
    pred = math_ops.less(c, 2)

    def fn1(x):
      # Branch returns d(x^2)/d(ox), where x captures ox via y = 7*ox.
      m = x * x
      return gradients_impl.gradients(m, [ox])[0]

    fn2 = lambda: math_ops.multiply(ox, 3.0)
    y = math_ops.multiply(7.0, ox)
    r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)

    # fn1: d((7*ox)^2)/d(ox) = 98*ox = 980 at ox = 10; fn2: 3*ox = 30.
    self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
    self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
@test_util.run_deprecated_v1
def testCondGradMultiDevice(self):
  """First and second cond gradients with ops placed on two CPU devices."""
  config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                  allow_soft_placement=True)
  with self.cached_session(use_gpu=True, config=config) as sess:
    pred = array_ops.placeholder(dtypes.bool, [])
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)

    with ops.device("/cpu:0"):
      z = control_flow_ops.cond(pred, lambda: x * y * 2.0, lambda: 2.0)

    # First gradient built on a different device than the cond itself.
    with ops.device("/cpu:1"):
      grad = gradients_impl.gradients(z, x)[0]

    with ops.device("/cpu:0"):
      grad_grad = gradients_impl.gradients(grad, x)[0]

    # dz/dx = 2*y on the true branch, 0 on the false branch.
    self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
    self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)

    # v1 control flow gets None second derivative for some reason.
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.assertIsNone(grad_grad)
      return

    self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
    self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
@test_util.run_v1_only("b/120545219")
def testNestedCond_Simple(self):
  """Gradients flow through a cond nested inside another cond."""
  with self.cached_session():
    x = constant_op.constant(0., name="X")

    def nested_identity():
      return control_flow_ops.cond(x < 1., lambda: x, lambda: x)

    taken_true = control_flow_ops.cond(
        constant_op.constant(True), lambda: x, nested_identity)
    self.assertEqual(
        1.0, self.evaluate(gradients_impl.gradients(taken_true, x)[0]))

    taken_false = control_flow_ops.cond(
        constant_op.constant(False), lambda: x, nested_identity)
    self.assertEqual(
        1.0, self.evaluate(gradients_impl.gradients(taken_false, x)[0]))
@test_util.run_v1_only("b/120545219")
def testCondGrad_Gather(self):
  """Cond gradient aggregating an identity branch and a gather branch."""
  with self.cached_session() as sess:
    v1 = variables.Variable([1.0, 42.0])
    c = array_ops.placeholder(dtypes.int32, shape=[])
    pred = math_ops.less(c, 2)
    fn1 = lambda: array_ops.identity(v1)
    fn2 = lambda: array_ops.gather(v1, [1, 1])
    r = control_flow_ops.cond(pred, fn1, fn2)
    # The following `grad` is a Tensor since it is the aggregation of an
    # IndexedSlice and a Tensor. It is an `IndexedSlices` with control flow
    # v2.
    grad = gradients_impl.gradients(r, [v1])[0]
    self.evaluate(variables.global_variables_initializer())

    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.assertIsInstance(grad, ops.IndexedSlices)

    # Identity branch taken: dense gradient of ones.
    grad_value = sess.run(grad, feed_dict={c: 1})
    self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [1.0, 1.0])

    # Gather branch taken: index 1 receives two unit contributions.
    grad_value = sess.run(grad, feed_dict={c: 3})
    self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [0.0, 2.0])
@test_util.run_deprecated_v1
def testCondGrad_ResourceVarSparseRead(self):
  """Gradient of a cond over sparse_read yields IndexedSlices for the var."""
  # NOTE(skyewm): this test is interesting because the
  # ResourceVariable.sparse_read gradient function returns IndexedSlices.
  var = resource_variable_ops.ResourceVariable(
      np.ones((4, 2), dtype=np.float32))
  x = constant_op.constant(1.0)
  r = control_flow_ops.cond(
      constant_op.constant(True),
      lambda: x * math_ops.reduce_sum(var.sparse_read([1, 2])),
      lambda: constant_op.constant(np.zeros((2, 3)),
                                   dtype=dtypes.float32))
  grad = gradients_impl.gradients(r, var)[0]

  self.evaluate(variables.global_variables_initializer())
  grad_val = self.evaluate(grad)
  self.assertIsInstance(grad_val, ops.IndexedSlicesValue)
  # Only rows 1 and 2 were read, so only those rows receive gradient.
  self.assertAllEqual(gradient_checker_v2._to_numpy(grad_val), [[0., 0.],
                                                                [1., 1.],
                                                                [1., 1.],
                                                                [0., 0.]])
def testCondGrad_MultiGather(self):
  """Cond gradients aggregate IndexedSlices from gather and sparse_read.

  NOTE(skyewm): this test is interesting because the array_ops.gather and
  ResourceVariable.sparse_read gradient functions returns IndexedSlices.

  Fix: the `assertIsInstance(x1_grad, ...)` assertion was duplicated
  (copy-paste); the redundant second occurrence is removed.
  """
  var = resource_variable_ops.ResourceVariable(
      np.ones((4, 2), dtype=np.float32))
  x1 = constant_op.constant(np.ones((3, 3), dtype=np.float32))
  x2 = constant_op.constant(2.0)

  def true_fn():
    y1 = var.sparse_read([1, 2])
    y2 = array_ops.gather(x1, [2]) * x2
    y3 = x2 * [1., 1., 1.]
    return y1, y2, y3

  def false_fn():
    y1 = np.zeros((2, 2), dtype=np.float32)
    y2 = array_ops.gather(x1, [2]) * x2
    y3 = array_ops.gather(x1, [2])
    return y1, y2, y3

  @def_function.function
  def foo():
    r = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)
    return gradients_impl.gradients(r, [var, x1, x2])

  grad = foo()
  self.evaluate(variables.global_variables_initializer())
  var_grad, x1_grad, x2_grad = self.evaluate(grad)

  # sparse_read([1, 2]) gradient: rows 1 and 2 only.
  self.assertIsInstance(var_grad, ops.IndexedSlicesValue)
  self.assertAllEqual(gradient_checker_v2._to_numpy(var_grad), [[0., 0.],
                                                                [1., 1.],
                                                                [1., 1.],
                                                                [0., 0.]])
  # gather(x1, [2]) * x2 gradient: row 2 scaled by x2 = 2.
  self.assertIsInstance(x1_grad, ops.IndexedSlicesValue)
  self.assertAllEqual(gradient_checker_v2._to_numpy(x1_grad), [[0., 0., 0.],
                                                               [0., 0., 0.],
                                                               [2., 2., 2.]])
  # d(y2 + y3)/d(x2) = sum(gather(x1, [2])) + sum([1, 1, 1]) = 3 + 3 = 6.
  self.assertEqual(gradient_checker_v2._to_numpy(x2_grad), 6.)
@test_util.run_v1_only("b/120545219")
def testCondPredicateTensor(self):
  """Regression test for lowering predicate from non-first output of an op."""

  @eager_function.defun
  def foo():
    return constant_op.constant("foo"), constant_op.constant(True)

  # The predicate is foo()[1], i.e. output index 1 of the function-call op.
  r = control_flow_ops.cond(foo()[1], lambda: 1.0, lambda: 2.0)
  self.assertEqual(self.evaluate(r), 1.0)
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedConstantPredicate(self):
  """Feeding a constant predicate overrides its baked-in value."""
  with self.cached_session() as sess:
    value = constant_op.constant(37.0)
    predicate = constant_op.constant(True)
    cond_output = control_flow_ops.cond(
        predicate, lambda: constant_op.constant(0.0), lambda: value)
    result = array_ops.identity(cond_output)
    # Fed False -> false branch; fed True or unfed -> true branch.
    for feed, expected in ((False, 37.0), (True, 0.0), (None, 0.0)):
      feeds = {} if feed is None else {predicate: feed}
      self.assertEqual(expected, sess.run(result, feed_dict=feeds))
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedPlaceholderWithDefaultPredicate(self):
  """Feeding a placeholder_with_default predicate switches the branch."""
  with self.cached_session() as sess:
    value = constant_op.constant(37.0)
    predicate = array_ops.placeholder_with_default(
        constant_op.constant(True), [])
    cond_output = control_flow_ops.cond(
        predicate, lambda: constant_op.constant(0.0), lambda: value)
    result = array_ops.identity(cond_output)
    # Fed False -> false branch; fed True or the default -> true branch.
    for feed, expected in ((False, 37.0), (True, 0.0), (None, 0.0)):
      feeds = {} if feed is None else {predicate: feed}
      self.assertAllEqual(expected, sess.run(result, feed_dict=feeds))
@test_util.run_in_graph_and_eager_modes
def testCondAutoControlDeps(self):
  """Print side effects in cond branches: pruned in v1 graphs, ordered in defuns.

  Fix: removed the unused local `print_prefix`.
  """
  if test_util.is_gpu_available():
    self.skipTest("b/128676188 causes OOM on opensource gpu tests")

  def branch_fn():
    enqueue_print_op("A")
    enqueue_print_op("B")
    with ops.control_dependencies([enqueue_print_op("C")]):
      return constant_op.constant(10)

  def build_cond():
    return control_flow_ops.cond(
        constant_op.constant(True), branch_fn, lambda: 0)

  def build_nested_cond():
    return control_flow_ops.cond(
        constant_op.constant(True), build_cond, lambda: 0)

  # In v1 graph mode, pruning should make only "C" print.
  if not context.executing_eagerly():
    with self.cached_session():
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(build_cond()), 10)
      self.assertEqual(["C"], filter_test_messages(printed.contents()))

      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(build_nested_cond()), 10)
      self.assertEqual(["C"], filter_test_messages(printed.contents()))

  # In defuns, all prints should execute in program order.
  # This doesn't work with legacy control flow.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:

    @eager_function.defun
    def cond():
      return build_cond()

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(cond()), 10)
    self.assertEqual(["A", "B", "C"],
                     filter_test_messages(printed.contents()))

    @eager_function.defun
    def nested_cond():
      return build_nested_cond()

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(nested_cond()), 10)
    self.assertEqual(["A", "B", "C"],
                     filter_test_messages(printed.contents()))

  # wrap_function should prune.
  def pruned_cond():
    return build_cond()
  pruned_cond = wrap_function.wrap_function(pruned_cond, [])

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(pruned_cond()), 10)
  self.assertEqual(["C"], filter_test_messages(printed.contents()))

  def pruned_nested_cond():
    return build_nested_cond()
  pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
  self.assertEqual(["C"], filter_test_messages(printed.contents()))
@test_util.run_in_graph_and_eager_modes
def testWhileAutoControlDeps(self):
  """Print side effects in while loops: pruned in v1 graphs, ordered in defuns."""
  # Legacy while_loop fails this test because it produces deprecation notices
  # in stderr.
  if not control_flow_util.ENABLE_CONTROL_FLOW_V2: return

  def cond(i, unused_x):
    enqueue_print_op("A")
    return i < 2

  def body(i, x):
    enqueue_print_op("B")
    with ops.control_dependencies([enqueue_print_op("C")]):
      x = array_ops.identity(x)
    with ops.control_dependencies([enqueue_print_op("D")]):
      return i + 1, x

  def build_while():
    return control_flow_ops.while_loop(
        cond, body, [constant_op.constant(0), constant_op.constant(0)])

  def build_nested_while():
    return control_flow_ops.cond(
        constant_op.constant(True), build_while, lambda: [0, 0])

  # In v1 graph mode, pruning should make only "D" print.
  if not context.executing_eagerly():
    with self.cached_session():
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(build_while()[0]), 2)
      self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
      self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

  # In defuns, all prints should execute in program order.
  @eager_function.defun
  def while_loop():
    return build_while()[0]

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(while_loop()), 2)
  # Two iterations of A-B-C-D, plus the final failing condition check "A".
  self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                   filter_test_messages(printed.contents()))

  @eager_function.defun
  def nested_while_loop():
    return build_nested_while()[0]

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(nested_while_loop()), 2)
  self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                   filter_test_messages(printed.contents()))

  # wrap_function should prune.
  def pruned_while():
    return build_while()[0]
  pruned_while = wrap_function.wrap_function(pruned_while, [])

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(pruned_while()), 2)
  self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

  def pruned_nested_while():
    return build_nested_while()[0]
  pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])

  with self.captureWritesToStream(sys.stderr) as printed:
    self.assertEqual(self.evaluate(pruned_nested_while()), 2)
  self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
  """A plain counting while_loop reaches its bound."""
  with self.cached_session():
    start = constant_op.constant(0)
    result = control_flow_ops.while_loop(
        lambda x: math_ops.less(x, 10000),
        lambda x: math_ops.add(x, 1), [start],
        parallel_iterations=20)
    self.assertEqual(10000, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependencies(self):
  """Control deps on an op created outside the loop body are honored."""
  with self.cached_session():
    v = variables.Variable(0.0)
    self.evaluate(v.initializer)
    # The assign_add is created outside the loop; the body only depends on it.
    increment = v.assign_add(1.0).read_value()

    def body_fn(i):
      with ops.control_dependencies([increment]):
        return i + 1

    result = control_flow_ops.while_loop(cond=lambda i: i < 2,
                                         body=body_fn, loop_vars=[1])
    self.assertAllEqual(result, 2)
    # Starting at i=1 the loop runs one iteration, so v was incremented once.
    self.assertAllEqual(v.read_value(), 1.0)
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependenciesNoInput(self):
  """External control deps are honored even when the body ignores its input."""
  with self.cached_session():
    v = variables.Variable(0.0)
    self.evaluate(v.initializer)
    # TODO(apassos): figure out why the reading is necessary here.
    increment = v.assign_add(1.0).read_value()

    def body_fn(unused_i):
      with ops.control_dependencies([increment]):
        # Returning 5 fails the i < 5 condition, ending the loop after
        # a single iteration.
        return constant_op.constant(5, name="five")

    result = control_flow_ops.while_loop(cond=lambda i: i < 5,
                                         body=body_fn, loop_vars=[0])
    self.evaluate(result)
    # One iteration ran, so the increment executed exactly once.
    self.assertAllEqual(self.evaluate(v), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefs_1(self):
  """Ref-typed loop variables keep their ref dtype through the loop."""
  with self.cached_session() as sess:
    x = variables.VariableV1(0)._ref()  # pylint: disable=protected-access
    i = constant_op.constant(0)
    c = lambda i, x: math_ops.less(i, 100)

    self.assertEqual(x.dtype, dtypes.int32_ref)

    def b(i, x):
      # The ref dtype must be preserved inside the body as well.
      self.assertEqual(x.dtype, dtypes.int32_ref)
      return (i + 1, gen_array_ops.ref_identity(x))

    r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)

    self.evaluate(variables.global_variables_initializer())

    # Counter exits as a plain int32; the ref variable stays a ref.
    self.assertEqual(r[0].dtype, dtypes.int32)
    self.assertEqual(r[1].dtype, dtypes.int32_ref)

    value_i, value_x = self.evaluate(r)

    self.assertEqual(100, value_i)
    self.assertEqual(0, value_x)
def testWhile_2(self):
  """isum over a scalar starting at 0 accumulates 45."""
  with self.cached_session():
    total = isum(constant_op.constant(0))
    self.assertAllEqual(45, self.evaluate(total))
def testWhileWithMaximumIterations(self):
  """isum with maximum_iterations=3 adds exactly 3 to every element."""
  with self.cached_session():
    start = constant_op.constant([1, 2, 3, 4, 5])
    result = isum(start, maximum_iterations=3)
    self.assertAllEqual([4, 5, 6, 7, 8], self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
  """maximum_iterations=1 halts the loop before the condition does."""
  with self.cached_session():
    result = control_flow_ops.while_loop(
        lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
    self.assertEqual(1, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testXLAGradInLoop(self):
  """Shape ops feeding gradient args are not pushed to stacks in XLA loops."""
  # We have an optimization that moves certain reduction ops, this test makes
  # sure we don't do that for XLA ops.

  # Use dynamic inputs, which triggers the creation of "BroadcastGradientArgs"
  # and "Shape" op.
  input1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
  input2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])

  def cond(i1, i2):
    return False

  def body(i1, i2):
    return math_ops.add(i1, i2), math_ops.add(i1, i2)

  xla_context = control_flow_ops.XLAControlFlowContext()
  xla_context.Enter()
  out1, _ = control_flow_ops.while_loop(
      cond, body, (input1, input2), maximum_iterations=2)
  g = gradients_impl.gradients(out1, [input1])

  for op in out1.graph.get_operations():
    # Test that the "Shape" is directly passed to BroadcastGradientArgs
    # instead of being pushed to the stack.
    if op.type == "BroadcastGradientArgs":
      self.assertEqual(op.inputs[0].op.type, "Shape")
      self.assertEqual(op.inputs[1].op.type, "Shape")
  xla_context.Exit()
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
@test_util.run_v1_only("b/120545219")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
  """Gradient of a max-iterations loop can be built inside an XLA loop."""
  v = constant_op.constant(1.0)

  def training_loop_with_gradient(i):
    # Inner loop's maximum_iterations comes from the outer loop variable.
    out = control_flow_ops.while_loop(
        lambda i_, _: i_ < 3,
        lambda i_, j: [i_ + 1, j * v], [0, 1.0],
        maximum_iterations=i)
    g = gradients_impl.gradients(out, v)
    with ops.control_dependencies(g):
      return i + 1

  xla_context = control_flow_ops.XLAControlFlowContext()
  xla_context.Enter()
  # Create training loop, ensure we can call gradient() of
  # while_loop inside the training loop.
  loop = control_flow_ops.while_loop(lambda i: i < 3,
                                     training_loop_with_gradient, [0])
  xla_context.Exit()

  loop_execute = array_ops.identity(loop)  # Because loop is not fetchable.

  # Should execute without issue.
  self.assertEqual(3, self.evaluate(loop_execute))
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
  """Gradients in XLA require a usable maximum_iterations on inner loops."""
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    self.skipTest("WhileV2 does lazy evaluation of maximum_iterations")
  v = constant_op.constant(1.0)

  def inner_body(i, x):
    # Inner loop's maximum_iterations depends on the outer loop variable,
    # which is not statically known inside XLA.
    out = control_flow_ops.while_loop(
        lambda i, _: i < 3,
        lambda i, j: [i + 1, j * v], [0, x],
        maximum_iterations=i)
    return out

  def create_while_loop(maximum_iterations=None):
    return control_flow_ops.while_loop(
        lambda i, _: i < 3,
        inner_body, [0, 1.0],
        maximum_iterations=maximum_iterations)

  loop_no_xla = create_while_loop(maximum_iterations=5)
  # maximum_iterations is fine outside of an XLA scope
  gs = gradients_impl.gradients(loop_no_xla, v)
  self.evaluate(gs)  # This should execute without error.

  xla_context = control_flow_ops.XLAControlFlowContext()
  xla_context.Enter()
  loop_no_maxiter = create_while_loop()
  loop_with_maxiter = create_while_loop(maximum_iterations=2)
  xla_context.Exit()

  # No maximum_iterations at all: gradient accumulator cannot be created.
  with self.assertRaisesRegex(
      ValueError,
      r"Cannot create a gradient accumulator for tensor '.+' inside "
      r"XLA while_loop because maximum_iterations was not passed to "
      r"the tf.while_loop call \('.+'\)."):
    _ = gradients_impl.gradients(loop_no_maxiter, v)

  # Outer max is fine, but the inner loop's max is loop-variant.
  with self.assertRaisesRegex(
      ValueError,
      r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
      r"while_loop. maximum_iterations tensor '.+' for while_loop context "
      r"'.+' must be statically known \(e.g. a constant value or known "
      r"shape dimension\), or be defined at or outside the while loop "
      r"context '.*' \(currently defined in '.*'\)"):
    _ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
  """maximum_iterations created in a sibling cond context is rejected."""
  v = constant_op.constant(1.0)

  def create_while_loop():
    max_iter_holder = []

    def create_mi():
      # The placeholder is created inside the cond branch, i.e. inside a
      # sibling control flow context relative to the while loop below.
      max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
      return 1.0

    _ = control_flow_ops.cond(
        constant_op.constant(True), create_mi, create_mi)

    return control_flow_ops.while_loop(
        lambda i, _: i < 3,
        lambda i, x: (i + 1, v * x), (0, 1.0),
        maximum_iterations=max_iter_holder[0])

  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    # WhileV2 rejects the cross-graph capture at construction time.
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    with self.assertRaisesRegex(ValueError, r"must be from the same graph.*"):
      loop = create_while_loop()
    xla_context.Exit()
  else:
    # Legacy control flow only fails when the gradient is requested.
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    loop = create_while_loop()
    xla_context.Exit()
    with self.assertRaisesRegex(
        ValueError,
        r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
        r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
        r"while_loop context '.+' must be statically known \(e.g. a constant "
        r"value or known shape dimension\), or be defined at or outside the "
        r"while loop context '' \(currently defined in 'cond/.+'\)"):
      _ = gradients_impl.gradients(loop, v)
@test_util.run_v1_only("b/120545219")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
  """Counts stack pushes for nested loops whose max_iters come from outside.

  Fix: the `stack_push_op` local was assigned but never used; the node-name
  filter now uses it instead of repeating the string literal.
  """
  if test_util.is_gpu_available():
    self.skipTest("b/128646372, b/128645947 fails in opensource build")

  v = constant_op.constant(1.0)

  p = array_ops.placeholder(dtype=dtypes.int32)

  def mid_body_builder(iterations):

    def mid_body(i, x):
      r = control_flow_ops.while_loop(
          lambda *_: True,
          lambda i, x: (i + 1, v * x), (0, x),
          maximum_iterations=iterations,
          name="inner")
      return (i + 1, gradients_impl.gradients(x + r[1], v)[0])

    return mid_body

  def outer_body(i, x):
    # Both mid and inner loops are bounded by size(p) from this context.
    iterations = array_ops.size(p, name="iterations")
    return (i + 1, x + control_flow_ops.while_loop(
        lambda *_: True,
        mid_body_builder(iterations), (0, x),
        maximum_iterations=iterations,
        name="mid")[1])

  def create_while_loop():
    with ops.device("/cpu:0"):
      r = control_flow_ops.while_loop(
          lambda *_: True,
          outer_body, (0, 1.0),
          maximum_iterations=5,
          name="outer")
      return array_ops.identity(r[1])

  xla_context = control_flow_ops.XLAControlFlowContext()
  xla_context.Enter()
  final_with_xla_context = create_while_loop()
  xla_context.Exit()

  final_without_xla_context = create_while_loop()

  with self.session(use_gpu=False) as sess:
    opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata_without_xla_context = config_pb2.RunMetadata()
    run_metadata = config_pb2.RunMetadata()

    final_value_without_xla_context = sess.run(
        final_without_xla_context,
        feed_dict={p: [0, 0, 0]},
        options=opts,
        run_metadata=run_metadata_without_xla_context)

    final_value_with_xla_context = sess.run(
        final_with_xla_context,
        feed_dict={p: [0, 0, 0]},
        options=opts,
        run_metadata=run_metadata)

    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      # With while_v2 on xla, run_metadata only contains the unlowered While
      # op so node_stats does not have statistics for the pushes. So as a
      # loose check we check the pushes in the lowered version.
      for dev in run_metadata_without_xla_context.step_stats.dev_stats:
        if "/device:CPU" in dev.device:
          node_stats = dev.node_stats
          stack_push_count = len([
              x for x in node_stats
              if re.match(r".*TensorListPushBack_?\d*", x.node_name)
          ])
    else:
      for dev in run_metadata.step_stats.dev_stats:
        if "/device:CPU" in dev.device:
          node_stats = dev.node_stats
          stack_push_op = "StackPushV2"
          stack_push_count = len(
              [x for x in node_stats if x.node_name.endswith(stack_push_op)])

    # Pushes to the stack = product of maximum_iterations values;
    # the last two "3"s comes from size(p), when p == [0, 0, 0].
    self.assertEqual(stack_push_count, 5 * 3 * 3, str(node_stats))

    self.assertAllClose(final_value_with_xla_context,
                        final_value_without_xla_context)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
@test_util.run_deprecated_v1
def testWhile_3(self):
  """Accumulates two counters into a running total inside while_loop."""
  with self.cached_session():

    def body(i, m, c, o):
      m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
      o = math_ops.add(math_ops.add(o, m), c)
      return [math_ops.add(i, 1), m, c, o]

    limit = ops.convert_to_tensor(100)
    loop_vars = [ops.convert_to_tensor(0) for _ in range(4)]
    r = control_flow_ops.while_loop(
        lambda i, m, c, o: math_ops.less(i, limit), body, loop_vars)
    # Total is 2 * sum(1..100) = 10100.
    self.assertAllEqual(10100, r[3])
@test_util.run_deprecated_v1
def testWhile_4(self):
  """Loop body gathers from a constant vector; total is 2 * sum(x) = 42."""
  with self.cached_session():
    x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
    n = array_ops.size(x)

    def body(i, m, c, o):
      m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
      o = math_ops.add(math_ops.add(o, m), c)
      return [math_ops.add(i, 1), m, c, o]

    loop_vars = [ops.convert_to_tensor(0) for _ in range(4)]
    r = control_flow_ops.while_loop(
        lambda i, m, c, o: math_ops.less(i, n), body, loop_vars)
    self.assertAllEqual(42, r[3])
@test_util.run_v1_only("b/120545219")
def testWhile_5(self):
  """Concatenating loop var grows each iteration under unknown-shape invariants."""
  with self.cached_session():

    def compute(i, c, o):
      # Slice x[i:i+1]; `[1] + expand_dims(i, 0)` is tensor addition
      # (list + Tensor), yielding the end index i + 1 as a 1-D tensor.
      c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
                                  [1] + array_ops.expand_dims(i, 0))
      o = array_ops.concat([o, c], 0)
      i = math_ops.add(i, 1)
      return [i, c, o]

    i = ops.convert_to_tensor(0)
    c = ops.convert_to_tensor([0])
    o = ops.convert_to_tensor([0])
    x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
    s = array_ops.size(x)
    # c and o change shape across iterations, so their invariants are unknown.
    r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
                                    compute, [i, c, o], [
                                        i.get_shape(),
                                        tensor_shape.unknown_shape(),
                                        tensor_shape.unknown_shape()
                                    ])
    result = r[2]
    self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testWhile_Device(self):
  """A loop built under a CPU device scope stays on CPU even with a GPU."""

  # Body function defined outside of device scope
  def body(x):
    return math_ops.exp(x)

  with ops.device("CPU:0"):
    r = control_flow_ops.while_loop(
        lambda x: x < 10, body, [constant_op.constant(-10.)])
    self.assertIn("cpu", r.device.lower())

  with session.Session() as sess:
    options = config_pb2.RunOptions(output_partition_graphs=True)
    run_metadata = config_pb2.RunMetadata()
    sess.run(r, options=options, run_metadata=run_metadata)
    # We expect that everything runs on CPU, even if GPU is available.
    self.assertEqual(len(run_metadata.partition_graphs), 1)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
@test_util.run_v1_only("b/120545219")
def testBufferForwarding(self):
  """Buffer forwarding keeps the number of unique allocations low."""
  run_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  run_metadata = config_pb2.RunMetadata()

  with self.cached_session() as sess:
    with ops.device("/cpu:0"):
      c = constant_op.constant(2)
      i0 = constant_op.constant(0)
      r = control_flow_ops.while_loop(lambda i: i < 1000,
                                      lambda i: math_ops.square(c) + i, [i0])
    r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
    self.assertEqual(1000, r_val)
    self.assertTrue(run_metadata.HasField("step_stats"))
    # Collect the distinct output-buffer pointers across all traced nodes;
    # forwarding reuses buffers, so few distinct pointers should appear.
    unique_allocs = set()
    for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
      for output in node_stat.output:
        unique_allocs.add(
            output.tensor_description.allocation_description.ptr)
    # Prior to cl/147536680, the number of unique allocations was about 1005.
    self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
  """Runs a simple float counting loop on the requested device."""
  with self.cached_session(use_gpu=use_gpu):
    start = constant_op.constant(1.0)
    result = control_flow_ops.while_loop(
        lambda x: math_ops.less(x, 10.0),
        lambda x: math_ops.add(x, 1.0), [start])
    self.assertAllClose(10.0, self.evaluate(result))
def testWhile_Gpu_1(self):
  """Exercises the float counting loop with and without GPU."""
  for use_gpu in (False, True):
    self._testWhile_Gpu_1(use_gpu=use_gpu)
def _testWhile_Gpu_2(self, use_gpu):
  """Counting loop whose body op is pinned to the CPU."""
  with self.cached_session(use_gpu=use_gpu):
    start = constant_op.constant(1.0)

    def add_on_cpu(x):
      with ops.device("/cpu:0"):
        return math_ops.add(x, 1.0)

    result = control_flow_ops.while_loop(
        lambda x: math_ops.less(x, 10.0), add_on_cpu, [start])
    self.assertAllClose(10.0, self.evaluate(result))
def testWhile_Gpu_2(self):
  """Exercises the CPU-pinned-body loop with and without GPU."""
  for use_gpu in (False, True):
    self._testWhile_Gpu_2(use_gpu=use_gpu)
def testWhileShape(self):
  """A loop variable may change shape when its invariant is unknown."""
  with self.cached_session():
    counter = constant_op.constant(0)
    mat = array_ops.ones([2, 2])

    def grow(i, j):
      return [math_ops.add(i, 1), array_ops.tile(j, [2, 2])]

    r = control_flow_ops.while_loop(
        lambda i, j: math_ops.less(i, 2), grow, [counter, mat],
        [counter.get_shape(), tensor_shape.unknown_shape()])
    # Two tilings of a 2x2 produce an 8x8 matrix of ones.
    product = r[1] * array_ops.ones([8, 8])
    self.assertAllEqual(np.ones((8, 8)), self.evaluate(product))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShape(self):
  """A shape invariant incompatible with the input is rejected."""
  x = constant_op.constant([2.0, 4.0], name="values")
  i = constant_op.constant(0)
  c = lambda i, _: math_ops.less(i, 10)
  b = lambda i, x: [i + 1, x + 1]
  with self.assertRaisesRegex(ValueError, "is not compatible with"):
    # Shape of x is [2], but we specify a shape of [5].
    control_flow_ops.while_loop(
        c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])
@test_util.run_in_graph_and_eager_modes
def testWhileBadBodyReturn(self):
  """A body returning more values than it takes raises a structure error."""
  x = constant_op.constant([2.0, 4.0], name="values")
  i = constant_op.constant(0)
  loop_cond = lambda i, *x: math_ops.less(i, 10)
  # body accepts N values and returns N+1 values.
  loop_body = lambda i, *x: (i, i) + x
  with self.assertRaisesRegex(
      ValueError, "The two structures don't have the same nested structure."):
    control_flow_ops.while_loop(loop_cond, loop_body, [i, x])
@test_util.run_deprecated_v1
def testWhileWithNonTensorInput_Scalar(self):
  """A plain Python int is accepted as the loop variable."""
  with self.cached_session():
    result = control_flow_ops.while_loop(
        lambda x: x < 10000, lambda x: x + 1, [0], parallel_iterations=20)
    self.assertEqual(10000, self.evaluate(result))
def testWhileWithNonTensorInput_Vector(self):
  """A 1-element numpy array is accepted as the loop variable."""
  with self.cached_session():
    start = np.array([0])  # Note, [0] would not work here; that is a list
    result = control_flow_ops.while_loop(
        lambda x: x[0] < 10000,
        lambda x: array_ops.stack([x[0] + 1]), [start],
        parallel_iterations=20)
    self.assertEqual([10000], self.evaluate(result))
def testWhileShapeInference(self):
  """A partial shape invariant [None, 2] is propagated to the loop output."""
  with self.cached_session():
    counter = constant_op.constant(0)
    mat = array_ops.ones([2, 2])

    def double_rows(i, j):
      return [math_ops.add(i, 1), array_ops.concat([j, j], 0)]

    r = control_flow_ops.while_loop(
        lambda i, j: math_ops.less(i, 2), double_rows, [counter, mat],
        [counter.get_shape(), tensor_shape.TensorShape([None, 2])])
    self.assertTrue(r[1].shape.is_compatible_with([8, 2]))
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceBadShape(self):
  """Shape changing across iterations without an invariant raises."""
  with self.cached_session():
    i = constant_op.constant(0)
    m = array_ops.ones([2, 2])
    c = lambda i, j: math_ops.less(i, 2)
    # concat doubles the leading dimension on every iteration.
    b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
    with self.assertRaisesRegex(
        ValueError,
        r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
        r"shape \(4, 2\) after one iteration. To allow the shape to vary "
        r"across iterations, use the `shape_invariants` argument of "
        r"tf.while_loop to specify a less-specific shape."):
      control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
  """Shape inference for SparseTensor loop variables under each invariant."""
  values = constant_op.constant([2.0, 4.0], name="values")
  indices = constant_op.constant([[0], [3]],
                                 dtype=dtypes.int64,
                                 name="indices")
  shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
  i = constant_op.constant(0)
  x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

  def c(i, _):
    return i < 10

  def b1(i, x):  # modifies values. (shape of components is not changed.)
    return [
        i + 1,
        sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
    ]

  def b2(i, x):  # adds new values. (shape of components is changed.)
    return [
        i + 1,
        sparse_ops.sparse_add(
            x,
            sparse_tensor.SparseTensor(
                indices=math_ops.cast(
                    array_ops.fill([1, 1], i), dtypes.int64),
                values=array_ops.fill([1], 1.0),
                dense_shape=x.dense_shape))
    ]

  def b3(i, x):  # modifies rank. (shape of all components is changed.)
    return [
        i + 1,
        sparse_tensor.SparseTensor(
            array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
            array_ops.concat([x.dense_shape, [10]], axis=0))
    ]

  def check_shapes(r, indices, values, dense_shape):
    # Asserts the static shapes of all three SparseTensor components.
    self.assertTrue(r.indices.shape.is_compatible_with(indices))
    self.assertTrue(r.values.shape.is_compatible_with(values))
    self.assertTrue(r.dense_shape.shape.is_compatible_with(dense_shape))

  # Default shape invariant; b1 only modifies values.
  _, r = control_flow_ops.while_loop(c, b1, [i, x])
  check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])

  # Default shape invariant; b2 adds new values
  _, r = control_flow_ops.while_loop(c, b2, [i, x])
  check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])

  # Explicit shape invariant, allowing any rank; b1 only modifies values.
  _, r = control_flow_ops.while_loop(
      c, b1, [i, x],
      [i.get_shape(), tensor_shape.TensorShape([None])])
  check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])

  # Explicit shape invariant, allowing any rank; b3 modifies rank.
  _, r = control_flow_ops.while_loop(
      c, b3, [i, x],
      [i.get_shape(), tensor_shape.TensorShape([None])])
  check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])

  # Shape invariant with ndims=None. Technically, this isn't supported
  # according to the docs, but we support it for backwards compatibility.
  _, r = control_flow_ops.while_loop(
      c, b1, [i, x],
      [i.get_shape(), tensor_shape.TensorShape(None)])
  check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
  _, r = control_flow_ops.while_loop(
      c, b3, [i, x],
      [i.get_shape(), tensor_shape.TensorShape(None)])
  check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShapeSparseTensor(self):
    """Incompatible SparseTensor invariants / rank changes raise ValueError."""
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: i < 10
    b1 = lambda i, x: [i+1, x]
    def b2(i, x): # modifies rank. (shape of all components is changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(
              array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
              array_ops.concat([x.dense_shape, [10]], axis=0))
      ]
    # Explicit shape invariant, with a specific (incompatible) rank.
    with self.assertRaisesRegex(ValueError, "is not compatible with"):
      control_flow_ops.while_loop(
          c, b1, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([5])])
    # Default shape invariant, but b2 modifies rank (which is not allowed).
    with self.assertRaises(ValueError):
      control_flow_ops.while_loop(c, b2, [i, x])
  def testWhileShapeInferenceIndexedSlices(self):
    """Shape inference for IndexedSlices loop variables."""
    with self.cached_session():
      values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
      indices = constant_op.constant([0, 3], name="indices")
      shape = constant_op.constant([10, 2], name="dense_shape")
      i = constant_op.constant(0)
      x = ops.IndexedSlices(values, indices, dense_shape=shape)
      def c(i, _):
        return i < 10
      def b(i, x):
        # Body only scales the values; component shapes are unchanged.
        return [
            i + 1,
            ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
        ]
      # Default invariant: the fully-known values shape (2, 2) is preserved.
      _, r = control_flow_ops.while_loop(c, b, [i, x])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
      # Explicit invariant relaxes dim 0 of values to None.
      _, r = control_flow_ops.while_loop(
          c, b, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertTrue(r.values.get_shape().is_compatible_with([None, 2]))
  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShapeIndexedSlices(self):
    """An incompatible explicit shape invariant raises ValueError.

    NOTE(review): despite the name, this test constructs a SparseTensor, not
    an IndexedSlices, and the cond lambda returns the constant 10 rather than
    a boolean — presumably harmless because while_loop raises during shape
    invariant validation before the cond result matters; confirm intent.
    """
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: 10
    b = lambda i, x: [i+1, x]
    # Explicit shape invariant, with a specific (incompatible) rank.
    with self.assertRaisesRegex(ValueError, "is not compatible with"):
      control_flow_ops.while_loop(
          c, b, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([5])])
  def testWhileShapeInferenceRaggedTensor(self):
    """Shape inference for RaggedTensor loop vars under several invariants."""
    i = constant_op.constant(0)
    x = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
    c = lambda i, _: i < 10
    def b1(i, x):  # Adds new values to rows (but doesn't create new rows)
      return [
          i + 1,
          array_ops.concat([x, x], axis=1)
      ]
    def b2(i, x):  # Adds new rows.
      return [
          i + 1,
          array_ops.concat([x, x], axis=0)
      ]
    def check_shapes(r, values, splits):
      # Asserts the static shapes of the flat values and the row splits.
      self.assertTrue(r.values.shape.is_compatible_with(values))
      self.assertTrue(r.row_splits.shape.is_compatible_with(splits))
    # Default shape invariant; b1 adds new values to rows.
    _, r = control_flow_ops.while_loop(c, b1, [i, x])
    check_shapes(r, values=[None], splits=[4])
    # Default shape invariant; b2 adds new rows (not allowed).
    if not context.executing_eagerly():
      with self.assertRaises(ValueError):
        _, r = control_flow_ops.while_loop(c, b2, [i, x])
    # Explicit shape invariant; b1 adds new values to rows.
    # (deprecated: use TensorShape instead of RaggedTensorSpec)
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None, None])])
    check_shapes(r, values=[None], splits=[None])
    # Explicit shape invariant; b1 adds new values to rows.
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
                                                       dtypes.int32)])
    check_shapes(r, values=[None], splits=[None])
    # Explicit shape invariant; b2 adds new rows.
    _, r = control_flow_ops.while_loop(
        c, b2, [i, x],
        [i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
                                                       dtypes.int32)])
    check_shapes(r, values=[None], splits=[None])
  def testWhileShapeInferenceRaggedTensorRaggedRank2(self):
    """Shape inference for a ragged-rank-2 RaggedTensor loop variable."""
    i = constant_op.constant(0)
    x = ragged_factory_ops.constant([[[1, 2], [3], [4, 5, 6]],
                                     [[], [8, 9, 10]]])
    c = lambda i, _: i < 10
    def b(i, x):
      # Appends one element per innermost row each iteration.
      return [
          i + 1,
          array_ops.concat([x, x[..., i:i+1]], axis=-1)
      ]
    _, r = control_flow_ops.while_loop(c, b, [i, x])
    # Outer row count is fixed; inner splits/values may be static or unknown
    # depending on the control-flow implementation, so accept either.
    self.assertEqual(r.row_splits.shape.as_list(), [3])
    self.assertTrue(r.values.row_splits.shape.as_list() in ([6], [None]))
    self.assertTrue(r.values.values.shape.as_list() in ([49], [None]))
def testWhileShapeInvariantTensorSpec(self):
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, array_ops.stack([x, x]))
shape_invariants = [
tensor_spec.TensorSpec([], dtype=dtypes.int32),
tensor_spec.TensorSpec(None, dtype=dtypes.int32)]
control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
  # TODO(b/131265085) Remove this decorator when bug is fixed.
  @test_util.build_as_function_and_v1_graph
  def testWhileShapeInvariantWrongTypeSpecType(self):
    """A TypeSpec invariant must match the loop variable's composite type."""
    c = lambda i, _: i < 10
    b = lambda i, x: (i + 1, x)
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor([[0]], [1.0], [10])
    shape_invariants = [
        tensor_spec.TensorSpec([], dtype=dtypes.int32),
        sparse_tensor.SparseTensorSpec([None])]
    # Matching SparseTensor/SparseTensorSpec pair is accepted.
    control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
    # Dense tensor against a SparseTensorSpec is rejected.
    x2 = constant_op.constant([1])
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i, x2], shape_invariants)
    # RaggedTensor against a SparseTensorSpec is rejected.
    x3 = ragged_factory_ops.constant([[1, 2], [3]])
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i, x3], shape_invariants)
    # dtype mismatch (float counter vs int32 spec) is rejected.
    i2 = constant_op.constant(0.0)
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i2, x], shape_invariants)
# TODO(b/131265085) Remove this decorator when bug is fixed.
@test_util.build_as_function_and_v1_graph
def testWhileShapeInvariantBadType(self):
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, x)
with self.assertRaises((ValueError, TypeError)):
control_flow_ops.while_loop(c, b, [i, x], ["foo", "bar"])
  def _testNestedWhile_1(self, use_gpu):
    """Nested while loops with an inner body op pinned to the CPU.

    Args:
      use_gpu: whether the session may place ops on the GPU.
    """
    with self.cached_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      def cpu_sum(s):
        # Inner loop: sum 0..9 starting from s, with the add on /cpu:0 so the
        # inner graph spans devices when use_gpu is True.
        c = lambda i, s: math_ops.less(i, 10)
        def b(i, s):
          i1 = math_ops.add(i, 1)
          with ops.device("/cpu:0"):
            s1 = math_ops.add(i, s)
          return i1, s1
        _, r_s = control_flow_ops.while_loop(c, b, [n, s])
        return r_s
      c = lambda x: math_ops.less(x, 200)
      b = lambda x: math_ops.add(x, cpu_sum(n))
      r = control_flow_ops.while_loop(c, b, [n])
      # Outer loop adds 45 (= sum 0..9) per iteration: 0, 45, 90, 135, 180, 225.
      self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
  def _testNestedWhile_2(self, use_gpu):
    """Nested whiles where loop-entry and loop-exit edges cross devices.

    Args:
      use_gpu: whether the session may place ops on the GPU.
    """
    # Test the cases that A -> Enter and Exit -> A are partitioned.
    with self.cached_session(use_gpu=use_gpu):
      s0 = constant_op.constant(2.0)
      def inner_loop(s):
        # Doubles s until it reaches at least 20.
        c = lambda s: math_ops.less(s, 20.0)
        def b(s):
          s1 = math_ops.add(s, s)
          return s1
        r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
        return r_s
      outer_c = lambda x: math_ops.less(x, 3000.0)
      def outer_b(x):
        x = logging_ops.Print(x, [x])  # Edge "Print -> Enter" is partitioned
        x = inner_loop(x)
        with ops.device("/cpu:0"):
          x = math_ops.square(x)  # Edge "Exit -> Square" is partitioned
        return x
      r = control_flow_ops.while_loop(
          outer_c, outer_b, [s0], parallel_iterations=1)
      # 2 -> 32 -> 1024 -> 1048576 (inner doubling then squaring, twice).
      self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_1(self):
    """Control dependencies inside a loop body are honored each iteration."""
    with self.cached_session():
      n = constant_op.constant(0)
      r = constant_op.constant(0)
      condition = lambda n_, r_: math_ops.less(n_, 10)
      def body(n_, r_):
        n_ = math_ops.add(n_, 1)
        # The new constant must run after the incoming r_.
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [n_, r_]
      res = control_flow_ops.while_loop(
          condition, body, [n, r], parallel_iterations=1)
      self.assertAllEqual(12, res[1])
  @test_util.run_deprecated_v1
  def testWhileWithControl_2(self):
    """Same as testWhileWithControl_1 but with a single loop variable."""
    with self.cached_session():
      r = constant_op.constant(0)
      condition = lambda r_: math_ops.less(r_, 10)
      def body(r_):
        # The new constant must run after the incoming r_.
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [r_]
      res = control_flow_ops.while_loop(
          condition, body, [r], parallel_iterations=1)
      self.assertAllEqual(12, self.evaluate(res))
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_3(self):
    """An outer control dependency on the whole while_loop still runs."""
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      # The loop is constructed under a control dependency on placeholder b;
      # feeding b must be enough to run the loop.
      with ops.control_dependencies([b]):
        r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
      self.assertEqual(10, sess.run(r, {b: True}))
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_4(self):
    """Like testWhileWithControl_3, with the captured constant re-identified
    inside the body (forcing an op creation under the outer dependency)."""
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      with ops.control_dependencies([b]):
        r = control_flow_ops.while_loop(
            lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
      self.assertEqual(10, sess.run(r, {b: True}))
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_5(self):
    """A control dependency declared *inside* the loop body is honored."""
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      def body(x):
        # Each iteration's add depends on the external placeholder b.
        with ops.control_dependencies([b]):
          return x + c
      r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
      self.assertEqual(10, sess.run(r, {b: True}))
  def testWhileCondWithControl(self):
    """Outer control deps must not leak into cond/while internals."""
    # Ensure that no control edges by an outer control dependency context are
    # added to nodes inside cond/while contexts.
    with self.cached_session() as sess:
      const_true = lambda: constant_op.constant(True)
      const_false = lambda: constant_op.constant(False)
      cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
      body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
      # The no_op dependency applies to the loop as a whole, not to the
      # cond branches constructed inside it.
      with ops.control_dependencies([control_flow_ops.no_op()]):
        loop = control_flow_ops.while_loop(cond, body,
                                           (constant_op.constant(5),))
      self.assertEqual(0, self.evaluate(loop))
  @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
  @test_util.run_v1_only("b/120545219")
  def testWhileCondWithControl_1(self):
    """A variable assignment inside cond inside while runs every iteration."""
    with self.cached_session():
      v = variable_scope.get_variable(
          "v", [], initializer=init_ops.constant_initializer(2))
      i0 = constant_op.constant(0)
      with ops.control_dependencies([i0]):
        def loop_condition(i):
          return i < 4
        def loop_body(i):
          # Always takes the true branch: v <- v^2 each iteration.
          some_cond = control_flow_ops.cond(
              constant_op.constant(True),
              lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
          with ops.control_dependencies([some_cond]):
            return i + 1
      r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(4, self.evaluate(r))
      # 2 squared four times: 2 -> 4 -> 16 -> 256 -> 65536.
      self.assertAllClose(65536.0, self.evaluate(v))
  @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
  @test_util.run_v1_only("b/120545219")
  def testWhileCondExitControl(self):
    """A control edge from a loop's Exit node inside a cond branch works."""
    with self.cached_session():
      v = variables.Variable(1)
      def false_branch():
        cond = lambda i: i < 100
        def body(i):
          x = state_ops.assign(v, i)
          return x + 1
        loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure to handle correctly control edge from Exit to a node.
        with ops.control_dependencies([loop]):
          return constant_op.constant(6.0)
      r = control_flow_ops.cond(
          constant_op.constant(False), lambda: constant_op.constant(1.0),
          false_branch)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(6.0, self.evaluate(r))
      # Last assignment in the loop was v <- 99 (i then became 100).
      self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, self.evaluate(r))
  def _testCondWhile_3(self, use_gpu):
    """Gradient through cond(while_loop, mul) with a CPU-pinned body op.

    Args:
      use_gpu: whether the session may place ops on the GPU.
    """
    with self.cached_session(use_gpu=use_gpu) as sess:
      p = array_ops.placeholder(dtypes.bool)
      n = constant_op.constant(0.0)
      def c(x):
        return math_ops.less(x, 10.0)
      def b(x):
        # Pin the add so the loop graph spans devices when use_gpu is True.
        with ops.device("/cpu:0"):
          x1 = math_ops.add(x, 1.0)
        return x1
      r = control_flow_ops.cond(p,
                                lambda: control_flow_ops.while_loop(c, b, [n]),
                                lambda: math_ops.multiply(n, 2.0))
      r1 = gradients_impl.gradients(r, [n])
      self.assertEqual(10., sess.run(r, {p: True}))
      self.assertEqual([1.0], sess.run(r1, {p: True}))
      self.assertEqual(0.0, sess.run(r, {p: False}))
      self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.run_deprecated_v1
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
  def testWhileCond_1(self):
    """A cond as the entire loop body (always taking the add branch)."""
    with self.cached_session():
      i = ops.convert_to_tensor(0, name="i")
      n = ops.convert_to_tensor(10, name="n")
      one = ops.convert_to_tensor(1, name="one")
      c = lambda x: math_ops.less(x, n)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: control_flow_ops.cond(
          constant_op.constant(True),
          lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
      # pylint: enable=undefined-variable
      r = control_flow_ops.while_loop(c, b, [i])
      self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
  def testWhileCond_3(self):
    """Loop body is a cond with a tensor-valued (non-constant) predicate."""
    with self.cached_session():
      n = ops.convert_to_tensor(0)
      c = lambda x: math_ops.less(x, 10)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
                                          lambda: math_ops.add(x, 1),
                                          lambda: math_ops.subtract(x, 1))
      # pylint: enable=undefined-variable
      r = control_flow_ops.while_loop(c, b, [n])
      self.assertAllEqual(10, self.evaluate(r))
  @test_util.run_deprecated_v1
  def testWhileCondGradMultiDevice(self):
    """First and second-order gradients of while+cond split across devices."""
    config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                    allow_soft_placement=True)
    with self.cached_session(use_gpu=True, config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x_init = constant_op.constant(1.0)
      with ops.device("/cpu:0"):
        z = control_flow_ops.while_loop(
            lambda i, _: i < 3,
            lambda i, x: (i + 1, control_flow_ops.cond(
                pred, lambda: x * 2.0, lambda: 10.0)),
            [0, x_init])
      with ops.device("/cpu:1"):
        grad = gradients_impl.gradients(z, x_init)[0]
      with ops.device("/cpu:0"):
        grad_grad = gradients_impl.gradients(grad, x_init)[0]
      # pred=True: z = x_init * 2^3, so dz/dx = 8. pred=False: z is the
      # constant 10.0, so dz/dx = 0.
      self.assertEqual(sess.run(grad, {pred: True}), 8.0)
      self.assertEqual(sess.run(grad, {pred: False}), 0.0)
      if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
        return
      # z is linear in x_init, so the second derivative is 0 either way.
      self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
      self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)
  # NOTE: It is ok to have parallel_iterations > 1
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_deprecated_v1
  def testWhileUpdateVariable_1(self):
    """scatter_update on a variable inside the loop sticks via a group dep."""
    with self.cached_session():
      select = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)
      def loop_iterator(j):
        return math_ops.less(j, 3)
      def loop_body(j):
        ns = state_ops.scatter_update(select, j, 10.0)
        nj = math_ops.add(j, 1)
        # Force the update to execute before the counter advances.
        op = control_flow_ops.group(ns)
        nj = control_flow_ops.with_dependencies([op], nj)
        return [nj]
      r = control_flow_ops.while_loop(
          loop_iterator, loop_body, [n], parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(3, self.evaluate(r))
      result = self.evaluate(select)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_2(self):
    """Two scatter_updates grouped into one control dependency both stick."""
    with self.cached_session():
      select1 = variables.Variable([3.0, 4.0, 5.0])
      select2 = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)
      def loop_iterator(j):
        return math_ops.less(j, 3)
      def loop_body(j):
        ns1 = state_ops.scatter_update(select1, j, 10.0)
        ns2 = state_ops.scatter_update(select2, j, 10.0)
        nj = math_ops.add(j, 1)
        # Both updates must execute before the counter advances.
        op = control_flow_ops.group(ns1, ns2)
        nj = control_flow_ops.with_dependencies([op], nj)
        return [nj]
      r = control_flow_ops.while_loop(
          loop_iterator, loop_body, [n], parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(3, self.evaluate(r))
      result1 = self.evaluate(select1)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
      result2 = self.evaluate(select2)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_3(self):
    """The scatter_update result is threaded through as a loop variable."""
    with self.cached_session():
      select = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)
      def loop_iterator(j, _):
        return math_ops.less(j, 3)
      def loop_body(j, _):
        # Return the update op's output; no explicit dependency needed.
        ns = state_ops.scatter_update(select, j, 10.0)
        nj = math_ops.add(j, 1)
        return [nj, ns]
      r = control_flow_ops.while_loop(
          loop_iterator,
          loop_body, [n, array_ops.identity(select)],
          parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      result = r[1]
    self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_4(self):
    """assign_add defined *outside* the loop re-executes each iteration."""
    with self.cached_session():
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())
      c = constant_op.constant(0, name="c")
      asn1 = state_ops.assign_add(var_a, 1, name="a_add")
      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)
      # Loop body
      def loop_body(i):
        # Referencing asn1 pulls the outer assign into the loop body.
        asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
        return ni
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [c], parallel_iterations=1)
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_b))
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_5(self):
    """The loop condition itself reads a variable mutated by the body."""
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())
      # Change condition to check var_b
      def pred(_):
        return math_ops.less(var_b, 10)
      # Change body to increment var_b
      def loop_body(i):
        asn1 = state_ops.assign_add(
            var_a, constant_op.constant(1), name="a_add")
        asn2 = state_ops.assign_add(
            var_b, constant_op.constant(1), name="b_add")
        with ops.control_dependencies([asn1, asn2]):
          inc_b = array_ops.identity(var_b)
        return inc_b
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [var_b], parallel_iterations=1, name="loop")
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_a))
      self.assertEqual(10, self.evaluate(var_b))
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_6(self):
    """Chained variable updates ordered by control dependencies in the body."""
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      c = constant_op.constant(0)
      self.evaluate(variables.global_variables_initializer())
      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)
      # Loop body
      def loop_body(i):
        # var_a is incremented first, then added into var_b, so var_b
        # accumulates 1 + 2 + ... + 10 = 55.
        asn1 = state_ops.assign_add(var_a, 1, name="a_add")
        with ops.control_dependencies([asn1]):
          asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
          return ni
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [c], parallel_iterations=1, name="loop")
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(55, self.evaluate(var_b))
      self.assertEqual(10, self.evaluate(var_a))
  @test_util.run_v1_only("b/120545219")
  def testWhileQueue_1(self):
    """Enqueueing to a FIFOQueue inside a loop preserves iteration order."""
    with self.cached_session():
      q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
      i = constant_op.constant(0)
      def c(i):
        return math_ops.less(i, 10)
      def b(i):
        ni = math_ops.add(i, 1)
        # The enqueue must complete before the counter advances.
        ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
        return ni
      r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
      self.assertEqual([10], self.evaluate(r))
      for i in xrange(10):
        self.assertEqual([i], self.evaluate(q.dequeue()))
  @test_util.run_v1_only("b/120545219")
  def testWhileTimeOut(self):
    """An infinite loop trips the session's 1ms run deadline."""
    run_options = config_pb2.RunOptions(timeout_in_ms=1)
    with self.cached_session() as sess:
      n = constant_op.constant(0)
      c = lambda x: True
      b = lambda x: math_ops.add(x, 1)
      r = control_flow_ops.while_loop(c, b, [n])
      with self.assertRaises(errors_impl.DeadlineExceededError):
        sess.run(r, options=run_options)
  @test_util.disable_control_flow_v2("b/117119329 (stack)")
  @test_util.run_v1_only("b/120545219")
  def testWhileStack_1(self):
    """One loop pushes 0..9 onto a stack; a second loop pops and sums them."""
    with self.cached_session():
      s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
      i = constant_op.constant(0)
      def c(i):
        return math_ops.less(i, 10)
      def b(i):
        ni = math_ops.add(i, 1)
        ni = control_flow_ops.with_dependencies(
            [gen_data_flow_ops.stack_push_v2(s, i)], ni)
        return ni
      r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
      x = constant_op.constant(0)
      def c1(i, _):
        return math_ops.greater(i, 0)
      def b1(i, x):
        ni = math_ops.subtract(i, 1)
        nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
        return [ni, nx]
      _, rx = control_flow_ops.while_loop(
          c1,
          b1, [r, x],
          [r.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)
      # Sum of 0..9.
      self.assertEqual(45, self.evaluate(rx))
  def _testWhileGrad_ColocateGradients(self, colocate):
    """Checks device placement of gradient ops for a while loop.

    Args:
      colocate: value passed to `colocate_gradients_with_ops`; when True the
        Square gradient ops should share the forward Square op's device.
    """
    gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
    ) else "/device:CPU:0"
    graph = ops.Graph()
    with graph.as_default():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      def b(x):
        with ops.device(gpu_dev_name):
          return math_ops.square(x)
      loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = gradients_impl.gradients(
          loop, v, colocate_gradients_with_ops=colocate)[0]
    r_ops = graph.get_operations()
    r_devices = [(op.name, op.device) for op in r_ops]
    self.assertTrue(any("Square" in op.name for op in r_ops))
    for (name, dev) in r_devices:
      if not colocate and name.endswith("Square"):
        # Only forward graph contain gpu in Square device
        self.assertTrue(gpu_dev_name in dev)
      elif colocate and "Square" in name:
        # Forward and backward graphs contain gpu in Square/Square_grad devices
        self.assertTrue(gpu_dev_name in dev)
      else:
        self.assertFalse(gpu_dev_name in dev)
    with self.session(graph=graph) as sess:
      # 2 -> 4 -> 16 -> 256; d(256)/dv = 2*2*4*... = 1024.
      self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Square(self):
    """Gradient of a repeated-squaring loop consumed through a cond."""
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
      r = gradients_impl.gradients(r, v)[0]
      # r = v^8 at v=2, so dr/dv = 8 * 2^7 = 1024.
      self.assertAllClose(1024.0, self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Shape(self):
    """Gradient w.r.t. a placeholder with unknown dim keeps shape [None]."""
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32, shape=[None])
      v = constant_op.constant([2.0], name="v")
      n = constant_op.constant(0, name="n")
      c = lambda i, v: math_ops.less(i, 5)
      b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
      r = control_flow_ops.while_loop(
          c,
          b, [n, v],
          [n.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)
      r = gradients_impl.gradients(r[1], x)[0]
      self.assertEqual([None], r.get_shape().as_list())
      # r[1] = 2 * x^5 elementwise, so dr/dx = 10 * x^4.
      self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
  @test_util.run_deprecated_v1
  def testWhileGrad_BaseShape(self):
    """A zero-iteration loop contributes nothing to the combined gradient."""
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32, [None])
      v0 = constant_op.constant([2.0, 2.0], name="v")
      # Condition is always False: the loop never runs, r == v0.
      c = lambda v: constant_op.constant(False)
      b = lambda v: math_ops.multiply(v, x)
      r = control_flow_ops.while_loop(c, b, [v0])
      y = math_ops.square(x)
      # Only y = x^2 depends on x, so the gradient is 2*x.
      r = gradients_impl.gradients([r, y], x)[0]
      self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
  @test_util.run_deprecated_v1
  def testWhileGradAfterSessionRun(self):
    """Gradients can still be taken after the forward loop was evaluated."""
    v0 = constant_op.constant(2.)
    r = control_flow_ops.while_loop(
        lambda _: True, lambda v: v * v, [v0], maximum_iterations=3)
    # 2 -> 4 -> 16 -> 256 after three squarings.
    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    # r = v0^8, so dr/dv0 = 8 * 2^7 = 1024.
    self.assertAllClose(grad, 1024.)
  @test_util.run_deprecated_v1
  def testNestedWhileGradAfterSessionRun(self):
    """Same as testWhileGradAfterSessionRun, with a nested inner loop."""
    v0 = constant_op.constant(2.)
    def body(v):
      # Inner loop computes v^2 (multiplies 1.0 by v twice).
      inner_v0 = constant_op.constant(1.)
      return control_flow_ops.while_loop(
          lambda _: True, lambda x: x * v, [inner_v0], maximum_iterations=2)
    r = control_flow_ops.while_loop(
        lambda _: True, body, [v0], maximum_iterations=3)
    # v -> v^2 three times: 2^8 = 256.
    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    # dr/dv0 = 8 * 2^7 = 1024.
    self.assertAllClose(grad, 1024.)
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_MultipleUses(self):
    """The loop output used twice (r*r) accumulates gradient correctly."""
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.multiply(r, r)
      r = gradients_impl.gradients(r, v)[0]
      # r = (v^8)^2 = v^16 at v=2: dr/dv = 16 * 2^15 = 524288.
      self.assertEqual(524288.0, self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_LoopAdd(self):
    """The loop output added to itself doubles the gradient."""
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.add(r, r)
      r = gradients_impl.gradients(r, v)[0]
      # r = 2 * v^8 at v=2: dr/dv = 2 * 8 * 2^7 = 2048.
      self.assertAllClose(2048.0, self.evaluate(r))
  def _testWhileGrad_Mul(self, use_gpu, p_iters):
    """Gradient of repeated multiplication w.r.t. both factor and init value.

    Args:
      use_gpu: whether the session may place ops on the GPU.
      p_iters: parallel_iterations setting for the loop.
    """
    with self.cached_session(use_gpu=use_gpu) as sess:
      a = constant_op.constant(3.0, name="a")
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
      grad_a, grad_v = gradients_impl.gradients(r, [a, v])
      grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
      # r = v * a^4 = 162: dr/da = 4 * v * a^3 = 216, dr/dv = a^4 = 81.
      self.assertAllClose(216.0, grad_a_val)
      self.assertAllClose(81.0, grad_v_val)
@test_util.run_deprecated_v1
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
  def testWhileGradInControlDeps(self):
    """gradients() works when called under a control-dependency scope."""
    @def_function.function
    def f():
      x_init = constant_op.constant(2.)
      loop_cond = lambda i, x: math_ops.less(i, 2)
      loop_body = lambda i, x: [i + 1, x**2]
      _, x = control_flow_ops.while_loop(loop_cond, loop_body, [0, x_init])
      with ops.control_dependencies([x]):
        (grad,) = gradients_impl.gradients(x, x_init)
      return grad
    # x = x_init^4, so dx/dx_init = 4 * x_init^3.
    self.assertAllEqual(f(), 4. * 2.**3)  # 4 * x_init ^ 3
  @test_util.run_deprecated_v1
  def testTfFunctionInV1WhileLoop(self):
    """A tf.function body that creates a Const works under graph inlining."""
    # This test specifically tests that creating a Const node inside a
    # tf.function inside a v1 while_loop while inlining is turned on works.
    config = opt_cfg()
    assert config.graph_options.optimizer_options.do_function_inlining
    with session.Session(config=config):
      @def_function.function
      def loop_body(i):
        # Here we create the const.
        return i + 1.
      loop_cond = lambda i: True
      x = control_flow_ops.while_loop(
          loop_cond, loop_body, [0.], maximum_iterations=5)
      self.assertAllEqual(x, 5.)
  def _testNestedWhileCondWhileGrad(self, use_gpu):
    """Gradient through while(cond(while(...))) nesting.

    Args:
      use_gpu: whether the session may place ops on the GPU.
    """
    with self.cached_session(use_gpu=use_gpu):
      v = constant_op.constant(1.0)
      def inner_loop(s):
        # Four doublings: returns (4, s * 16).
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return control_flow_ops.while_loop(c, b, [z, s])
      c = lambda x: math_ops.less(x, 128.0)
      def b(x):
        # Always takes the true branch: x <- (16 x)^2.
        return control_flow_ops.cond(
            constant_op.constant(True),
            lambda: math_ops.square(inner_loop(x)[1]),
            lambda: math_ops.multiply(x, 2.0))
      r = control_flow_ops.while_loop(c, b, [v])
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(512.0, self.evaluate(r))
  @test_util.run_deprecated_v1
  def testNestedWhileCondWhileGrad(self):
    """CPU variant of the nested while/cond/while gradient test."""
    self._testNestedWhileCondWhileGrad(use_gpu=False)
  @test_util.run_deprecated_v1
  def testNestedWhileCondWhileGradGpu(self):
    """GPU variant of the nested while/cond/while gradient test."""
    self._testNestedWhileCondWhileGrad(use_gpu=True)
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Variable(self):
    """Gradient of a loop w.r.t. a captured ref Variable."""
    with self.cached_session():
      a = variables.Variable(3.0)
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = gradients_impl.gradients(r, a)
      self.evaluate(variables.global_variables_initializer())
      # r = v * a^4: dr/da = 4 * v * a^3 = 216 at a=3, v=2.
      self.assertAllClose(216.0, r[0])
  @test_util.run_deprecated_v1
  def testWhileGrad_ResourceVariable(self):
    """Gradient of a loop w.r.t. a captured ResourceVariable."""
    with self.cached_session():
      a = resource_variable_ops.ResourceVariable(3.0)
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      g = gradients_impl.gradients(r, a)
      self.evaluate(variables.global_variables_initializer())
      # Same math as the ref-variable case: dr/da = 4 * v * a^3 = 216.
      self.assertAllClose(216.0, g[0])
  def testWhileGrad_EagerResourceVariable(self):
    """Gradient through a loop capturing a variable, inside a defun, eagerly."""
    with context.eager_mode():
      a = resource_variable_ops.ResourceVariable(
          np.ones([2, 2], dtype=np.float32))
      v = constant_op.constant(1.0)
      @eager_function.defun
      def fn():
        # reduce_sum(a) == 4, so two iterations give r = 16 * v^2.
        r = control_flow_ops.while_loop(
            lambda i, _: i < 2,
            lambda i, x: (i + 1, x * math_ops.reduce_sum(a) * v),
            [0, 1.0])[1]
        return gradients_impl.gradients(r, [v])[0]
      # dr/dv = 32 * v = 32 at v=1.
      self.assertEqual(self.evaluate(fn()), 32.)
  def testWhileGrad_ResourceVarInFunctionCall(self):
    """sparse_read gradients flow through a tf.function called in a loop."""
    @def_function.function
    def foo(x, var):
      return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
    @def_function.function
    def bar(var):
      r = control_flow_ops.while_loop(
          lambda i, _: i < 2,
          lambda i, x: (i + 1, foo(x, var)),
          [0, 0.0])[1]
      return gradients_impl.gradients(r, var)[0]
    var = resource_variable_ops.ResourceVariable([1., 2., 3., 4.])
    self.evaluate(variables.global_variables_initializer())
    grad = self.evaluate(bar(var))
    # Indices 1 and 3 are each read twice (2 loop iterations).
    self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInNestedFunctionCall(self):
@def_function.function
def foo(x, var):
return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
@def_function.function
def foo2(x, var):
return foo(x, var)
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo2(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInLoopInFunctionCall(self):
if test.is_gpu_available():
self.skipTest("b/128635252")
@def_function.function
def foo(x, var):
return control_flow_ops.while_loop(
lambda j, _: j < 3,
lambda j, y: (j + 1,
y + math_ops.reduce_sum(var.sparse_read([1, 2]))),
[0, x])[1]
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 6., 6., 0.])
def testWhileCondGrad_ResourceVarInFunctionCall(self):
@def_function.function
def foo(x, var):
return x + var.sparse_read([1])[0]
def body(i, x):
return (i + 1, control_flow_ops.cond(
math_ops.equal(i % 2, 0),
lambda: foo(x, var1),
lambda: foo(x, var2)))
@def_function.function
def bar(var1, var2):
r = control_flow_ops.while_loop(
lambda i, _: i < 4, body, [0, 0.0])
return gradients_impl.gradients(r, [var1, var2])
var1 = resource_variable_ops.ResourceVariable([1., 2., 3.])
var2 = resource_variable_ops.ResourceVariable([4., 5.])
self.evaluate(variables.global_variables_initializer())
grads = self.evaluate(bar(var1, var2))
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[0]), [0., 2., 0.])
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[1]), [0., 2.])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVarSparseRead(self):
  """Gradient aggregating IndexedSlices from sparse_read with dense grads."""
  # NOTE(skyewm): this test is interesting because the gradient is the
  # aggregation result of IndexedSlices and Tensors.
  var = resource_variable_ops.ResourceVariable(np.ones(5),
                                               dtype=dtypes.float32)
  r = control_flow_ops.while_loop(
      lambda i, _: i < 3,
      lambda i, x: (i + 1, x * math_ops.reduce_sum(var.sparse_read([1, 3]))),
      [0, constant_op.constant(1.0)])[1]
  grad = gradients_impl.gradients(r, var)[0]

  self.evaluate(variables.global_variables_initializer())
  grad_val = self.evaluate(grad)
  arr = gradient_checker_v2._to_numpy(grad_val)
  self.assertAllEqual(arr, [0., 12., 0., 12., 0.])

@test_util.run_deprecated_v1
def testWhileGrad_MultiResourceVarSparseRead(self):
  """Multiple variables sparse-read in one body; mixed grad aggregation."""
  # NOTE(skyewm): this test is interesting because the gradient is the
  # aggregation result of IndexedSlices and Tensors.
  var1 = resource_variable_ops.ResourceVariable(np.ones(5),
                                                dtype=dtypes.float32)
  var2 = resource_variable_ops.ResourceVariable(np.ones(3),
                                                dtype=dtypes.float32)
  x1_init = constant_op.constant([0., 0.])
  x2_init = constant_op.constant(1.)
  x3_init = constant_op.constant(1.)

  def body(i, unused_x1, x2, x3):
    y1 = var1.sparse_read([1, 3])
    y2 = x2 * 2
    y3 = x3 * math_ops.reduce_sum(var2.sparse_read([0]))
    return i + 1, y1, y2, y3

  r = control_flow_ops.while_loop(
      lambda i, x1, x2, x3: i < 3, body,
      [0, x1_init, x2_init, x3_init])[1:]
  var1_grad, var2_grad = gradients_impl.gradients(r, [var1, var2])

  self.evaluate(variables.global_variables_initializer())
  var1_grad_val = self.evaluate(var1_grad)
  var2_grad_val = self.evaluate(var2_grad)
  self.assertAllEqual(gradient_checker_v2._to_numpy(var1_grad_val),
                      [0., 1., 0., 1., 0.])
  self.assertAllEqual(gradient_checker_v2._to_numpy(var2_grad_val),
                      [3., 0., 0.])

def testWhileGrad_Gather(self):
  """Gather inside the body with fan-out back onto the gathered tensor."""
  # NOTE(skyewm): this test is interesting because the gather gradient
  # function returns an IndexedSlices.
  @tf_function_in_tf2
  def fn():
    x = constant_op.constant([1., 1., 1., 1., 1.])
    y = control_flow_ops.while_loop(
        lambda i, _: i < 3,
        lambda i, x: (i + 1, x + array_ops.gather(x, [0])),
        [0, x[:1]])[1]
    z = y * 3.0
    grad = gradients_impl.gradients(z, x)[0]
    return y, grad

  y, grad = fn()
  self.assertEqual(self.evaluate(y), 8.)
  self.assertAllEqual(self.evaluate(grad), [24., 0., 0., 0., 0.])

def testWhileGrad_GatherNoFanOut(self):
  """Gather inside the body without fan-out; grad flows to index 0 only."""
  # NOTE(skyewm): this test is interesting because the gather gradient
  # function returns an IndexedSlices.
  @tf_function_in_tf2
  def fn():
    x = constant_op.constant([1., 1., 1., 1., 1.])
    y = control_flow_ops.while_loop(
        lambda i, _: i < 3,
        lambda i, x: (i + 1, array_ops.gather(x, [0])),
        [0, x[:1]])[1]
    z = y * 3.0
    grad = gradients_impl.gradients(z, x)[0]
    return y, grad

  y, grad = fn()
  self.assertEqual(self.evaluate(y), 1.)
  self.assertAllEqual(self.evaluate(grad), [3., 0., 0., 0., 0.])
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self):
  """Takes a while-loop gradient inside a cond branch."""
  with self.cached_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def fn1():
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)[0]

    r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
    # 9 iterations each add x once, so dr/dx = 9.
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))

@test_util.disable_control_flow_v2("b/116340060")
@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self):
  """Gradient wrt the initial loop value inside the body is rejected."""
  with self.cached_session():
    x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
    y = x + 1

    def body(i, v):
      z = v * 2
      return i + 1, gradients_impl.gradients(z, x)[0]

    with self.assertRaisesRegex(
        ValueError,
        "Cannot compute gradient inside while loop with respect to op 'x'. "
        "We do not support taking the gradient wrt or through the initial "
        "value of a loop variable. Gradients can be computed through "
        "loop invariants or wrt the input parameters to the loop body."):
      control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])

@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self):
  """Takes a while-loop gradient inside another while loop's body."""
  with self.cached_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def b1(n):
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)

    r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
                                    [tensor_shape.unknown_shape()])
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))

@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self):
  """cond gradient computed inside a while nested in another while."""

  def outer_body(i, x):
    _, x = control_flow_ops.while_loop(
        lambda j, x: j < 3, inner_body, [0, 0.0])
    return i + 1, x

  def inner_body(j, x):
    y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
    return j + 1, gradients_impl.gradients(y, x)[0]

  i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])

  with self.cached_session() as sess:
    i_val, x_val = self.evaluate([i, x])
    self.assertEqual(i_val, 3)
    self.assertAllClose(x_val, 1.0)

@test_util.run_gpu_only
def testGpuResourceAccess(self):
  """Resource variable read from a cond inside a while, on GPU."""
  with ops.device(test.gpu_device_name()):
    var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

    @def_function.function
    def foo():
      return control_flow_ops.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1, control_flow_ops.cond(
              constant_op.constant(True),
              lambda: x + var,
              lambda: x)),
          [0, 0.0])[1]

    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(self.evaluate(foo()), 9.0)

def testNestedResourceAccess(self):
  """Resource read inside while-in-cond-in-while; checks value and grad."""
  var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

  @eager_function.defun
  def test_fn():
    x = constant_op.constant(0.0)
    r = control_flow_ops.while_loop(
        # Outer loop condition
        lambda i, y: i < 2,
        # Outer loop body
        lambda i, y: (i + 1, y + control_flow_ops.cond(
            constant_op.constant(True),
            # True branch
            lambda: control_flow_ops.while_loop(
                # Inner loop condition
                lambda j, z: j < 3,
                # Inner loop body
                lambda j, z: (j + 1, z + math_ops.square(var)),
                # Inner initial loop value
                [0, y])[1],
            # False branch
            lambda: (0.0))),
        # Outer initial loop value
        [0, x])[1]

    grad = gradients_impl.gradients(r, x)[0]
    return r, grad

  self.evaluate(variables.global_variables_initializer())
  r, grad = self.evaluate(test_fn())
  # 2 * 3 * 3^2
  self.assertEqual(r, 81.0)
  # v1 control flow gets the wrong answer!!!
  # Gradient computation:
  #   f(x) = x + 3^2
  #   inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
  #   g(x) = x + inner_loop(x) = 2x + 27
  #   outer_loop(x) = g(g(x)) = 4x + 81
  #   outer_loop'(x) = 4
  # Note that v1 control flow gets 4.0 as well if the cond is removed.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    self.assertEqual(grad, 4.0)
def testWhile_NestedInput(self):
  """while_loop preserves nested loop-var structure (namedtuple/tuple).

  Checks both the returned Python structure and the final values after
  100 iterations.
  """
  with self.cached_session() as sess:
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0), constant_op.constant(3.0)),
        constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, lv2):
      lv0 = named(a=lv0.a + 1, b=lv0.b)
      lv1 = (lv1[0] + 1, lv1[1])
      lv2 += 2
      return [lv0, lv1, lv2]

    r = control_flow_ops.while_loop(c, b, loop_vars)

    # assertIsInstance reports the actual type on failure, unlike the
    # bare assertTrue(isinstance(...)) it replaces.
    self.assertIsInstance(r, list)
    self.assertIsInstance(r[0], named)
    self.assertIsInstance(r[1], tuple)
    self.assertIsInstance(r[2], ops.Tensor)
    r_flattened = nest.flatten(r)
    self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
                     self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self):
  """A body returning the wrong number of loop vars raises ValueError."""
  with self.cached_session():
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0), constant_op.constant(3.0)),
        constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, _):
      return [lv0, lv1]

    with self.assertRaisesRegex(ValueError, "the same number of elements"):
      control_flow_ops.while_loop(c, b, loop_vars)

@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self):
  """Gradients of multiple loop outputs wrt multiple loop inputs."""
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.add(x, y)
      x1 = math_ops.multiply(x, y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)

    r = gradients_impl.gradients([rx, ry], x)
    self.assertAllClose(304.0, r[0])
    r = gradients_impl.gradients([rx, ry], y)
    self.assertAllClose(124.0, r[0])
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(295.0, r[0])
    r = gradients_impl.gradients([rx], y)
    self.assertAllClose(120.0, r[0])

@test_util.run_deprecated_v1
def testWhileGrad_Dependency(self):
  """Gradient when both loop outputs are requested vs just one."""
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")

    c = lambda i, x: math_ops.less(i, 10)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)

    # The counter output ri contributes nothing to d(rx)/dx.
    r = gradients_impl.gradients([ri, rx], x)
    self.assertAllClose(1024.0, r[0])
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0])

@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self):
  """back_prop=False makes the loop contribute no gradient path."""
  with self.cached_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
    r = math_ops.add(r, v)
    r = gradients_impl.gradients(r, v)
    # Only the direct `+ v` term contributes: gradient is 1.
    self.assertAllClose(1.0, r[0])

@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self):
  """Gradient through a loop var the loop merely passes through."""
  with self.cached_session() as sess:
    variable = variables.Variable(array_ops.ones([2, 3]))
    duration = array_ops.zeros([], dtype=dtypes.int32)

    def cond(duration, tensor, _):
      del tensor
      return duration < 10

    def body(duration, tensor, _):
      return (duration + 1, tensor, tensor)

    loop_vars = [duration, variable, variable]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[2])
    grad = gradients_impl.gradients(cost, [variable])
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
@test_util.run_deprecated_v1
def testWhileGrad_Const(self):
  """Gradient wrt a constant the loop discards is zero."""
  with self.cached_session() as sess:
    c0 = constant_op.constant(0.0, name="c0")
    c1 = constant_op.constant(1.0, name="c1")
    duration = constant_op.constant(0, name="t")

    def cond(duration, _):
      return duration < 1

    def body(duration, _):
      # c0 is replaced by c1, so cost does not depend on c0.
      return duration + 1, c1

    loop_vars = [duration, c0]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[1])
    grad = gradients_impl.gradients(cost, [c0])
    self.assertAllClose(0.0, sess.run(grad[0]))

@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self):
  """Gradient through two while loops chained serially."""
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")

    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    _, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)

    # Two loops of 5 doublings each: d(x * 2^10)/dx = 1024.
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0])

@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self):
  """Gradient through two independent while loops summed together."""
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")

    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    _, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    rx = math_ops.add(r1, r2)

    # Each loop contributes 2^5 = 32; sum is 64.
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(64.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
  """Gradient through one loop output with a control dep on another."""
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(1.0, name="y")
    c = lambda i, *_: math_ops.less(i, 1, name="cond_less")

    def b(i, xi, yi):
      # return (i + 1, xi, xi + yi)
      return (math_ops.add(i, 1, name="inc"), array_ops.identity(
          xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))

    _, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
    with ops.control_dependencies([x_f]):
      y_f_d = array_ops.identity(y_f, name="y_f_d")

    self.assertAllClose(2.0, self.evaluate(y_f_d))  # y_f_d = 1.0 + 1.0
    g = gradients_impl.gradients([y_f_d], [x])[0]
    # assertIsNotNone is the idiomatic unittest check and prints the
    # value on failure, unlike assertTrue(g is not None).
    self.assertIsNotNone(g)
    self.assertAllClose(1.0,
                        self.evaluate(g))  # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
  """Helper: gradient through a while loop nested in a while body."""
  with self.cached_session(use_gpu=use_gpu):
    v = constant_op.constant(1.0)

    def inner_loop(s):
      c = lambda x: math_ops.less(x, 4.0)
      b = lambda x: math_ops.multiply(x, 2.0)
      return control_flow_ops.while_loop(c, b, [s])

    c = lambda x: math_ops.less(x, 2.0)
    b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(8.0, self.evaluate(r))

@test_util.run_deprecated_v1
def testNestedWhileGrad_Simple(self):
  """Runs the nested-while gradient helper on CPU and GPU."""
  self._testNestedWhileGrad_Simple(use_gpu=False)
  self._testNestedWhileGrad_Simple(use_gpu=True)

@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self):
  """Gradient through two inner while loops run serially per outer step."""
  with self.cached_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(256.0, self.evaluate(r))

@test_util.run_deprecated_v1
def testNestedWhileGrad_ParallelInner(self):
  """Gradient through two inner while loops multiplied together."""
  with self.cached_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(512.0, self.evaluate(r))

@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self):
  """Nested map_fn loops with parallel iterations, trained end-to-end."""
  # Make sure the stack pushes and pops of an inner loop are executed in
  # the sequential order of the iterations of its outer loop.
  with self.cached_session() as sess:

    def inner_loop(t):
      fn = lambda n: n + math_ops.square(var)
      return map_fn.map_fn(fn=fn, elems=t, parallel_iterations=10)

    def outer_loop(inp):
      return map_fn.map_fn(
          fn=inner_loop, elems=inp, parallel_iterations=10)

    var = variables.Variable(constant_op.constant(3.0))
    inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    res = outer_loop(inp)
    optimizer = adam.AdamOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(train_op)
    # One Adam step with lr=0.001 moves the variable from 3.0 to ~2.999.
    self.assertAllClose(2.999, var.read_value())

def _testWhileCondGrad_Simple(self, use_gpu):
  """Helper: gradient through a cond nested in a while body."""
  with self.cached_session(use_gpu=use_gpu):
    v = ops.convert_to_tensor(2.0, name="v")
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(1024.0, self.evaluate(r))

@test_util.run_deprecated_v1
def testWhileCondGrad_Simple(self):
  """Runs the while/cond gradient helper on CPU and GPU."""
  self._testWhileCondGrad_Simple(use_gpu=False)
  self._testWhileCondGrad_Simple(use_gpu=True)
@test_util.run_deprecated_v1
def testWhileCondGrad_UnknownShape(self):
  """While/cond gradient when the loop variable has unknown shape."""
  with self.cached_session() as sess:
    v = array_ops.placeholder(dtypes.float32)
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    r = sess.run(r, feed_dict={v: 2.0})
    self.assertAllClose(1024.0, r)

@test_util.run_deprecated_v1
def testWhileGrad_Concat(self):
  """Gradient through concat of a shape-growing loop variable."""
  with self.cached_session() as sess:
    x = variable_scope.get_variable("x", initializer=[[1., 2.]])
    i0 = constant_op.constant(0)
    h0 = array_ops.zeros([0, 2])

    def condition(i, _):
      return i < 2

    def body(i, h):
      return i + 1, array_ops.concat([h, x], 0)

    # shape_invariants allow h to grow along dim 0 each iteration.
    _, h = control_flow_ops.while_loop(
        condition, body, [i0, h0],
        [i0.get_shape(), tensor_shape.TensorShape([None, 2])])
    s = math_ops.reduce_sum(h)

    optimizer = gradient_descent.GradientDescentOptimizer(0.01)
    op = optimizer.minimize(s)

    self.evaluate(variables.global_variables_initializer())
    self.evaluate(op)
    self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))

@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self):
  """While loop over ref-typed variables with explicit grad_ys."""
  with self.cached_session() as sess:
    x = variables.VariableV1(0.)._ref()  # pylint: disable=protected-access
    i = constant_op.constant(0)
    c = lambda i, x: math_ops.less(i, 10)

    self.assertEqual(x.dtype, dtypes.float32_ref)

    def body(i, x):
      self.assertEqual(x.dtype, dtypes.float32_ref)
      return [i + 1, gen_array_ops.ref_identity(x)]

    r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)

    grad_ys = [variables.VariableV1(73)._ref()]  # pylint: disable=protected-access
    grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)

    self.evaluate(variables.global_variables_initializer())

    self.assertEqual(r[0].dtype, dtypes.int32)
    self.assertEqual(r[1].dtype, dtypes.float32_ref)

    value_i, value_x, value_x_grad = sess.run(r + grad)

    self.assertEqual(10, value_i)
    self.assertEqual(0, value_x)
    # ref_identity passes the incoming grad_ys value straight through.
    self.assertEqual(73, value_x_grad)

@test_util.deprecated_graph_mode_only
def testWhileGrad_IndexedSlices(self):
  """Loop variable is an IndexedSlices; gradient flows to its values."""
  with self.cached_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([0, 3], name="indices")
    shape = constant_op.constant([10], name="dense_shape")
    i = constant_op.constant(0)
    x = ops.IndexedSlices(values, indices, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1,
          ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    # 10 doublings: gradient is 2^10 per element.
    self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))

@test_util.deprecated_graph_mode_only
def testWhileGrad_SparseTensor(self):
  """Loop variable is a SparseTensor; gradient flows to its values."""
  with self.cached_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant(
        [[0], [3]], dtype=dtypes.int64, name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1,
          sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testCallGradInLoop(self):
  """Calls gradients() inside a while body and accumulates the result."""
  with self.cached_session() as sess:
    i0 = constant_op.constant(0)
    params = constant_op.constant(5.0)
    params_1 = math_ops.square(params)

    def c(i, _):
      return i < 10

    def b(i, x):
      data = constant_op.constant([1.0, 2.0, 3.0])
      data = math_ops.multiply(data, params_1)
      x1 = x + gradients_impl.gradients(data, params)[0]
      return i + 1, x1

    output_grad = control_flow_ops.while_loop(
        c, b, [i0, constant_op.constant(0.0)])
    # d(sum(data))/d(params) = 6 * 2 * params = 60 per iteration, 10 iters.
    self.assertAllClose(600.0, self.evaluate(output_grad)[1])

@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
  """Gradient through map_fn (TensorArray) inside a while loop."""
  with self.cached_session() as sess:
    param = constant_op.constant(2.0)
    n0 = constant_op.constant(0)
    y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")

    def c(i, _):
      return i < 10

    def b(i, y):
      return [
          i + 1,
          map_fn.map_fn(lambda x: math_ops.multiply(x, param), y)
      ]

    r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
    r = gradients_impl.gradients(r, param)[0]
    self.assertAllClose(107520.0, self.evaluate(r))

@test_util.run_deprecated_v1
def testNestedWhileAndTensorArray(self):
  """Nested while loops writing a row*col multiplication table to one TA."""
  n = constant_op.constant(3.0)

  def Body(row, ta):

    def InnerBody(row, col, ta):
      # Note: row and col are 1-based.
      ta = ta.write(
          math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
      return row, col + 1., ta

    ta = control_flow_ops.while_loop(
        lambda _, col, _1: col <= n,
        InnerBody, [row, constant_op.constant(1.), ta],
        return_same_structure=False)[2]
    return row + 1., ta

  ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
  ta = control_flow_ops.while_loop(
      lambda row, _: row <= n,
      Body, [constant_op.constant(1.), ta],
      return_same_structure=False)[1]

  output = array_ops.reshape(ta.stack(), [3, 3])
  self.assertAllEqual(
      self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
  # TODO(b/117675481): This does not work with current TA. Enable with new TA.
  # grad = gradients_impl.gradients(output, [n])
  # self.assertEqual(self.evaluate(grad), 3.5)
@test_util.run_deprecated_v1
def testWhileGrad_StopGrad(self):
  """stop_gradient applied to while outputs in various combinations.

  A fully stopped path must yield a None gradient; combined stopped and
  unstopped paths yield only the unstopped contribution.
  """
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.square(y)
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    self.assertEqual(136.0, self.evaluate(r))
    r = gradients_impl.gradients(ry, y)[0]
    self.assertEqual(32.0, self.evaluate(r))

    # assertIsNone states the intent directly and prints the offending
    # value on failure, unlike assertEqual(r, None).
    r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
    self.assertIsNone(r)

    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.square(rx)), y)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
    self.assertIsNone(r)

    r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
    self.assertEqual(168.0, self.evaluate(r))
    r = gradients_impl.gradients(
        math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
    self.assertEqual(136.0, self.evaluate(r))
    r = gradients_impl.gradients(
        math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
    self.assertEqual(32.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileGrad_StopGradInside(self):
  """stop_gradient applied inside the loop body severs the y path."""
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = array_ops.stop_gradient(math_ops.square(y))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    self.assertAllClose(0.0, self.evaluate(r))
    r = gradients_impl.gradients(rx, x)[0]
    self.assertAllClose(156.0, self.evaluate(r))

@test_util.run_deprecated_v1
def testWhileGrad_StopGradInsideNoShape(self):
  """stop_gradient inside the loop with unknown-shape placeholders."""
  with self.cached_session() as sess:
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)

    c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)

    def b(x, y):
      y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    grad_y = gradients_impl.gradients(rx, y)[0]
    grad_x = gradients_impl.gradients(rx, x)[0]
    feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
    self.assertAllClose([0.0, 0.0], sess.run(grad_y, feed_dict=feed_dict))
    self.assertAllClose([156.0, 400.0], sess.run(grad_x, feed_dict=feed_dict))
    # No gradient op should ever have been built for the stopped tensor.
    name = "gradients/while/stopped_grad"
    all_ops = x.graph.get_operations()
    self.assertFalse(any(name in op.name for op in all_ops))

@test_util.run_deprecated_v1
def testWhileGradGradFail(self):
  """Second-order gradient of scan fails in v1 control flow only."""
  theta = variables.Variable(initial_value=1.)

  def fn(prev, x):
    return prev + x * theta

  result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
  grad_theta = gradients_impl.gradients(result, theta)
  if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
    with self.assertRaisesRegex(TypeError, "Second-order gradient"):
      gradients_impl.gradients(grad_theta, theta)
  # Stopping the first-order gradient makes the second call succeed.
  grad_theta_stopped = array_ops.stop_gradient(grad_theta)
  gradients_impl.gradients(grad_theta_stopped, theta)

@test_util.run_deprecated_v1
def testStopGradOnWhileGrad(self):
  """stop_gradient applied to a while-loop gradient tensor."""
  with self.cached_session():
    x = constant_op.constant(2.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x: math_ops.less(x, 100.0)
    b = lambda x: math_ops.multiply(x, y)
    rx = control_flow_ops.while_loop(c, b, [x])

    rg = gradients_impl.gradients(rx, y)[0]
    rg = array_ops.stop_gradient(rg)
    r = math_ops.add(math_ops.square(y), rx)
    r = math_ops.add(r, rg)
    r = gradients_impl.gradients(r, y)[0]
    self.assertEqual(388.0, self.evaluate(r))

@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileGradientWithNontrainablePath1(self):
  """Loop never executes; gradient through the q path is zeros."""
  q = variables.Variable([7., 8.])

  def cond(_, y):
    del y
    return False

  def body(x, _):
    return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

  _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
  dy_dq, = gradients_impl.gradients(y, q)
  self.assertIsNotNone(dy_dq)
  with self.cached_session() as sess:
    self.evaluate(q.initializer)
    self.assertAllClose([0., 0.], self.evaluate(dy_dq))

@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self):
  """Loop runs once; gradient flows through reduce_sum(q) only."""
  q = variables.Variable([7., 8.])

  def cond(_, y):
    return math_ops.equal(y, 0.)

  def body(x, _):
    zero = constant_op.constant(0, dtype=dtypes.int64)
    return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

  _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
  dy_dq, = gradients_impl.gradients(y, q)
  self.assertIsNotNone(dy_dq)
  with self.cached_session() as sess:
    self.evaluate(q.initializer)
    self.assertAllClose([1., 1.], self.evaluate(dy_dq))
@test_util.run_v1_only("b/120545219")
def testIssue16504(self):
  """Regression test: gradients() inside a while body (GitHub #16504)."""
  c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
  w = variables.Variable(
      initial_value=np.ones(100), dtype=dtypes.float32) / 100
  k = variables.Variable(0, dtype=dtypes.int32)
  chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)

  def cond(k, _, chg_w):
    return math_ops.logical_and(k < 10, chg_w > 1e-3)

  def body(k, w, chg_w):
    grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
    w_n = w * math_ops.exp(-0.1 * grad)
    w_n /= math_ops.reduce_sum(w_n)
    chg_w = (
        math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
            math_ops.abs(w)))
    return k + 1, w_n, chg_w

  _, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
  grad, = gradients_impl.gradients(w, c)
  self.assertIsNotNone(grad)

@test_util.run_v1_only("b/120545219")
def testStopGradMultiFlows(self):
  """stop_gradient on one of several gradient flows through a loop."""
  with self.cached_session():

    def body(i, y, r):
      x = variable_scope.get_variable(
          "x",
          shape=(),
          dtype=dtypes.float32,
          initializer=init_ops.ones_initializer())
      y *= x
      return [i + 1, y, r + math_ops.reduce_sum(y)]

    i0 = constant_op.constant(0)
    y0 = array_ops.ones(5)
    r0 = constant_op.constant(0.0)
    cond = lambda i, y, r: i < 1
    _, _, r = control_flow_ops.while_loop(
        cond, body, [i0, y0, r0], back_prop=True)

    vars_ = variables.global_variables()
    grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
    z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
    result = gradients_impl.gradients(z, vars_)[0]
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(5.0, self.evaluate(result))

@test_util.run_v1_only("b/120545219")
def testOneValueCond(self):
  """cond returning a single scalar value from each branch."""
  with self.cached_session():
    c = array_ops.placeholder(dtypes.int32, shape=[])
    one = ops.convert_to_tensor(1, name="one")
    two = ops.convert_to_tensor(2, name="two")
    p = math_ops.greater_equal(c, 1)
    i = control_flow_ops.cond(p, lambda: one, lambda: two)
    self.assertTrue(isinstance(i, ops.Tensor))

    # True case: c = 2 is >= 1
    self.assertEqual([1], i.eval(feed_dict={c: 2}))

    # False case: c = 0 is not >= 1
    self.assertEqual([2], i.eval(feed_dict={c: 0}))

@test_util.run_deprecated_v1
def testExampleCond(self):
  """cond selecting between the L1 and L2 norms of a vector."""
  with self.cached_session():
    x = ops.convert_to_tensor([-2.0, 2.0], name="x")
    d = array_ops.placeholder(dtypes.int32, shape=[])

    def l2():
      return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))

    def l1():
      return math_ops.reduce_sum(math_ops.abs(x))

    i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
    self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
    self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))

@test_util.run_v1_only("b/120545219")
def testCase(self):
  """Exercises control_flow_ops.case: dict/list forms, exclusive mode."""
  with self.cached_session():
    x = constant_op.constant(1)
    y = constant_op.constant(2)
    z = constant_op.constant(3)
    f1 = lambda: constant_op.constant(17)
    f2 = lambda: constant_op.constant(23)
    f3 = lambda: constant_op.constant(-1)

    r1 = control_flow_ops.case(
        {
            x < y: f1,
            x > z: f2
        }, default=f3, exclusive=True)
    self.assertAllEqual(r1, 17)

    r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
    self.assertAllEqual(r2, 23)

    # Duplicate events can happen, first one is selected
    r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
    self.assertAllEqual(r3, 17)

    # Duplicate events cause an error if exclusive = True
    r4 = control_flow_ops.case(
        [(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
    with self.assertRaisesOpError("Input error:"):
      self.evaluate(r4)

    # Check that the default is called if none of the others are
    r5 = control_flow_ops.case({x > y: f1}, default=f3)
    self.assertAllEqual(r5, -1)

    ran_once = [False, False, False]

    def break_run_twice(ix):

      def _break():
        ran_once[ix] = True
        return constant_op.constant(ix)

      return _break

    # Should not fail - each conditional gets called exactly once
    # except default. Default gets called twice: once to create an
    # empty output and once for the actual cond switch.
    r6 = control_flow_ops.case(
        [(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
        default=lambda: constant_op.constant(2))
    self.assertAllEqual(r6, 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = self.evaluate([c, real_v])
# Ensure the result of 'real_c' is the same as 'c'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
  def testDependenciesDevice(self):
    """with_dependencies() inherits device placement from its output tensor."""
    with ops.Graph().as_default():
      # device set on tensor => same device on dep.
      with ops.device("/job:ps"):
        vd = variables.VariableV1([0.0])
      with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
      self.assertTrue("/job:ps" in with_vd_dep.device)

      # No device set on tensor => no device on dep.
      vnod = variables.VariableV1([0.0])
      with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
                                                         vnod)
      self.assertDeviceEqual(None, with_vnod_dep.device)

      # device set on tensor, default device on graph => default device on dep.
      vdef = variables.VariableV1([0.0], name="vdef")
      with ops.device("/job:worker/device:GPU:1"):
        with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
                                                           vdef)
        # The device is empty, but the colocation constraint is set.
        self.assertDeviceEqual("", with_vdef_dep.device)
        self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = self.evaluate([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
  @test_util.run_deprecated_v1
  def testMergeShapes(self):
    """merge() static-shape inference: unknown rank when inputs disagree on
    rank, per-dimension merge (None where dims differ) otherwise."""
    # All inputs unknown.
    p1 = array_ops.placeholder(dtypes.float32)
    p2 = array_ops.placeholder(dtypes.float32)
    p3 = array_ops.placeholder(dtypes.float32)
    m, index = control_flow_ops.merge([p1, p2, p3])
    self.assertIs(None, m.get_shape().ndims)
    self.assertEqual([], index.get_shape())

    # All inputs known with different ranks.
    p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertIs(None, m.get_shape().ndims)
    self.assertEqual([], index.get_shape())

    # All inputs known with some dimensions different.
    p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([None, None], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())

    p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([None, 2], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())

    p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([None, 2], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())

    # All inputs known with same dimensions.
    p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([1, 2], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())

    p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([None, 2], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())

    p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
    p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([None, None], m.get_shape().as_list())
    self.assertEqual([], index.get_shape())
@test_util.run_v1_only("b/120545219")
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
  @test_util.run_deprecated_v1
  def testRunLoopTensor(self):
    """A tensor created inside a while_loop body cannot be fetched from
    outside the loop context."""
    with self.cached_session() as sess:
      tensor_list = []

      def condition(t):
        return t < constant_op.constant(5)

      def body(_):
        # Leak a reference to a body-created tensor into the outer scope.
        tensor_list.append(constant_op.constant(5))
        return constant_op.constant(10)

      result = control_flow_ops.while_loop(condition, body,
                                           [constant_op.constant(4)])
      self.assertEqual(10, self.evaluate(result))

      # Ensure that we cannot run a tensor that escapes the loop body
      # accidentally.
      with self.assertRaises(ValueError):
        sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(self.evaluate(r[1]), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
grad = gradients_impl.gradients(r, x)[0]
self.assertEqual(self.evaluate(r[1]), 65536.0)
self.assertEqual(self.evaluate(grad), 524288.0)
# while_v2 does not have stacks.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_qint, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable(
shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="")
assign_op = state_ops.assign(
var_qint, constant_op.constant(np.array([42]), dtypes.qint8))
self.evaluate(assign_op)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.ref_switch(var_qint, cond)
result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_uint64, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
  def testSwitchEagerMode(self):
    """In eager mode switch() routes the data to the selected output and
    leaves the other output empty."""
    if not context.executing_eagerly():
      return
    input_data = [1, 2, 3, 4]
    vf, vt = control_flow_ops.switch(input_data, False)
    # Predicate is False: the false output carries the data.
    self.assertAllEqual(vf, input_data)
    self.assertAllEqual(vt, [])
  @test_util.run_deprecated_v1
  def testQIntArgAndRet(self):
    """A Defun can take and return a qint8 tensor."""

    @function.Defun(dtypes.qint8)
    def func(x):
      return x

    with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
      qint = constant_op.constant(np.array([42]), dtypes.qint8)
      result = func(qint)
      self.evaluate(result)
  def testSparseIdentity(self):
    """_Identity() passes a SparseTensor through unchanged."""
    st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
    st2 = control_flow_ops._Identity(st1)
    self.assertAllEqual(st1.indices, st2.indices)
    self.assertAllEqual(st1.values, st2.values)
    self.assertAllEqual(st1.dense_shape, st2.dense_shape)
  def testSparseEnterExit(self):
    """_Enter()/exit() round-trip a SparseTensor through a frame unchanged."""
    st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
    st2 = control_flow_ops._Enter(st1, "foo_1")
    st3 = control_flow_ops.exit(st2)
    self.assertAllEqual(st1.indices, st3.indices)
    self.assertAllEqual(st1.values, st3.values)
    self.assertAllEqual(st1.dense_shape, st3.dense_shape)
  def _buildWhileWithShapeInvariants(self, shape_invariants):
    """Builds a zero-iteration while_loop over a [2]-vector with the given
    shape invariants; the body would return a differently-shaped [1]-vector."""
    r = constant_op.constant([1, 2])

    def cond(_):
      return False

    def body(_):
      return constant_op.constant([1])

    return control_flow_ops.while_loop(
        cond, body, [r], shape_invariants=shape_invariants)
  def testWhileOutputShapeWithShapeInvariantsUnknownRank(self):
    """An unknown-rank shape invariant yields an unknown-rank loop output."""

    @def_function.function
    def runTest():
      while_output = self._buildWhileWithShapeInvariants(
          [tensor_shape.TensorShape(None)])
      self.assertIsNone(while_output.shape.rank)

    runTest()
  def testWhileOutputShapeWithShapeInvariantsPartialShape(self):
    """A partial [None] shape invariant is reflected in the loop output."""

    @def_function.function
    def runTest():
      while_output = self._buildWhileWithShapeInvariants(
          [tensor_shape.TensorShape([None])])
      self.assertAllEqual(while_output.shape.as_list(), [None])

    runTest()
  def testFunctionInWhile(self):
    """A tf.function can serve directly as a while_loop body."""

    @def_function.function
    def body(x):
      return x + 1

    r = control_flow_ops.while_loop(lambda x: x < 5, body, [0])
    self.assertAllEqual(r, 5.)
class ControlFlowContextCheckTest(test.TestCase):
  """Tests the graph-builder checks that reject using a tensor created in
  one control-flow context (while/cond) as an input in an incompatible one.

  NOTE(review): the error regexes assert exact auto-generated op names
  ('while/Const_1' etc.), so these tests depend on graph construction order.
  """

  def _getWhileTensor(self):
    """Creates and returns a tensor from a while context."""
    tensor = []

    def body(i):
      if not tensor:
        tensor.append(constant_op.constant(1))
      return i + tensor[0]

    control_flow_ops.while_loop(lambda i: i < 10, body, [0])
    return tensor[0]

  def _getCondTensor(self):
    """Creates and returns a tensor from inside a cond branch."""
    cond_tensor = []

    def true_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(
        math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
    return cond_tensor[0]

  @test_util.run_v1_only("b/120545219")
  def testInvalidContext(self):
    # Accessing a while loop tensor outside of control flow is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegex(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
        "is in a while loop. See info log for more details."):
      math_ops.add(1, while_tensor)

  @test_util.run_v1_only("b/120545219")
  def testInvalidContextInCond(self):
    # Accessing a while loop tensor in cond is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegex(
        ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
        "'while/Const_1' is in a while loop. See info log for more details."):
      # TODO(skyewm): this passes if we return while_tensor directly instead
      # of using it as input to another op.
      control_flow_ops.cond(
          math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
          lambda: constant_op.constant(0))

  @test_util.run_v1_only("b/120545219")
  def testInvalidContextInWhile(self):
    # Accessing a while loop tensor in a different while loop is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegex(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'while_1/Add' because they are "
        "in different while loops. See info log for more details."):
      control_flow_ops.while_loop(lambda i: i < 10,
                                  lambda x: math_ops.add(1, while_tensor), [0])

    with self.assertRaisesRegex(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'while_2/NextIteration' "
        "because they are in different while loops. See info log for more "
        "details."):
      control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])

  def testValidCondContext(self):
    # Accessing a tensor from a cond context is OK (although dangerous).
    cond_tensor = self._getCondTensor()
    math_ops.add(1, cond_tensor)

  def testValidCondContextBranches(self):
    # Accessing a tensor from a cond context from the other branch's cond
    # context is OK (although dangerous).
    cond_tensor = []

    def branch_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)

  @test_util.run_v1_only("b/120545219")
  def testValidWhileContext(self):
    # Accessing a tensor in a nested while is OK.
    def body(_):
      c = constant_op.constant(1)
      return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  @test_util.run_v1_only("b/120545219")
  def testValidNestedContexts(self):
    # Accessing a tensor from a cond context in a while context, all inside an
    # outer while context, is OK.
    def body(_):
      cond_tensor = self._getCondTensor()
      # Create another cond containing the while loop for good measure
      return control_flow_ops.cond(
          math_ops.less(1, 2),
          lambda: control_flow_ops.while_loop(lambda i: i < 3,
                                              lambda i: i + cond_tensor, [0]),
          lambda: constant_op.constant(0))

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  @test_util.run_v1_only("b/120545219")
  def testInvalidNestedContexts(self):
    # Accessing a tensor from a while context in a different while context, all
    # inside a cond context, is illegal.
    def true_fn():
      while_tensor = self._getWhileTensor()
      return control_flow_ops.while_loop(lambda i: i < 3,
                                         lambda i: i + while_tensor, [0])

    with self.assertRaisesRegex(
        ValueError,
        "Cannot use 'cond/while/Const_1' as input to 'cond/while_1/add' because"
        " they are in different while loops. See info log for more details."):
      control_flow_ops.cond(
          math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
  """Tests control_flow_ops.tuple(): all outputs become available together,
  so fetching any one output forces all inputs (and their dependencies,
  e.g. variable initializers) to run."""

  @test_util.run_v1_only("b/120545219")
  def testTensors(self):
    """Fetching either tuple output initializes both variables."""
    for v1_first in [True, False]:
      with self.cached_session():
        v1 = variables.VariableV1([1.0])
        add1 = math_ops.add(
            control_flow_ops.with_dependencies([v1.initializer], v1._ref()),  # pylint: disable=protected-access
            2.0)
        v2 = variables.VariableV1([10.0])
        add2 = math_ops.add(
            control_flow_ops.with_dependencies([v2.initializer], v2._ref()),  # pylint: disable=protected-access
            20.0)
        # None entries pass through tuple() untouched.
        t1, _, t2 = control_flow_ops.tuple([add1, None, add2])

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v1)

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v2)

        if v1_first:
          # Getting t1 initializes v2.
          self.assertAllClose([3.0], self.evaluate(t1))
          self.assertAllClose([10.0], self.evaluate(v2))
        else:
          # Getting t2 initializes v1.
          self.assertAllClose([30.0], self.evaluate(t2))
          self.assertAllClose([1.0], self.evaluate(v1))

  @test_util.run_v1_only("b/120545219")
  def testIndexedSlices(self):
    """tuple() works with IndexedSlices inputs as well as dense tensors."""
    for v1_first in [True, False]:
      with self.cached_session():
        v1 = variables.VariableV1(
            np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
                np.float32))
        v1_at_1 = ops.IndexedSlices(
            control_flow_ops.with_dependencies([v1.initializer], v1._ref()),  # pylint: disable=protected-access
            constant_op.constant([1]))
        v2 = variables.VariableV1(
            np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
                np.float32))
        v2_at_1 = ops.IndexedSlices(
            control_flow_ops.with_dependencies([v2.initializer], v2._ref()),  # pylint: disable=protected-access
            constant_op.constant([1]))
        st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
        g1 = array_ops.gather(st1.values, st1.indices)
        g2 = array_ops.gather(st2.values, st2.indices)

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v1)

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v2)

        if v1_first:
          # Getting g1 initializes v2.
          self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
          self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
                              self.evaluate(v2))
        else:
          # Getting g2 initializes v1.
          self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
          self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
                              self.evaluate(v1))

  def testAcceptTensorsAsControlInputs(self):
    """tuple() accepts extra control_inputs that run before the outputs."""
    with self.cached_session():
      var = variables.VariableV1(0)
      assign = state_ops.assign(var, 1)
      t, = control_flow_ops.tuple(
          [constant_op.constant(0)], control_inputs=[assign])

      # Should trigger the assign.
      self.evaluate(t)

      self.assertEqual(1, self.evaluate(var))
class AssertTest(test.TestCase):
  """Tests tf.Assert placement behavior on GPU."""

  @test_util.run_deprecated_v1
  def testGuardedAssertDoesNotCopyWhenTrue(self):
    """A guarded Assert must not copy its data tensors device-to-host when
    the condition is true, unlike a raw (unguarded) assert op."""
    if test_util.is_gpu_available():
      self.skipTest("b/128646478 fails in opensource")
    with self.session(use_gpu=True) as sess:
      with ops.device(test.gpu_device_name()):
        value = constant_op.constant(1.0)
      with ops.device("/cpu:0"):
        true = constant_op.constant(True)
        guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
        unguarded_assert = gen_logging_ops._assert(
            true, [value], name="unguarded")
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      guarded_metadata = config_pb2.RunMetadata()
      sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
      unguarded_metadata = config_pb2.RunMetadata()
      sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
      # Collect per-node step stats and look for device-to-host memcpys.
      guarded_nodestat_names = [
          n.node_name
          for d in guarded_metadata.step_stats.dev_stats
          for n in d.node_stats
      ]
      unguarded_nodestat_names = [
          n.node_name
          for d in unguarded_metadata.step_stats.dev_stats
          for n in d.node_stats
      ]
      guarded_memcpy_nodestat_names = [
          n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
      ]
      unguarded_memcpy_nodestat_names = [
          n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
      ]
      if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
        # A copy was performed for the unguarded assert
        self.assertLess(0, len(unguarded_memcpy_nodestat_names),
                        str(unguarded_nodestat_names))
      # No copy was performed for the guarded assert
      self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
  """Evaluate the performance of while_loop op."""

  def _getInitVariables(self):
    """Creates the loop counter, input image variable, and conv kernel."""
    batch_size = 10
    image_size = 256
    kernel_size = 3
    depth = 16

    init_step = constant_op.constant(-1)
    image = variable_scope.get_variable(
        "image",
        initializer=random_ops.random_normal(
            [batch_size, image_size, image_size, depth],
            dtype=dtypes.float32,
            stddev=1e-1))
    kernel = variable_scope.get_variable(
        "weights",
        initializer=random_ops.truncated_normal(
            [kernel_size, kernel_size, depth, depth],
            dtype=dtypes.float32,
            stddev=1e-1))
    return init_step, image, kernel

  def _runOneBenchmark(self,
                       default_device,
                       num_iters=10,
                       static_unroll=False,
                       steps=10):
    """Evaluate the while loop performance.

    Args:
      default_device: The default device to run all ops except the loop_body.
        loop_body is always run on GPU.
      num_iters: Number of iterations to run.
      static_unroll: If true, run unrolled version; otherwise, run while_loop.
      steps: Total number of repeated steps to run the loop.

    Returns:
      The duration of the run in seconds.
    """

    def loop_body(i, x):
      with ops.device("/gpu:0"):
        # Always put loop body on GPU.
        nx = nn_ops.conv2d(
            input=x,
            filter=kernel,
            strides=[1, 1, 1, 1],
            padding="SAME",
            data_format="NHWC",
            name="conv2d")
        ni = math_ops.add(i, 1)
        return ni, nx

    ops.reset_default_graph()
    with session.Session() as sess, ops.device(default_device):
      # Get the initial id i, input x, and kernel.
      i, x, kernel = self._getInitVariables()
      self.evaluate(variables.global_variables_initializer())

      if static_unroll:
        for _ in xrange(steps):
          i, x = loop_body(i, x)
      else:
        i, x = control_flow_ops.while_loop(
            lambda i, _: i < steps,
            loop_body, [i, x],
            parallel_iterations=steps,
            swap_memory=True)

      r = math_ops.reduce_sum(x)
      dx, dk = gradients_impl.gradients(r, [x, kernel])
      # Use group to avoid fetching back results.
      r = control_flow_ops.group(dx, dk)

      # NOTE(review): xrange is Python 2 — presumably imported from
      # six.moves outside this view; verify at the top of the file.
      for _ in xrange(3):
        # exclude warm up time
        self.evaluate(r)

      start_time = time.time()
      for _ in xrange(num_iters):
        self.evaluate(r)
      return (time.time() - start_time) / num_iters

  def benchmarkWhileOpCrossDevicePlacement(self):
    """while_loop with loop body on GPU, other ops on CPU."""
    iters = 10
    # Run loop body on GPU, but other ops on CPU.
    duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
    self.report_benchmark(
        name="while_op_cross_device", iters=iters, wall_time=duration)

  def benchmarkWhileOpSameDevicePlacement(self):
    """while_loop with everything on the same GPU."""
    iters = 10
    # Run all ops on the same GPU device.
    duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
    self.report_benchmark(
        name="while_op_same_device", iters=iters, wall_time=duration)

  def benchmarkWhileOpUnrollCrossDevicePlacement(self):
    """Statically unrolled loop, body on GPU, other ops on CPU."""
    iters = 10
    # Run loop body on GPU, but other ops on CPU.
    duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
    self.report_benchmark(
        name="unroll_cross_device_cpu", iters=iters, wall_time=duration)

  def benchmarkWhileOpUnrollSameDevicePlacement(self):
    """Statically unrolled loop, everything on GPU."""
    iters = 10
    # Run all ops on GPU.
    duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
    self.report_benchmark(
        name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
  """Control-flow ops exercised under eager execution."""

  def testCond(self):
    """cond() with single-element-list branches returns a scalar Tensor."""
    with context.eager_mode():
      pred = math_ops.less(1, 2)
      fn1 = lambda: [constant_op.constant(10)]
      fn2 = lambda: [constant_op.constant(20)]
      r = control_flow_ops.cond(pred, fn1, fn2)

      self.assertAllEqual(r.numpy(), 10)
      self.assertFalse(isinstance(r, list))

  # TODO(b/117279927): Re-enable once msan failure is fixed.
  def DISABLED_testCondInDefun(self):
    with context.eager_mode():

      @eager_function.defun
      def foo(pred):
        # TODO(b/111124878): this only needs to output one element.
        fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))
        fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))
        return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)

      r = foo(True)
      self.assertAllEqual(r[0].numpy(), 10)
      self.assertNotIsInstance(r, list)

      r = foo(False)
      self.assertAllEqual(r[0].numpy(), 20)
      self.assertFalse(isinstance(r, list))

  def testWhileLoop(self):
    """Module-level isum() helper sums 1..10 into each element."""
    with context.eager_mode():
      tensor = constant_op.constant([1, 2, 3, 4, 5])
      self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])

  def testWhileLoopWithMaxIterations(self):
    """maximum_iterations caps the eager while loop at 3 additions."""
    with context.eager_mode():
      tensor = constant_op.constant([1, 2, 3, 4, 5])
      self.assertAllEqual(
          isum(tensor, maximum_iterations=3).numpy(),
          [1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])

  @test_util.run_v1_only("b/120545219")
  def testWhileWithMaximumIterationsAndSingleArgument(self):
    """Single loop variable plus maximum_iterations in eager mode."""
    with context.eager_mode():
      tensor = constant_op.constant(0)
      r = control_flow_ops.while_loop(
          lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
      self.assertEqual(1, r.numpy())

  def testWithDependencies(self):
    """with_dependencies() is a pass-through in eager mode."""
    with context.eager_mode():
      t1 = constant_op.constant(1)
      t2 = constant_op.constant(2)
      t3 = control_flow_ops.with_dependencies(t1, t2)
      self.assertAllEqual(t2.numpy(), t3.numpy())

  def testTuple(self):
    """tuple() is a pass-through in eager mode."""
    with context.eager_mode():
      t1 = constant_op.constant(1)
      t2 = constant_op.constant(2)
      tup1, tup2 = control_flow_ops.tuple([t1, t2])
      self.assertAllEqual(t1.numpy(), tup1.numpy())
      self.assertAllEqual(t2.numpy(), tup2.numpy())

  @test_util.run_v1_only("b/120545219")
  def testCase(self):
    """case() selects the first true branch in eager mode."""
    with context.eager_mode():
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      z = constant_op.constant(3)
      f1 = lambda: constant_op.constant(17)
      f2 = lambda: constant_op.constant(23)
      f3 = lambda: constant_op.constant(-1)

      r1 = control_flow_ops.case(
          [(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
      self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logic for parsing a function signatures.
Much of this logic is duplicated at
tools/binary_size/libsupersize/caspian/function_signature.cc."""
def _FindParameterListParen(name):
  """Finds index of the "(" that denotes the start of a parameter list.

  Scans left-to-right, keeping running <>/() balance counts so that parens
  inside template arguments or nested parentheses are skipped. Returns -1
  when no parameter-list paren is found.
  """
  # This loops from left-to-right, but the only reason (I think) that this
  # is necessary (rather than reusing _FindLastCharOutsideOfBrackets), is
  # to capture the outer-most function in the case where classes are nested.
  start_idx = 0
  template_balance_count = 0
  paren_balance_count = 0
  while True:
    idx = name.find('(', start_idx)
    if idx == -1:
      return -1

    # Balance of '<' vs '>' in the slice since the previous candidate.
    template_balance_count += (
        name.count('<', start_idx, idx) - name.count('>', start_idx, idx))
    # Special: operators with angle brackets.
    # The '<'s in "operator<" / "operator<<" are not template openers, so
    # back out their contribution (and likewise for '>' / '>>').
    operator_idx = name.find('operator<', start_idx, idx)
    if operator_idx != -1:
      # Check for operator<<.
      if name[operator_idx + 9] == '<':
        template_balance_count -= 2
      else:
        template_balance_count -= 1
    else:
      operator_idx = name.find('operator>', start_idx, idx)
      if operator_idx != -1:
        # Check for operator>>.
        if name[operator_idx + 9] == '>':
          template_balance_count += 2
        else:
          template_balance_count += 1

    paren_balance_count += (
        name.count('(', start_idx, idx) - name.count(')', start_idx, idx))
    if template_balance_count == 0 and paren_balance_count == 0:
      # Special case: skip "(anonymous namespace)".
      if -1 != name.find('(anonymous namespace)', idx, idx + 21):
        start_idx = idx + 21
        continue
      # Special case: skip "decltype (...)"
      # Special case: skip "{lambda(PaintOp*)#63}"
      if name[idx - 1] != ' ' and name[idx - 7:idx] != '{lambda':
        return idx

    # This candidate '(' was rejected: resume after it and count it as an
    # open paren in the running balance.
    start_idx = idx + 1
    paren_balance_count += 1
def _FindLastCharOutsideOfBrackets(name, target_char, prev_idx=None):
"""Returns the last index of |target_char| that is not within ()s nor <>s."""
paren_balance_count = 0
template_balance_count = 0
while True:
idx = name.rfind(target_char, 0, prev_idx)
if idx == -1:
return -1
# It is much faster to use.find() and.count() than to loop over each
# character.
template_balance_count += (
name.count('<', idx, prev_idx) - name.count('>', idx, prev_idx))
paren_balance_count += (
name.count('(', idx, prev_idx) - name.count(')', idx, prev_idx))
if template_balance_count == 0 and paren_balance_count == 0:
return idx
prev_idx = idx
def _FindReturnValueSpace(name, paren_idx):
  """Returns the index of the space that comes after the return type.

  Returns -1 when the signature has no return type (e.g. constructors).
  """
  space_idx = paren_idx
  # Special case: const cast operators (see tests).
  if -1 != name.find(' const', paren_idx - 6, paren_idx):
    space_idx = paren_idx - 6
  while True:
    space_idx = _FindLastCharOutsideOfBrackets(name, ' ', space_idx)
    # Special cases: "operator new", "operator< <templ>", "operator<< <tmpl>".
    # No space is added for operator>><tmpl>.
    if -1 == space_idx:
      break
    # A space immediately preceded by an operator keyword is part of the
    # operator's name, not the return-type separator: keep scanning left.
    if -1 != name.find('operator', space_idx - 8, space_idx):
      space_idx -= 8
    elif -1 != name.find('operator<', space_idx - 9, space_idx):
      space_idx -= 9
    elif -1 != name.find('operator<<', space_idx - 10, space_idx):
      space_idx -= 10
    else:
      break
  return space_idx
def _StripTemplateArgs(name):
  """Removes all template arguments from |name|, leaving empty "<>" markers.

  Works right-to-left so that nested template argument lists are collapsed
  from the innermost outwards.
  """
  last_right_idx = None
  while True:
    last_right_idx = name.rfind('>', 0, last_right_idx)
    if last_right_idx == -1:
      return name
    left_idx = _FindLastCharOutsideOfBrackets(name, '<', last_right_idx + 1)
    if left_idx != -1:
      # Leave in empty <>s to denote that it's a template.
      name = name[:left_idx + 1] + name[last_right_idx:]
      last_right_idx = left_idx
def _NormalizeTopLevelGccLambda(name, left_paren_idx):
# cc::{lambda(PaintOp*)#63}::_FUN() -> cc::$lambda#63()
left_brace_idx = name.index('{')
hash_idx = name.index('#', left_brace_idx + 1)
right_brace_idx = name.index('}', hash_idx + 1)
number = name[hash_idx + 1:right_brace_idx]
return '{}$lambda#{}{}'.format(
name[:left_brace_idx], number, name[left_paren_idx:])
def _NormalizeTopLevelClangLambda(name, left_paren_idx):
# cc::$_21::__invoke() -> cc::$lambda#21()
dollar_idx = name.index('$')
colon_idx = name.index(':', dollar_idx + 1)
number = name[dollar_idx + 2:colon_idx]
return '{}$lambda#{}{}'.format(
name[:dollar_idx], number, name[left_paren_idx:])
def ParseJava(full_name):
  """Breaks java full_name into parts.

  See unit tests for example signatures.

  Returns:
    A tuple of (full_name, template_name, name), where:
      * full_name = "class_with_package#member(args): type"
      * template_name = "class_with_package#member"
      * name = "class_without_package#member"
  """
  hash_idx = full_name.find('#')
  if hash_idx == -1:
    # Raw "class [type] member" form straight from the symbol source.
    pieces = full_name.split(' ')
    full_class_name = pieces[0]
    member = pieces[-1] if len(pieces) > 1 else None
    member_type = ': ' + pieces[1] if len(pieces) >= 3 else ''
  else:
    # Parse an already parsed full_name.
    # Format: Class#symbol: type
    full_class_name = full_name[:hash_idx]
    colon_idx = full_name.find(':')
    if colon_idx == -1:
      member = full_name[hash_idx + 1:]
      member_type = ''
    else:
      member = full_name[hash_idx + 1:colon_idx]
      member_type = full_name[colon_idx:]

  short_class_name = full_class_name.split('.')[-1]
  if member is None:
    # Class-only symbol: all three parts collapse onto the class names.
    return full_name, full_name, short_class_name

  full_name = '{}#{}{}'.format(full_class_name, member, member_type)
  paren_idx = member.find('(')
  if paren_idx != -1:
    member = member[:paren_idx]
  return (full_name,
          '{}#{}'.format(full_class_name, member),
          '{}#{}'.format(short_class_name, member))
def Parse(name):
  """Strips return type and breaks function signature into parts.

  See unit tests for example signatures.

  Returns:
    A tuple of:
      * name without return type (symbol.full_name),
      * full_name without params (symbol.template_name),
      * full_name without params and template args (symbol.name)
  """
  left_paren_idx = _FindParameterListParen(name)
  full_name = name
  if left_paren_idx > 0:
    right_paren_idx = name.rindex(')')
    assert right_paren_idx > left_paren_idx
    space_idx = _FindReturnValueSpace(name, left_paren_idx)
    name_no_params = name[space_idx + 1:left_paren_idx]
    # Special case for top-level lambdas: normalize the symbol and re-parse.
    if name_no_params.endswith('}::_FUN'):
      # Don't use |name_no_params| in here since prior _idx will be off if
      # there was a return value.
      name = _NormalizeTopLevelGccLambda(name, left_paren_idx)
      return Parse(name)
    elif name_no_params.endswith('::__invoke') and '$' in name_no_params:
      assert '$_' in name_no_params, 'Surprising lambda: ' + name
      name = _NormalizeTopLevelClangLambda(name, left_paren_idx)
      return Parse(name)

    full_name = name[space_idx + 1:]
    # Drop the parameter list but keep any trailing qualifiers (e.g. "const").
    name = name_no_params + name[right_paren_idx + 1:]
  template_name = name
  name = _StripTemplateArgs(name)
  return full_name, template_name, name
# An odd place for this, but pylint doesn't want it as a static in models
# (circular dependency), nor as a method on BaseSymbol
# (attribute-defined-outside-init).
def InternSameNames(symbol):
  """Share equal name strings so "is" comparisons work (and to save RAM)."""
  if symbol.full_name == symbol.template_name:
    symbol.template_name = symbol.full_name
  if symbol.template_name == symbol.name:
    symbol.name = symbol.template_name
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.converters;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;
/**
* {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from long values.
* It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
* optional INT64 schema.
* <p>
* This implementation currently does nothing with the topic names or header keys.
*/
public class LongConverter extends NumberConverter<Long> {

    /**
     * Creates a converter that (de)serializes {@code Long} values with the standard Kafka
     * {@link LongSerializer} / {@link LongDeserializer} pair and reports an optional INT64
     * Connect schema.
     */
    public LongConverter() {
        super("long", Schema.OPTIONAL_INT64_SCHEMA, new LongSerializer(), new LongDeserializer());
    }
}
from axelrod import Player, Actions
C, D = Actions.C, Actions.D
class Punisher(Player):
    """
    A player starts by cooperating however will defect if at any point the
    opponent has defected, but forgets after mem_length matches, with
    1 <= mem_length <= 20 proportional to the amount of time the opponent has
    played D, punishing that player for playing D too often.
    """

    name = 'Punisher'
    classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic' : False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def __init__(self):
        """
        Initialise the player's grudge state.
        """
        super(Punisher, self).__init__()
        # How many rounds a grudge lasts; invariant: 1 <= mem_length <= 20.
        self.mem_length = 1
        # Whether we are currently punishing the opponent.
        self.grudged = False
        # How many rounds the current grudge has been held.
        self.grudge_memory = 1

    def strategy(self, opponent):
        """
        Begins by playing C, then plays D for an amount of rounds proportional
        to the opponents historical '%' of playing D if the opponent ever
        plays D
        """
        # A grudge held for mem_length rounds expires.
        if self.grudge_memory >= self.mem_length:
            self.grudge_memory = 0
            self.grudged = False
        if self.grudged:
            self.grudge_memory += 1
            return D
        elif D in opponent.history[-1:]:
            # Punishment length scales with the opponent's defection rate,
            # mapped onto [0, 20].
            self.mem_length = (opponent.defections * 20) // len(opponent.history)
            if self.mem_length == 0:
                # Clamp to 1 so the documented invariant 1 <= mem_length <= 20
                # holds and the grudge lasts at least one round (this mirrors
                # InversePunisher; previously a rate below 5% gave length 0).
                self.mem_length += 1
            self.grudged = True
            return D
        return C

    def reset(self):
        """
        Resets scores and history
        """
        Player.reset(self)
        self.grudged = False
        self.grudge_memory = 0
        self.mem_length = 1
class InversePunisher(Player):
    """
    A player starts by cooperating however will defect if at any point the
    opponent has defected, but forgets after mem_length matches, with
    1 <= mem_length <= 20 proportional to the amount of time the opponent
    has played C. The inverse of Punisher.
    """

    name = 'Inverse Punisher'
    classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def __init__(self):
        super(InversePunisher, self).__init__()
        self.history = []
        # Rounds the current grudge should last; always in [1, 20].
        self.mem_length = 1
        # True while we are punishing the opponent.
        self.grudged = False
        # Rounds the current grudge has already been held.
        self.grudge_memory = 1

    def strategy(self, opponent):
        """
        Begins by playing C, then plays D for an amount of rounds proportional
        to the opponent's historical '%' of playing C if the opponent ever
        plays D.
        """
        # A grudge held for mem_length rounds expires.
        if self.grudge_memory >= self.mem_length:
            self.grudge_memory = 0
            self.grudged = False
        if self.grudged:
            self.grudge_memory += 1
            return D
        if opponent.history and opponent.history[-1] == D:
            # Punishment length scales with the opponent's cooperation rate,
            # mapped onto [0, 20] and clamped to at least one round.
            self.mem_length = (opponent.cooperations * 20) // len(opponent.history)
            self.mem_length = max(self.mem_length, 1)
            self.grudged = True
            return D
        return C

    def reset(self):
        """Resets internal variables and history"""
        Player.reset(self)
        self.grudged = False
        self.grudge_memory = 0
        self.mem_length = 1
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import io
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Args:
        text: A unicode or byte string (`str`/`bytes` on Python 3,
            `unicode`/`str` on Python 2).

    Returns:
        The text as a unicode string.

    Raises:
        ValueError: If `text` is not a string type, or the Python major
            version cannot be determined.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            # Decode permissively: undecodable byte sequences are dropped.
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    On Python 3 the result is a unicode `str`; on Python 2 it is a utf-8
    encoded byte `str`.

    Raises:
        ValueError: If `text` is not a string type, or the Python major
            version cannot be determined.
    """
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, unicode):
            return text.encode("utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> id dictionary.

    Each line holds either "token" (the id is the line number) or
    "token<TAB>id". Reading stops at the first line with more than two
    tab-separated fields.

    Args:
        vocab_file: Path to a UTF-8 encoded vocabulary file.

    Returns:
        A `collections.OrderedDict` mapping token strings to integer ids.
    """
    vocab = collections.OrderedDict()
    # Use a context manager so the file is always closed (the previous
    # implementation leaked the handle).
    with io.open(vocab_file, encoding="utf8") as fin:
        for num, line in enumerate(fin):
            items = convert_to_unicode(line.strip()).split("\t")
            if len(items) > 2:
                # Malformed line: stop loading rather than guess the format.
                break
            token = items[0].strip()
            index = items[1] if len(items) == 2 else num
            vocab[token] = int(index)
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Maps each token string in `tokens` to its integer id via `vocab`."""
    return [vocab[token] for token in tokens]
def convert_ids_to_tokens(inv_vocab, ids):
    """Maps each integer id in `ids` back to its token string via `inv_vocab`."""
    return [inv_vocab[token_id] for token_id in ids]
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no separator collapses runs of whitespace, ignores
    # leading/trailing whitespace, and returns [] for empty/blank input,
    # so no explicit strip() or empty check is needed.
    return text.split()
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization followed by WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        """Args:
            vocab_file: Path to the vocabulary file to load.
            do_lower_case: Whether the basic tokenizer lower-cases input.
        """
        self.vocab = load_vocab(vocab_file)
        # Inverse mapping (id -> token) for convert_ids_to_tokens.
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Splits `text` into basic tokens, then each token into wordpieces."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Maps token strings to integer ids using the loaded vocab."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Maps integer ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """Runs end-to-end tokenization splitting only on single spaces.

    Unlike FullTokenizer, no BasicTokenizer pass is applied: the text is
    lower-cased, split on " ", and each piece is wordpiece-tokenized.
    """

    def __init__(self, vocab_file, do_lower_case=True):
        # NOTE(review): `do_lower_case` is accepted for interface parity with
        # FullTokenizer, but tokenize() below always lower-cases regardless —
        # confirm this is intended.
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Lower-cases `text`, splits on " ", and wordpiece-tokenizes each piece."""
        split_tokens = []
        for token in text.lower().split(" "):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Maps token strings to integer ids using the loaded vocab."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Maps integer ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text.

        Cleans invalid characters, isolates CJK characters, optionally
        lower-cases and strips accents, then splits on punctuation.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks;
        # the marks (category "Mn", nonspacing mark) are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text.

        Every punctuation character becomes its own token.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control chars.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        """Args:
            vocab: Mapping from wordpiece strings to ids (membership test only).
            unk_token: Token emitted for words that cannot be decomposed.
            max_input_chars_per_word: Words longer than this become `unk_token`.
        """
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Greedily take the longest substring starting at `start` that
                # is in the vocab; non-initial pieces carry the "##" marker.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece matched: the whole word maps to `unk_token`.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_approx_equal, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
#import statsmodels.sandbox.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
    """Assert an F-test result matches an expected (f, p, df_num, df_denom, 'f') tuple."""
    expected_f, expected_p = other[0], other[1]
    assert_almost_equal(contrast_res.fvalue, expected_f, decimal=decimal[0])
    assert_almost_equal(contrast_res.pvalue, expected_p, decimal=decimal[1])
    assert_equal(contrast_res.df_num, other[2])
    assert_equal(contrast_res.df_denom, other[3])
    # The reference tuples always tag the distribution as "f".
    assert_equal("f", other[4])
class TestGLSARGretl(object):
    """Regression tests of GLSAR and OLS-with-HAC results against Gretl.

    All reference numbers below are hard-coded from Gretl output for the
    macrodata real-investment growth regression.
    """

    def test_all(self):
        """Fit GLSAR (fixed and iterated rho) and OLS with HAC errors, then
        compare parameters, fit statistics, and diagnostic tests (RESET,
        ARCH, White, Breusch-Pagan, influence measures) with Gretl.
        """

        d = macrodata.load().data
        #import datasetswsm.greene as g
        #d = g.load('5-1')

        #growth rates
        gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
        gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))

        #simple diff, not growthrate, I want heteroscedasticity later for testing
        endogd = np.diff(d['realinv'])
        exogd = add_constant(np.c_[np.diff(d['realgdp']), d['realint'][:-1]])

        endogg = gs_l_realinv
        exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1]])

        res_ols = OLS(endogg, exogg).fit()
        #print res_ols.params

        mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
        res_g1 = mod_g1.fit()
        #print res_g1.params

        mod_g2 = GLSAR(endogg, exogg, rho=-0.108136)  #-0.1335859) from R
        res_g2 = mod_g2.iterative_fit(maxiter=5)
        #print res_g2.params

        rho = -0.108136

        # coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
        partable = np.array([
            [-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670],  # ***
            [ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086],  # ***
            [-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]])  # **

        #Statistics based on the rho-differenced data:
        result_gretl_g1 = dict(
            endog_mean = ("Mean dependent var", 3.113973),
            endog_std = ("S.D. dependent var", 18.67447),
            ssr = ("Sum squared resid", 22530.90),
            mse_resid_sqrt = ("S.E. of regression", 10.66735),
            rsquared = ("R-squared", 0.676973),
            rsquared_adj = ("Adjusted R-squared", 0.673710),
            fvalue = ("F(2, 198)", 221.0475),
            f_pvalue = ("P-value(F)", 3.56e-51),
            resid_acf1 = ("rho", -0.003481),
            dw = ("Durbin-Watson", 1.993858))

        #fstatistic, p-value, df1, df2
        reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
        reset_2 = [7.268492, 0.00762, 1, 198, "f"]
        reset_3 = [5.248951, 0.023, 1, 198, "f"]
        #LM-statistic, p-value, df
        arch_4 = [7.30776, 0.120491, 4, "chi2"]

        #multicollinearity
        vif = [1.002, 1.002]
        cond_1norm = 6862.0664
        determinant = 1.0296049e+009
        reciprocal_condition_number = 0.013819244
        #Chi-square(2): test-statistic, pvalue, df
        normality = [20.2792, 3.94837e-005, 2]

        #tests
        res = res_g1  #with rho from Gretl

        #basic
        assert_almost_equal(res.params, partable[:,0], 4)
        assert_almost_equal(res.bse, partable[:,1], 6)
        assert_almost_equal(res.tvalues, partable[:,2], 2)

        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
        #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
        #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
        assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=2)
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        #arch
        #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

        #tests
        res = res_g2 #with estimated rho

        #estimated lag coefficient
        assert_almost_equal(res.model.rho, rho, decimal=3)

        #basic
        assert_almost_equal(res.params, partable[:,0], 4)
        assert_almost_equal(res.bse, partable[:,1], 3)
        assert_almost_equal(res.tvalues, partable[:,2], 2)

        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
        #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
        #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
        assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        c = oi.reset_ramsey(res, degree=2)
        compare_ftest(c, reset_2, decimal=(2,4))
        c = oi.reset_ramsey(res, degree=3)
        compare_ftest(c, reset_2_3, decimal=(2,4))

        #arch
        #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)

        # The following triple-quoted strings are kept as inline reference
        # copies of the Gretl output the numbers above were taken from.
        '''
        Performing iterative calculation of rho...

        ITER RHO ESS
        1 -0.10734 22530.9
        2 -0.10814 22530.9

        Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
        Dependent variable: ds_l_realinv
        rho = -0.108136

        coefficient std. error t-ratio p-value
        -------------------------------------------------------------
        const -9.50990 0.990456 -9.602 3.65e-018 ***
        ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
        realint_1 -0.579253 0.268009 -2.161 0.0319 **

        Statistics based on the rho-differenced data:

        Mean dependent var 3.113973 S.D. dependent var 18.67447
        Sum squared resid 22530.90 S.E. of regression 10.66735
        R-squared 0.676973 Adjusted R-squared 0.673710
        F(2, 198) 221.0475 P-value(F) 3.56e-51
        rho -0.003481 Durbin-Watson 1.993858
        '''

        '''
        RESET test for specification (squares and cubes)
        Test statistic: F = 5.219019,
        with p-value = P(F(2,197) > 5.21902) = 0.00619

        RESET test for specification (squares only)
        Test statistic: F = 7.268492,
        with p-value = P(F(1,198) > 7.26849) = 0.00762

        RESET test for specification (cubes only)
        Test statistic: F = 5.248951,
        with p-value = P(F(1,198) > 5.24895) = 0.023:
        '''

        '''
        Test for ARCH of order 4

        coefficient std. error t-ratio p-value
        --------------------------------------------------------
        alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
        alpha(1) 0.176114 0.0714698 2.464 0.0146 **
        alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
        alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
        alpha(4) 0.0384531 0.0725763 0.5298 0.5968

        Null hypothesis: no ARCH effect is present
        Test statistic: LM = 7.30776
        with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
        '''

        '''
        Variance Inflation Factors

        Minimum possible value = 1.0
        Values > 10.0 may indicate a collinearity problem

        ds_l_realgdp 1.002
        realint_1 1.002

        VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
        between variable j and the other independent variables

        Properties of matrix X'X:

        1-norm = 6862.0664
        Determinant = 1.0296049e+009
        Reciprocal condition number = 0.013819244
        '''

        '''
        Test for ARCH of order 4 -
        Null hypothesis: no ARCH effect is present
        Test statistic: LM = 7.30776
        with p-value = P(Chi-square(4) > 7.30776) = 0.120491

        Test of common factor restriction -
        Null hypothesis: restriction is acceptable
        Test statistic: F(2, 195) = 0.426391
        with p-value = P(F(2, 195) > 0.426391) = 0.653468

        Test for normality of residual -
        Null hypothesis: error is normally distributed
        Test statistic: Chi-square(2) = 20.2792
        with p-value = 3.94837e-005:
        '''

        #no idea what this is
        '''
        Augmented regression for common factor test
        OLS, using observations 1959:3-2009:3 (T = 201)
        Dependent variable: ds_l_realinv

        coefficient std. error t-ratio p-value
        ---------------------------------------------------------------
        const -10.9481 1.35807 -8.062 7.44e-014 ***
        ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
        realint_1 -0.662644 0.334872 -1.979 0.0492 **
        ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
        ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
        realint_2 0.0769695 0.341527 0.2254 0.8219

        Sum of squared residuals = 22432.8

        Test of common factor restriction

        Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
        '''

        ################ with OLS, HAC errors

        #Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
        #Dependent variable: ds_l_realinv
        #HAC standard errors, bandwidth 4 (Bartlett kernel)

        #coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
        #for confidence interval t(199, 0.025) = 1.972

        partable = np.array([
            [-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049],  # ***
            [4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258],  #***
            [-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]])  # **

        result_gretl_g1 = dict(
            endog_mean = ("Mean dependent var", 3.257395),
            endog_std = ("S.D. dependent var", 18.73915),
            ssr = ("Sum squared resid", 22799.68),
            mse_resid_sqrt = ("S.E. of regression", 10.70380),
            rsquared = ("R-squared", 0.676978),
            rsquared_adj = ("Adjusted R-squared", 0.673731),
            fvalue = ("F(2, 199)", 90.79971),
            f_pvalue = ("P-value(F)", 9.53e-29),
            llf = ("Log-likelihood", -763.9752),
            aic = ("Akaike criterion", 1533.950),
            bic = ("Schwarz criterion", 1543.875),
            hqic = ("Hannan-Quinn", 1537.966),
            resid_acf1 = ("rho", -0.107341),
            dw = ("Durbin-Watson", 2.213805))

        linear_logs = [1.68351, 0.430953, 2, "chi2"]
        #for logs: dropping 70 nan or incomplete observations, T=133
        #(res_ols.model.exog <=0).any(1).sum() = 69  ?not 70
        linear_squares = [7.52477, 0.0232283, 2, "chi2"]

        #Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
        lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
        lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
        acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]

        #break
        cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
        #see cusum results in files
        break_qlr = [3.01985, 0.1, 3, 196, "maxF"]  #TODO check this, max at 2001:4
        break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1

        arch_4 = [3.43473, 0.487871, 4, "chi2"]

        normality = [23.962, 0.00001, 2, "chi2"]
        het_white = [33.503723, 0.000003, 5, "chi2"]
        het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"]  #TODO: not available
        het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]

        reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
        reset_2 = [7.268492, 0.00762, 1, 198, "f"]
        reset_3 = [5.248951, 0.023, 1, 198, "f"]  #not available

        cond_1norm = 5984.0525
        determinant = 7.1087467e+008
        reciprocal_condition_number = 0.013826504
        vif = [1.001, 1.001]

        names = 'date residual leverage influence DFFITS'.split()
        cur_dir = os.path.abspath(os.path.dirname(__file__))
        fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
        lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
                            converters={0:lambda s: s})
        #either numpy 1.6 or python 3.2 changed behavior
        if np.isnan(lev[-1]['f1']):
            lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
                                converters={0:lambda s: s})

        lev.dtype.names = names

        res = res_ols #for easier copying

        cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
        bse_hac = sw.se_cov(cov_hac)

        assert_almost_equal(res.params, partable[:,0], 5)
        assert_almost_equal(bse_hac, partable[:,1], 5)
        #TODO

        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
        assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
        assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        #f-value is based on cov_hac I guess
        #res2 = res.get_robustcov_results(cov_type='HC1')
        # TODO: fvalue differs from Gretl, trying any of the HCx
        #assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
        #assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        c = oi.reset_ramsey(res, degree=2)
        compare_ftest(c, reset_2, decimal=(6,5))
        c = oi.reset_ramsey(res, degree=3)
        compare_ftest(c, reset_2_3, decimal=(6,5))

        linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
        assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
        assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)

        hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
        assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
        assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)

        hw = smsdia.het_white(res.resid, res.model.exog)
        assert_almost_equal(hw[:2], het_white[:2], 6)

        #arch
        #sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.resid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

        vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]

        infl = oi.OLSInfluence(res_ols)
        #print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
        #print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
        #print np.max(np.abs(lev['influence'] - infl.influence))  #just added this based on Gretl

        #just rough test, low decimal in Gretl output,
        assert_almost_equal(lev['residual'], res.resid, decimal=3)
        assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
        assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
        assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
    """GLSAR with lag > 1: results stay close to lag=1 and the ssr is smaller."""
    from statsmodels.datasets import macrodata
    data = macrodata.load().data
    growth_gdp = 400 * np.diff(np.log(data['realgdp']))
    growth_inv = 400 * np.diff(np.log(data['realinv']))
    exog = add_constant(np.c_[growth_gdp, data['realint'][:-1]], prepend=False)
    model_lag1 = GLSAR(growth_inv, exog, 1)
    result_lag1 = model_lag1.iterative_fit(5)
    model_lag4 = GLSAR(growth_inv, exog, 4)
    result_lag4 = model_lag4.iterative_fit(10)
    # parameter estimates agree to within 3 percent
    assert_array_less(np.abs(result_lag1.params / result_lag4.params - 1), 0.03)
    # the richer AR(4) error structure must fit at least as tightly
    assert_array_less(result_lag4.ssr, result_lag1.ssr)
    assert_array_less(np.abs(result_lag4.bse / result_lag1.bse) - 1, 0.015)
    assert_array_less(
        np.abs((result_lag4.fittedvalues / result_lag1.fittedvalues - 1).mean()),
        0.015)
    assert_equal(len(model_lag4.rho), 4)
if __name__ == '__main__':
    # Allow running the Gretl comparison checks directly as a script,
    # outside of a test runner.
    t = TestGLSARGretl()
    t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
''' | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
* The result of the {@link Admin#describeLogDirs(Collection)} call.
*/
public class DescribeLogDirsResult {
private final Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> futures;
DescribeLogDirsResult(Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> futures) {
this.futures = futures;
}
/**
* Return a map from brokerId to future which can be used to check the information of partitions on each individual broker.
* The result of the future is a map from broker log directory path to a description of that log directory.
*/
public Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> descriptions() {
return futures;
}
/**
* Return a future which succeeds only if all the brokers have responded without error.
* The result of the future is a map from brokerId to a map from broker log directory path
* to a description of that log directory.
*/
public KafkaFuture<Map<Integer, Map<String, LogDirDescription>>> allDescriptions() {
return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0])).
thenApply(v -> {
Map<Integer, Map<String, LogDirDescription>> descriptions = new HashMap<>(futures.size());
for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirDescription>>> entry : futures.entrySet()) {
try {
descriptions.put(entry.getKey(), entry.getValue().get());
} catch (InterruptedException | ExecutionException e) {
// This should be unreachable, because allOf ensured that all the futures completed successfully.
throw new RuntimeException(e);
}
}
return descriptions;
});
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Training Vision Models using Backbone API
Computer vision workflows follow a common pattern. Use a pre-trained backbone for feature extraction ([ViT](../model_doc/vit), [DINOv3](../model_doc/dinov3)). Add a "neck" for feature enhancement. Attach a task-specific head ([DETR](../model_doc/detr) for object detection, [MaskFormer](../model_doc/maskformer) for segmentation).
The Transformers library implements these models and the [backbone API](../backbones) lets you swap different backbones and heads with minimal code.

This guide combines [DINOv3 with ConvNext architecture](https://huggingface.co/facebook/dinov3-convnext-large-pretrain-lvd1689m) and a [DETR head](https://huggingface.co/facebook/detr-resnet-50). You'll train on the [license plate detection dataset](https://huggingface.co/datasets/merve/license-plates). DINOv3 delivers the best performance as of this writing.
> [!NOTE]
> This model requires access approval. Visit [the model repository](https://huggingface.co/facebook/dinov3-convnext-large-pretrain-lvd1689m) to request access.
Install [trackio](https://github.com/gradio-app/trackio) for experiment tracking and [albumentations](https://albumentations.ai/) for data augmentation. Use the latest transformers version.
```bash
pip install -Uq albumentations trackio transformers datasets
```
Initialize [`DetrConfig`] with the pre-trained DINOv3 ConvNext backbone. Use `num_labels=1` to detect the license plate bounding boxes. Create [`DetrForObjectDetection`] with this configuration. Freeze the backbone to preserve DINOv3 features without updating weights. Load the [`DetrImageProcessor`].
```py
from transformers import DetrConfig, DetrForObjectDetection, AutoImageProcessor, AutoConfig, AutoBackbone
# Create a model with randomly initialized weights
backbone_config = AutoConfig.from_pretrained("facebook/dinov3-convnext-large-pretrain-lvd1689m")
backbone = AutoBackbone.from_pretrained("facebook/dinov3-convnext-large-pretrain-lvd1689m")
config = DetrConfig(backbone_config=backbone_config,
num_labels=1, id2label={0: "license_plate"}, label2id={"license_plate": 0})
model = DetrForObjectDetection(config)
# Assign pretrained backbone checkpoint and freeze the weights
model.model.backbone = backbone
model.model.freeze_backbone()
image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
```
Load the dataset and split it for training.
```py
from datasets import load_dataset
ds = load_dataset("merve/license-plates")
ds = ds["train"]
ds = ds.train_test_split(test_size=0.05)
train_dataset = ds["train"]
val_dataset = ds["test"]
len(train_dataset)
# 5867
```
Augment the dataset. Rescale images to a maximum size, flip them, and apply affine transforms. Eliminate invalid bounding boxes and ensure annotations stay clean with `rebuild_objects`.
```py
import albumentations as A
import numpy as np
from PIL import Image
train_aug = A.Compose(
[
A.LongestMaxSize(max_size=1024, p=1.0),
A.HorizontalFlip(p=0.5),
A.Affine(rotate=(-5, 5), shear=(-5, 5), translate_percent=(0.05, 0.05), p=0.5),
],
bbox_params=A.BboxParams(format="coco", label_fields=["category_id"], min_visibility=0.0),
)
def train_transform(batch):
imgs_out, objs_out = [], []
original_imgs, original_objs = batch["image"], batch["objects"]
for i, (img_pil, objs) in enumerate(zip(original_imgs, original_objs)):
img = np.array(img_pil)
labels = [0] * len(objs["bbox"])
out = train_aug(image=img, bboxes=list(objs["bbox"]), category_id=labels)
if len(out["bboxes"]) == 0:
imgs_out.append(img_pil) # if no boxes left after augmentation, use original
objs_out.append(objs)
continue
H, W = out["image"].shape[:2]
clamped = []
for (x, y, w, h) in out["bboxes"]:
x = max(0.0, min(x, W - 1.0))
y = max(0.0, min(y, H - 1.0))
w = max(1.0, min(w, W - x))
h = max(1.0, min(h, H - y))
clamped.append([x, y, w, h])
imgs_out.append(Image.fromarray(out["image"]))
objs_out.append(rebuild_objects(clamped, out["category_id"]))
batch["image"] = imgs_out
batch["objects"] = objs_out
return batch
def rebuild_objects(bboxes, labels):
bboxes = [list(map(float, b)) for b in bboxes]
areas = [float(w*h) for (_, _, w, h) in bboxes]
ids = list(range(len(bboxes)))
return {
"id": ids,
"bbox": bboxes,
"category_id": list(map(int, labels)),
"area": areas,
"iscrowd": [0]*len(bboxes),
}
train_dataset = train_dataset.with_transform(train_transform)
```
Build COCO-style annotations for the image processor.
```py
import torch
def format_annotations(image, objects, image_id):
n = len(objects["id"])
anns = []
iscrowd_list = objects.get("iscrowd", [0] * n)
area_list = objects.get("area", None)
for i in range(n):
x, y, w, h = objects["bbox"][i]
area = area_list[i] if area_list is not None else float(w * h)
anns.append({
"id": int(objects["id"][i]),
"iscrowd": int(iscrowd_list[i]),
"bbox": [float(x), float(y), float(w), float(h)],
"category_id": int(objects.get("category_id", objects.get("category"))[i]),
"area": float(area),
})
return {"image_id": int(image_id), "annotations": anns}
```
Create batches in the data collator. Format annotations and pass them with transformed images to the image processor.
```py
def collate_fn(examples):
images = [example["image"] for example in examples]
ann_batch = [format_annotations(example["image"], example["objects"], example["image_id"]) for example in examples]
inputs = image_processor(images=images, annotations=ann_batch, return_tensors="pt")
return inputs
```
Initialize the [`Trainer`] and set up [`TrainingArguments`] for model convergence. Pass datasets, data collator, arguments, and model to `Trainer` to start training.
```py
from transformers import Trainer, TrainingArguments
training_args = TrainingArguments(
output_dir="./license-plate-detr-dinov3",
per_device_train_batch_size=4,
per_device_eval_batch_size=4,
num_train_epochs=8,
learning_rate=1e-5,
weight_decay=1e-4,
warmup_steps=500,
eval_strategy="steps",
eval_steps=500,
save_total_limit=2,
dataloader_pin_memory=False,
fp16=True,
report_to="trackio",
load_best_model_at_end=True,
remove_unused_columns=False,
push_to_hub=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
data_collator=collate_fn,
)
trainer.train()
```
Push the trainer and image processor to the Hub.
```py
trainer.push_to_hub()
image_processor.push_to_hub("merve/license-plate-detr-dinov3")
```
Test the model with an object detection pipeline.
```py
from transformers import pipeline
obj_detector = pipeline(
"object-detection", model="merve/license-plate-detr-dinov3"
)
results = obj_detector("https://huggingface.co/datasets/merve/vlm_test_images/resolve/main/license-plates.jpg", threshold=0.05)
print(results)
```
Visualize the results.
```py
from PIL import Image, ImageDraw
import numpy as np
import requests
def plot_results(image, results, threshold):
image = Image.fromarray(np.uint8(image))
draw = ImageDraw.Draw(image)
width, height = image.size
for result in results:
score = result["score"]
label = result["label"]
box = list(result["box"].values())
if score > threshold:
x1, y1, x2, y2 = tuple(box)
draw.rectangle((x1, y1, x2, y2), outline="red")
draw.text((x1 + 5, y1 + 10), f"{score:.2f}", fill="green" if score > 0.7 else "red")
return image
image = Image.open(requests.get("https://huggingface.co/datasets/merve/vlm_test_images/resolve/main/license-plates.jpg", stream=True).raw)
plot_results(image, results, threshold=0.05)
```
 | unknown | github | https://github.com/huggingface/transformers | docs/source/en/tasks/training_vision_backbone.md |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.dataaccess.jdbc.jdbccomplextypes;
import java.sql.Connection;
import java.sql.SQLException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import oracle.jdbc.driver.OracleConnection;
import org.springframework.jdbc.core.SqlTypeValue;
import org.springframework.jdbc.core.support.AbstractSqlTypeValue;
@SuppressWarnings("unused")
class SqlTypeValueFactory {
void createStructSample() throws ParseException {
// tag::struct[]
TestItem testItem = new TestItem(123L, "A test item",
new SimpleDateFormat("yyyy-M-d").parse("2010-12-31"));
SqlTypeValue value = new AbstractSqlTypeValue() {
protected Object createTypeValue(Connection connection, int sqlType, String typeName) throws SQLException {
Object[] item = new Object[] { testItem.getId(), testItem.getDescription(),
new java.sql.Date(testItem.getExpirationDate().getTime()) };
return connection.createStruct(typeName, item);
}
};
// end::struct[]
}
void createOracleArray() {
// tag::oracle-array[]
Long[] ids = new Long[] {1L, 2L};
SqlTypeValue value = new AbstractSqlTypeValue() {
protected Object createTypeValue(Connection conn, int sqlType, String typeName) throws SQLException {
return conn.unwrap(OracleConnection.class).createOracleArray(typeName, ids);
}
};
// end::oracle-array[]
}
} | java | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/java/org/springframework/docs/dataaccess/jdbc/jdbccomplextypes/SqlTypeValueFactory.java |
# coding: utf-8
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError

from ..models import User
class SigninForm(Form):
    """Form for signin.

    Inline validators check that the account exists and that the password
    matches; on success the matching user is cached on ``self.user`` for
    the view to consume.
    """
    email = StringField('Email',
                        validators=[
                            DataRequired("Email shouldn't be empty."),
                            Email('Email format is not correct.')
                        ])
    password = PasswordField('Password',
                             validators=[DataRequired("Password shouldn't be empty.")])

    def validate_email(self, field):
        """Reject sign-in attempts for unknown email addresses."""
        user = User.query.filter(User.email == field.data).first()
        if not user:
            # Raise the WTForms error type explicitly; the original raised a
            # bare ValueError, which only works because ValidationError
            # happens to subclass it.
            raise ValidationError("Account doesn't exist.")

    def validate_password(self, field):
        """Check the password against the stored hash and cache the user."""
        if self.email.data:
            user = User.query.filter(User.email == self.email.data).first()
            if not user or not user.check_password(field.data):
                raise ValidationError('Password is not correct.')
            self.user = user
class SignupForm(Form):
    """Form for signup.

    Enforces a unique username and email, and that both password fields
    match. (The original docstring said "signin" — copy-paste error.)
    """
    name = StringField('Username',
                       validators=[DataRequired("Username shouldn't be empty.")])
    email = StringField('Email',
                        validators=[
                            DataRequired(message="Email shouldn't be empty."),
                            Email(message='Email format is not correct.')
                        ])
    password = PasswordField('Password',
                             validators=[DataRequired("Password shouldn't be empty.")])
    repassword = PasswordField('Retype password',
                               validators=[
                                   DataRequired("Please retype the password."),
                                   EqualTo('password', message="Passwords must match.")
                               ])

    def validate_name(self, field):
        """Reject usernames that are already taken."""
        if User.query.filter(User.name == field.data).first():
            # Use the WTForms error type instead of a bare ValueError.
            raise ValidationError('This username already exists.')

    def validate_email(self, field):
        """Reject email addresses that are already registered."""
        if User.query.filter(User.email == field.data).first():
            raise ValidationError('This email already exists.')
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Joel Denning @joeldenning
*/
"use strict";
const { ConcatSource } = require("webpack-sources");
const { UsageState } = require("../ExportsInfo");
const ExternalModule = require("../ExternalModule");
const Template = require("../Template");
const propertyAccess = require("../util/propertyAccess");
const AbstractLibraryPlugin = require("./AbstractLibraryPlugin");
/** @typedef {import("webpack-sources").Source} Source */
/** @typedef {import("../../declarations/WebpackOptions").LibraryOptions} LibraryOptions */
/** @typedef {import("../../declarations/WebpackOptions").LibraryType} LibraryType */
/** @typedef {import("../Chunk")} Chunk */
/** @typedef {import("../Compilation").ChunkHashContext} ChunkHashContext */
/** @typedef {import("../ExportsInfo").ExportInfoName} ExportInfoName */
/** @typedef {import("../javascript/JavascriptModulesPlugin").RenderContext} RenderContext */
/** @typedef {import("../util/Hash")} Hash */
/**
* @template T
* @typedef {import("./AbstractLibraryPlugin").LibraryContext<T>} LibraryContext<T>
*/
/**
* @typedef {object} SystemLibraryPluginOptions
* @property {LibraryType} type
*/
/**
* @typedef {object} SystemLibraryPluginParsed
* @property {string} name
*/
/**
* @typedef {SystemLibraryPluginParsed} T
* @extends {AbstractLibraryPlugin<SystemLibraryPluginParsed>}
*/
class SystemLibraryPlugin extends AbstractLibraryPlugin {
	/**
	 * @param {SystemLibraryPluginOptions} options the plugin options
	 */
	constructor(options) {
		super({
			pluginName: "SystemLibraryPlugin",
			type: options.type
		});
	}

	/**
	 * @param {LibraryOptions} library normalized library option
	 * @returns {T} preprocess as needed by overriding
	 */
	parseOptions(library) {
		const { name } = library;
		// System.register only supports a plain string bundle name (or none).
		if (name && typeof name !== "string") {
			throw new Error(
				`System.js library name must be a simple string or unset. ${AbstractLibraryPlugin.COMMON_LIBRARY_NAME_MESSAGE}`
			);
		}
		const _name = /** @type {string} */ (name);
		return {
			name: _name
		};
	}

	/**
	 * @param {Source} source source
	 * @param {RenderContext} renderContext render context
	 * @param {LibraryContext<T>} libraryContext context
	 * @returns {Source} source with library export
	 */
	render(source, { chunkGraph, moduleGraph, chunk }, { options, compilation }) {
		// Only externals declared with the "system" external type are provided
		// by System itself at runtime.
		const modules = chunkGraph
			.getChunkModules(chunk)
			.filter(
				(m) => m instanceof ExternalModule && m.externalType === "system"
			);
		const externals = /** @type {ExternalModule[]} */ (modules);
		// The name this bundle should be registered as with System
		const name = options.name
			? `${JSON.stringify(compilation.getPath(options.name, { chunk }))}, `
			: "";
		// The array of dependencies that are external to webpack and will be provided by System
		const systemDependencies = JSON.stringify(
			externals.map((m) =>
				typeof m.request === "object" && !Array.isArray(m.request)
					? m.request.amd
					: m.request
			)
		);
		// The name of the variable provided by System for exporting
		const dynamicExport = "__WEBPACK_DYNAMIC_EXPORT__";
		// An array of the internal variable names for the webpack externals
		const externalWebpackNames = externals.map(
			(m) =>
				`__WEBPACK_EXTERNAL_MODULE_${Template.toIdentifier(
					`${chunkGraph.getModuleId(m)}`
				)}__`
		);
		// Declaring variables for the internal variable names for the webpack externals
		const externalVarDeclarations = externalWebpackNames
			.map((name) => `var ${name} = {};`)
			.join("\n");
		// Define __esModule flag on all internal variables and helpers
		/** @type {string[]} */
		const externalVarInitialization = [];
		// The system.register format requires an array of setter functions for externals.
		const setters =
			externalWebpackNames.length === 0
				? ""
				: Template.asString([
						"setters: [",
						Template.indent(
							externals
								.map((module, i) => {
									const external = externalWebpackNames[i];
									const exportsInfo = moduleGraph.getExportsInfo(module);
									const otherUnused =
										exportsInfo.otherExportsInfo.getUsed(chunk.runtime) ===
										UsageState.Unused;
									/** @type {string[]} */
									const instructions = [];
									/** @type {ExportInfoName[]} */
									const handledNames = [];
									// Copy each used export from the provided module onto the
									// internal external variable, under its (possibly mangled) name.
									for (const exportInfo of exportsInfo.orderedExports) {
										const used = exportInfo.getUsedName(
											undefined,
											chunk.runtime
										);
										if (used) {
											if (otherUnused || used !== exportInfo.name) {
												if (exportInfo.name === "default") {
													// Ideally we should use `module && module.__esModule ? module['default'] : module`
													// But we need to keep compatibility with SystemJS format libraries (they are using `default`) and bundled SystemJS libraries from commonjs format
													instructions.push(
														`${external}${propertyAccess([
															used
														])} = module["default"] || module;`
													);
												} else {
													instructions.push(
														`${external}${propertyAccess([
															used
														])} = module${propertyAccess([exportInfo.name])};`
													);
												}
												handledNames.push(exportInfo.name);
											}
										} else {
											handledNames.push(exportInfo.name);
										}
									}
									if (!otherUnused) {
										if (
											!Array.isArray(module.request) ||
											module.request.length === 1
										) {
											externalVarInitialization.push(
												`Object.defineProperty(${external}, "__esModule", { value: true });`
											);
										}
										// See comment above
										instructions.push(
											`${external}["default"] = module["default"] || module;`
										);
										if (handledNames.length > 0) {
											const name = `${external}handledNames`;
											externalVarInitialization.push(
												`var ${name} = ${JSON.stringify(handledNames)};`
											);
											instructions.push(
												Template.asString([
													"Object.keys(module).forEach(function(key) {",
													Template.indent([
														`if(${name}.indexOf(key) >= 0)`,
														Template.indent(`${external}[key] = module[key];`)
													]),
													"});"
												])
											);
										} else {
											instructions.push(
												Template.asString([
													"Object.keys(module).forEach(function(key) {",
													Template.indent([`${external}[key] = module[key];`]),
													"});"
												])
											);
										}
									}
									// No used exports at all -> emit a no-op setter.
									if (instructions.length === 0) return "function() {}";
									return Template.asString([
										"function(module) {",
										Template.indent(instructions),
										"}"
									]);
								})
								.join(",\n")
						),
						"],"
					]);
		// Wrap the chunk source in the System.register(...) declaration; the
		// trailing part closes dynamicExport(...), execute, the returned
		// declaration object, and the register callback.
		return new ConcatSource(
			Template.asString([
				`System.register(${name}${systemDependencies}, function(${dynamicExport}, __system_context__) {`,
				Template.indent([
					externalVarDeclarations,
					Template.asString(externalVarInitialization),
					"return {",
					Template.indent([
						setters,
						"execute: function() {",
						Template.indent(`${dynamicExport}(`)
					])
				]),
				""
			]),
			source,
			Template.asString([
				"",
				Template.indent([
					Template.indent([Template.indent([");"]), "}"]),
					"};"
				]),
				"})"
			])
		);
	}

	/**
	 * @param {Chunk} chunk the chunk
	 * @param {Hash} hash hash
	 * @param {ChunkHashContext} chunkHashContext chunk hash context
	 * @param {LibraryContext<T>} libraryContext context
	 * @returns {void}
	 */
	chunkHash(chunk, hash, chunkHashContext, { options, compilation }) {
		hash.update("SystemLibraryPlugin");
		// The templated library name affects the emitted wrapper, so it must
		// participate in the chunk hash.
		if (options.name) {
			hash.update(compilation.getPath(options.name, { chunk }));
		}
	}
}

module.exports = SystemLibraryPlugin;
import torch
from torch._inductor import ir
from torch._inductor.runtime.benchmarking import benchmarker
def to_channels_last(x):
    """Return a copy of the 4D tensor ``x`` stored in channels-last (NHWC) order."""
    ndim = x.dim()
    if ndim != 4:
        raise AssertionError(f"Expected 4D tensor, but got {ndim}D")
    # Stride rank per dim: N slowest, C fastest -> NHWC physical layout.
    nhwc_strides = ir.FlexibleLayout.stride_ordered(x.shape, [3, 0, 2, 1])
    result = x.clone().as_strided(x.shape, nhwc_strides)
    result.copy_(x)
    if not torch.allclose(x, result):
        raise AssertionError("Tensor copy failed: x and y are not close")
    return result
def bench_conv(with_stack=True):
    """Benchmark a 7x7/stride-2 convolution in NCHW vs. channels-last layout on GPU.

    Profiles one call of each variant (trace written to /tmp/chrome.json),
    verifies the two layouts agree numerically, then prints timings.
    """
    x = torch.rand(256, 3, 224, 224).cuda()
    weight = torch.rand(64, 3, 7, 7).cuda()
    x_chan = to_channels_last(x)
    weight_chan = to_channels_last(weight)
    conv_args = dict(
        stride=[2, 2],
        padding=[3, 3],
        dilation=[1, 1],
        transposed=False,
        output_padding=[0, 0],
        groups=1,
    )

    def baseline_fn():
        return torch.convolution(x, weight, bias=None, **conv_args)

    def test_fn():
        return torch.convolution(x_chan, weight_chan, bias=None, **conv_args)

    # One untimed call each so lazy initialization stays out of the profile.
    baseline_fn()
    test_fn()
    torch.cuda.synchronize()
    with torch.profiler.profile(with_stack=with_stack) as prof:
        baseline_out = baseline_fn()
        test_out = test_fn()
        torch.cuda.synchronize()
    prof.export_chrome_trace("/tmp/chrome.json")
    if not torch.allclose(baseline_out, test_out, atol=1e-3, rtol=1e-3):
        raise AssertionError(
            f"baseline_out and test_out are not close: "
            f"baseline={baseline_out[0][0][0][:32]}, test={test_out[0][0][0][:32]}"
        )
    baseline_ms = benchmarker.benchmark_gpu(baseline_fn, rep=40)
    test_ms = benchmarker.benchmark_gpu(test_fn, rep=40)
    print(f"baseline {baseline_ms} test {test_ms} speedup {baseline_ms / test_ms:.3f}x")
def main():
    """Entry point: run the convolution layout mini-benchmark."""
    bench_conv(with_stack=True)
if __name__ == "__main__":
main() | python | github | https://github.com/pytorch/pytorch | benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py |
from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from six import text_type
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.lib.utils import statsd, generate_random_token
from zerver.models import UserProfile, Message, UserMessage
def get_pointer_backend(request, user_profile):
    # type: (HttpRequest, UserProfile) -> HttpResponse
    """Return the requesting user's current pointer (last-read message id)."""
    payload = {'pointer': user_profile.pointer}
    return json_success(payload)
@has_request_variables
def update_pointer_backend(request, user_profile,
                           pointer=REQ(converter=to_non_negative_int)):
    # type: (HttpRequest, UserProfile, int) -> HttpResponse
    """Advance the user's pointer; moving it backwards is a silent no-op."""
    # Nothing to do if the pointer would not move forward.
    if pointer <= user_profile.pointer:
        return json_success()
    # The target must be a message this user actually received.
    try:
        UserMessage.objects.get(user_profile=user_profile, message__id=pointer)
    except UserMessage.DoesNotExist:
        raise JsonableError(_("Invalid message ID"))
    request._log_data["extra"] = "[%s]" % (pointer,)
    client_name = request.client.name.lower()
    update_flags = client_name in ('android', 'zulipandroid')
    do_update_pointer(user_profile, pointer, update_flags=update_flags)
    return json_success()
def generate_client_id():
    # type: () -> text_type
    """Produce a fresh 32-character random token identifying an event client."""
    return generate_random_token(32)
def get_profile_backend(request, user_profile):
    # type: (HttpRequest, UserProfile) -> HttpResponse
    """Return the client bootstrap profile: the user's pointer, a fresh
    client_id, and the id of the newest message received (-1 if none)."""
    result = dict(pointer = user_profile.pointer,
                  client_id = generate_client_id(),
                  max_message_id = -1)
    # Newest message first; the [:1] slice keeps the query cheap.
    messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
    if messages:
        result['max_message_id'] = messages[0].id
    # NOTE(review): the trailing "| unknown | codeparrot/..." text on the next
    # line is dataset-extraction residue fused onto the source, not code.
    return json_success(result) | unknown | codeparrot/codeparrot-clean | |
//// [tests/cases/compiler/blockScopedNamespaceDifferentFile.ts] ////
//// [test.ts]
namespace C {
export class Name {
static funcData = A.AA.func();
static someConst = A.AA.foo;
constructor(parameters) {}
}
}
//// [typings.d.ts]
declare namespace A {
namespace AA {
function func(): number;
const foo = "";
}
}
//// [out.js]
"use strict";
var C;
(function (C) {
var Name = /** @class */ (function () {
function Name(parameters) {
}
Name.funcData = A.AA.func();
Name.someConst = A.AA.foo;
return Name;
}());
C.Name = Name;
})(C || (C = {})); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/blockScopedNamespaceDifferentFile(target=es5).js |
# coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
from octoprint.settings import settings
import os
import threading
import urllib
import time
import subprocess
import fnmatch
def getFinishedTimelapses():
	"""List rendered timelapse movies (*.mpg) with their name and size in bytes."""
	basedir = settings().getBaseFolder("timelapse")
	result = []
	for entry in os.listdir(basedir):
		if fnmatch.fnmatch(entry, "*.mpg"):
			result.append({
				"name": entry,
				"size": os.stat(os.path.join(basedir, entry)).st_size
			})
	return result
class Timelapse(object):
	"""Base timelapse recorder: captures webcam snapshots into a temp folder
	during a print and renders them to an .mpg with ffmpeg when the job stops.
	Subclasses decide *when* frames are grabbed via the onZChange /
	onPrintjobProgress hooks.
	"""
	def __init__(self):
		# Next snapshot index; None while no timelapse is running.
		self._imageNumber = None
		self._inTimelapse = False
		self._gcodeFile = None
		# Frames go to the temp dir; rendered movies to the movie dir.
		self._captureDir = settings().getBaseFolder("timelapse_tmp")
		self._movieDir = settings().getBaseFolder("timelapse")
		self._snapshotUrl = settings().get("webcam", "snapshot")
		self._renderThread = None
		# Serializes filename/counter updates across capture threads.
		self._captureMutex = threading.Lock()
	def onPrintjobStarted(self, gcodeFile):
		self.startTimelapse(gcodeFile)
	def onPrintjobStopped(self):
		self.stopTimelapse()
	def onPrintjobProgress(self, oldPos, newPos, percentage):
		# Hook for subclasses; the base class captures nothing on progress.
		pass
	def onZChange(self, oldZ, newZ):
		# Hook for subclasses; the base class captures nothing on layer change.
		pass
	def startTimelapse(self, gcodeFile):
		self.cleanCaptureDir()
		self._imageNumber = 0
		self._inTimelapse = True
		self._gcodeFile = os.path.basename(gcodeFile)
	def stopTimelapse(self):
		# Rendering can take a while, so it runs on a daemon thread.
		self._renderThread = threading.Thread(target=self._createMovie)
		self._renderThread.daemon = True
		self._renderThread.start()
		self._imageNumber = None
		self._inTimelapse = False
	def captureImage(self):
		if self._captureDir is None:
			return
		# Reserve the filename/counter under the lock; the actual download
		# runs on its own daemon thread so capture never blocks the caller.
		with self._captureMutex:
			filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber))
			self._imageNumber += 1;
		captureThread = threading.Thread(target=self._captureWorker, kwargs={"filename": filename})
		captureThread.daemon = True
		captureThread.start()
	def _captureWorker(self, filename):
		# Python 2 API: fetches the webcam snapshot URL into the given file.
		urllib.urlretrieve(self._snapshotUrl, filename)
	def _createMovie(self):
		ffmpeg = settings().get("webcam", "ffmpeg")
		bitrate = settings().get("webcam", "bitrate")
		# Without a configured ffmpeg binary and bitrate no movie can be made.
		if ffmpeg is None or bitrate is None:
			return
		input = os.path.join(self._captureDir, "tmp_%05d.jpg")
		output = os.path.join(self._movieDir, "%s_%s.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S")))
		subprocess.call([
			ffmpeg, '-i', input, '-vcodec', 'mpeg2video', '-pix_fmt', 'yuv420p', '-r', '25', '-y',
			'-b:v', bitrate, '-f', 'vob', output
		])
	def cleanCaptureDir(self):
		if not os.path.isdir(self._captureDir):
			return
		# Only remove snapshot frames, nothing else that may live there.
		for filename in os.listdir(self._captureDir):
			if not fnmatch.fnmatch(filename, "*.jpg"):
				continue
			os.remove(os.path.join(self._captureDir, filename))
class ZTimelapse(Timelapse):
	"""Timelapse variant that grabs one frame on every Z-axis (layer) change."""
	def __init__(self):
		Timelapse.__init__(self)
	def onZChange(self, oldZ, newZ):
		# Every layer change triggers exactly one snapshot.
		self.captureImage()
class TimedTimelapse(Timelapse):
	"""Timelapse variant that grabs a frame every ``interval`` seconds while
	a print job is running."""
	def __init__(self, interval=1):
		Timelapse.__init__(self)
		self._interval = interval
		if self._interval < 1:
			self._interval = 1 # force minimum interval of 1s
		self._timerThread = None
	def onPrintjobStarted(self, filename):
		Timelapse.onPrintjobStarted(self, filename)
		# NOTE(review): _timerThread is never reset to None after the worker
		# exits, so this guard prevents a timer from ever starting for a
		# second print job in the same process — verify and fix upstream.
		if self._timerThread is not None:
			return
		self._timerThread = threading.Thread(target=self.timerWorker)
		self._timerThread.daemon = True
		self._timerThread.start()
	def timerWorker(self):
		# Runs until stopTimelapse() clears _inTimelapse.
		while self._inTimelapse:
			self.captureImage()
			time.sleep(self._interval) | unknown | codeparrot/codeparrot-clean | |
/* ae.c module for illumos event ports.
*
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <errno.h>
#include <port.h>
#include <poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include <stdio.h>
static int evport_debug = 0;
/*
* This file implements the ae API using event ports, present on Solaris-based
* systems since Solaris 10. Using the event port interface, we associate file
* descriptors with the port. Each association also includes the set of poll(2)
* events that the consumer is interested in (e.g., POLLIN and POLLOUT).
*
* There's one tricky piece to this implementation: when we return events via
* aeApiPoll, the corresponding file descriptors become dissociated from the
* port. This is necessary because poll events are level-triggered, so if the
* fd didn't become dissociated, it would immediately fire another event since
* the underlying state hasn't changed yet. We must re-associate the file
* descriptor, but only after we know that our caller has actually read from it.
* The ae API does not tell us exactly when that happens, but we do know that
* it must happen by the time aeApiPoll is called again. Our solution is to
* keep track of the last fds returned by aeApiPoll and re-associate them next
* time aeApiPoll is invoked.
*
* To summarize, in this module, each fd association is EITHER (a) represented
* only via the in-kernel association OR (b) represented by pending_fds and
* pending_masks. (b) is only true for the last fds we returned from aeApiPoll,
* and only until we enter aeApiPoll again (at which point we restore the
* in-kernel association).
*/
#define MAX_EVENT_BATCHSZ 512
typedef struct aeApiState {
int portfd; /* event port */
uint_t npending; /* # of pending fds */
int pending_fds[MAX_EVENT_BATCHSZ]; /* pending fds */
int pending_masks[MAX_EVENT_BATCHSZ]; /* pending fds' masks */
} aeApiState;
/* Allocate the evport backend state, create the event port, and mark every
 * pending slot unused. Returns 0 on success, -1 on allocation or
 * port_create() failure. */
static int aeApiCreate(aeEventLoop *eventLoop) {
    aeApiState *state = zmalloc(sizeof(*state));
    int slot;

    if (state == NULL) return -1;

    state->portfd = port_create();
    if (state->portfd == -1) {
        zfree(state);
        return -1;
    }
    anetCloexec(state->portfd);

    state->npending = 0;
    for (slot = 0; slot < MAX_EVENT_BATCHSZ; slot++) {
        state->pending_fds[slot] = -1;
        state->pending_masks[slot] = AE_NONE;
    }

    eventLoop->apidata = state;
    return 0;
}
/* The evport backend keeps no per-setsize structures, so resizing the event
 * loop is a no-op; the parameters are voided only to silence warnings. */
static int aeApiResize(aeEventLoop *eventLoop, int setsize) {
    (void) eventLoop;
    (void) setsize;
    return 0;
}
/* Close the event port and release the backend state. */
static void aeApiFree(aeEventLoop *eventLoop) {
    aeApiState *state = eventLoop->apidata;

    close(state->portfd);
    zfree(state);
}
/* Linear scan of the pending-fd table. Returns the slot index holding "fd",
 * or -1 when the fd is not awaiting re-association. */
static int aeApiLookupPending(aeApiState *state, int fd) {
    uint_t slot;

    for (slot = 0; slot < state->npending; slot++) {
        if (state->pending_fds[slot] == fd) return slot;
    }
    return -1;
}
/*
 * Helper function to invoke port_associate for the given fd and mask.
 * Translates the AE_* mask into poll(2) events, logs failures to stderr,
 * and returns port_associate()'s return value (0 on success, -1 on error).
 */
static int aeApiAssociate(const char *where, int portfd, int fd, int mask) {
    int events = 0;
    int rv, err;

    if (mask & AE_READABLE)
        events |= POLLIN;
    if (mask & AE_WRITABLE)
        events |= POLLOUT;

    if (evport_debug)
        fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events);

    /* The AE mask is smuggled through the user-data pointer so aeApiPoll can
     * recover it from portev_user when the event fires. */
    rv = port_associate(portfd, PORT_SOURCE_FD, fd, events,
        (void *)(uintptr_t)mask);
    err = errno; /* snapshot errno before any fprintf can clobber it */

    if (evport_debug)
        fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err));

    if (rv == -1) {
        fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err));

        if (err == EAGAIN)
            /* Bug fix: this was the only diagnostic missing its trailing
             * newline, which glued it to the next stderr line. */
            fprintf(stderr, "aeApiAssociate: event port limit exceeded.\n");
    }

    return rv;
}
/* Register interest in "mask" events on "fd", merging with whatever events
 * the event loop already tracks for that fd. Returns 0 on success, -1 on
 * association failure. */
static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
    aeApiState *state = eventLoop->apidata;
    int fullmask, pfd;
    if (evport_debug)
        fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask);
    /*
     * Since port_associate's "events" argument replaces any existing events, we
     * must be sure to include whatever events are already associated when
     * we call port_associate() again.
     */
    fullmask = mask | eventLoop->events[fd].mask;
    pfd = aeApiLookupPending(state, fd);
    if (pfd != -1) {
        /*
         * This fd was recently returned from aeApiPoll. It should be safe to
         * assume that the consumer has processed that poll event, but we play
         * it safer by simply updating pending_mask. The fd will be
         * re-associated as usual when aeApiPoll is called again.
         */
        if (evport_debug)
            fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd);
        state->pending_masks[pfd] |= fullmask;
        return 0;
    }
    return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask));
}
/* Stop tracking "mask" events on "fd". Either updates the pending table (if
 * the fd is awaiting re-association), dissociates the fd entirely, or
 * re-associates it with the reduced mask. Aborts on unexpected failures. */
static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
    aeApiState *state = eventLoop->apidata;
    int fullmask, pfd;
    if (evport_debug)
        fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask);
    pfd = aeApiLookupPending(state, fd);
    if (pfd != -1) {
        if (evport_debug)
            fprintf(stderr, "deleting event from pending fd %d\n", fd);
        /*
         * This fd was just returned from aeApiPoll, so it's not currently
         * associated with the port. All we need to do is update
         * pending_mask appropriately.
         */
        state->pending_masks[pfd] &= ~mask;
        if (state->pending_masks[pfd] == AE_NONE)
            state->pending_fds[pfd] = -1;
        return;
    }
    /*
     * The fd is currently associated with the port. Like with the add case
     * above, we must look at the full mask for the file descriptor before
     * updating that association. We don't have a good way of knowing what the
     * events are without looking into the eventLoop state directly. We rely on
     * the fact that our caller has already updated the mask in the eventLoop.
     */
    /* We always remove the specified events from the current mask,
     * regardless of whether eventLoop->events[fd].mask has been updated yet. */
    fullmask = eventLoop->events[fd].mask & ~mask;
    if (fullmask == AE_NONE) {
        /*
         * We're removing *all* events, so use port_dissociate to remove the
         * association completely. Failure here indicates a bug.
         */
        if (evport_debug)
            fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd);
        if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) {
            perror("aeApiDelEvent: port_dissociate");
            abort(); /* will not return */
        }
    } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd,
        fullmask) != 0) {
        /*
         * ENOMEM is a potentially transient condition, but the kernel won't
         * generally return it unless things are really bad. EAGAIN indicates
         * we've reached a resource limit, for which it doesn't make sense to
         * retry (counter-intuitively). All other errors indicate a bug. In any
         * of these cases, the best we can do is to abort.
         */
        abort(); /* will not return */
    }
}
/* Wait (up to "tvp") for events, translating poll(2) bits back into AE_*
 * masks in eventLoop->fired. Returned fds are recorded in the pending table
 * so the next call can re-associate them (see the file-top comment). */
static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
    aeApiState *state = eventLoop->apidata;
    struct timespec timeout, *tsp;
    uint_t mask, i;
    uint_t nevents;
    port_event_t event[MAX_EVENT_BATCHSZ];
    /*
     * If we've returned fd events before, we must re-associate them with the
     * port now, before calling port_get(). See the block comment at the top of
     * this file for an explanation of why.
     */
    for (i = 0; i < state->npending; i++) {
        if (state->pending_fds[i] == -1)
            /* This fd has since been deleted. */
            continue;
        if (aeApiAssociate("aeApiPoll", state->portfd,
            state->pending_fds[i], state->pending_masks[i]) != 0) {
            /* See aeApiDelEvent for why this case is fatal. */
            abort();
        }
        state->pending_masks[i] = AE_NONE;
        state->pending_fds[i] = -1;
    }
    state->npending = 0;
    if (tvp != NULL) {
        timeout.tv_sec = tvp->tv_sec;
        timeout.tv_nsec = tvp->tv_usec * 1000;
        tsp = &timeout;
    } else {
        /* NULL timespec means block indefinitely. */
        tsp = NULL;
    }
    /*
     * port_getn can return with errno == ETIME having returned some events (!).
     * So if we get ETIME, we check nevents, too.
     */
    nevents = 1; /* in/out argument: request at least one event */
    if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents,
        tsp) == -1 && (errno != ETIME || nevents == 0)) {
        if (errno == ETIME || errno == EINTR)
            return 0;
        /* Any other error indicates a bug. */
        panic("aeApiPoll: port_getn, %s", strerror(errno));
    }
    state->npending = nevents;
    for (i = 0; i < nevents; i++) {
        mask = 0;
        if (event[i].portev_events & POLLIN)
            mask |= AE_READABLE;
        if (event[i].portev_events & POLLOUT)
            mask |= AE_WRITABLE;
        eventLoop->fired[i].fd = event[i].portev_object;
        eventLoop->fired[i].mask = mask;
        if (evport_debug)
            fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n",
                (int)event[i].portev_object, mask);
        /* Remember the fd and the AE mask stashed in portev_user by
         * aeApiAssociate, for re-association on the next call. */
        state->pending_fds[i] = event[i].portev_object;
        state->pending_masks[i] = (uintptr_t)event[i].portev_user;
    }
    return nevents;
}
/* Return the backend's name for INFO/debug reporting. */
static char *aeApiName(void) {
    return "evport";
} | c | github | https://github.com/redis/redis | src/ae_evport.c
import Avatar from "../components/avatar";
import DateFormatter from "../components/date-formatter";
import CoverImage from "./cover-image";
import Link from "next/link";
/**
 * Card-style preview of a blog post: cover image, linked title, formatted
 * date, excerpt, and author avatar. `slug` drives the /posts/[slug] links.
 */
export default function PostPreview({
  title,
  coverImage,
  date,
  excerpt,
  author,
  slug,
}) {
  return (
    <div>
      <div className="mb-5">
        <CoverImage
          slug={slug}
          title={title}
          src={coverImage}
          height={278}
          width={556}
        />
      </div>
      <h3 className="text-3xl mb-3 leading-snug">
        <Link href={`/posts/${slug}`} className="hover:underline">
          {title}
        </Link>
      </h3>
      <div className="text-lg mb-4">
        <DateFormatter dateString={date} />
      </div>
      <p className="text-lg leading-relaxed mb-4">{excerpt}</p>
      <Avatar name={author.name} picture={author.picture} />
    </div>
  );
} | javascript | github | https://github.com/vercel/next.js | examples/cms-tina/components/post-preview.js
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
const {
ObjectDefineProperty,
ObjectSetPrototypeOf,
Symbol,
} = primordials;
const { Readable, finished } = require('stream');
const kHeaders = Symbol('kHeaders');
const kHeadersDistinct = Symbol('kHeadersDistinct');
const kHeadersCount = Symbol('kHeadersCount');
const kTrailers = Symbol('kTrailers');
const kTrailersDistinct = Symbol('kTrailersDistinct');
const kTrailersCount = Symbol('kTrailersCount');
function readStart(socket) {
if (socket && !socket._paused && socket.readable)
socket.resume();
}
function readStop(socket) {
if (socket)
socket.pause();
}
/* Abstract base class for ServerRequest and ClientResponse. */
// Readable message stream bound to `socket` (socket may be absent for
// detached messages). Header/trailer objects are materialized lazily from
// the raw arrays via the accessors defined below.
function IncomingMessage(socket) {
  let streamOptions;
  if (socket) {
    // Inherit the socket's buffering threshold.
    streamOptions = {
      highWaterMark: socket.readableHighWaterMark,
    };
  }
  Readable.call(this, streamOptions);
  // Suppress read-ahead until a consumer actually starts reading (see _read).
  this._readableState.readingMore = true;
  this.socket = socket;
  this.httpVersionMajor = null;
  this.httpVersionMinor = null;
  this.httpVersion = null;
  this.complete = false;
  // Lazily-built header object plus the raw flat [name, value, ...] array.
  this[kHeaders] = null;
  this[kHeadersCount] = 0;
  this.rawHeaders = [];
  this[kTrailers] = null;
  this[kTrailersCount] = 0;
  this.rawTrailers = [];
  this.joinDuplicateHeaders = false;
  this.aborted = false;
  this.upgrade = null;
  // request (server) only
  this.url = '';
  this.method = null;
  // response (client) only
  this.statusCode = null;
  this.statusMessage = null;
  this.client = socket;
  this._consuming = false;
  // Flag for when we decide that this message cannot possibly be
  // read by the user, so there's no point continuing to handle it.
  this._dumped = false;
}
ObjectSetPrototypeOf(IncomingMessage.prototype, Readable.prototype);
ObjectSetPrototypeOf(IncomingMessage, Readable);
// Legacy alias: `connection` mirrors `socket` for backward compatibility.
ObjectDefineProperty(IncomingMessage.prototype, 'connection', {
  __proto__: null,
  get: function() {
    return this.socket;
  },
  set: function(val) {
    this.socket = val;
  },
});
// Lazily build the joined-header object from rawHeaders on first access.
ObjectDefineProperty(IncomingMessage.prototype, 'headers', {
  __proto__: null,
  get: function() {
    if (!this[kHeaders]) {
      this[kHeaders] = {};
      // rawHeaders is a flat [name, value, name, value, ...] array.
      const src = this.rawHeaders;
      const dst = this[kHeaders];
      for (let n = 0; n < this[kHeadersCount]; n += 2) {
        this._addHeaderLine(src[n + 0], src[n + 1], dst);
      }
    }
    return this[kHeaders];
  },
  set: function(val) {
    this[kHeaders] = val;
  },
});
// Like `headers`, but every field maps to an array of all received values
// instead of a joined string.
ObjectDefineProperty(IncomingMessage.prototype, 'headersDistinct', {
  __proto__: null,
  get: function() {
    if (!this[kHeadersDistinct]) {
      this[kHeadersDistinct] = {};
      const src = this.rawHeaders;
      const dst = this[kHeadersDistinct];
      for (let n = 0; n < this[kHeadersCount]; n += 2) {
        this._addHeaderLineDistinct(src[n + 0], src[n + 1], dst);
      }
    }
    return this[kHeadersDistinct];
  },
  set: function(val) {
    this[kHeadersDistinct] = val;
  },
});
// Lazily build the joined-trailer object from rawTrailers on first access.
ObjectDefineProperty(IncomingMessage.prototype, 'trailers', {
  __proto__: null,
  get: function() {
    if (!this[kTrailers]) {
      this[kTrailers] = {};
      const src = this.rawTrailers;
      const dst = this[kTrailers];
      for (let n = 0; n < this[kTrailersCount]; n += 2) {
        this._addHeaderLine(src[n + 0], src[n + 1], dst);
      }
    }
    return this[kTrailers];
  },
  set: function(val) {
    this[kTrailers] = val;
  },
});
// Like `trailers`, but every field maps to an array of all received values.
ObjectDefineProperty(IncomingMessage.prototype, 'trailersDistinct', {
  __proto__: null,
  get: function() {
    if (!this[kTrailersDistinct]) {
      this[kTrailersDistinct] = {};
      const src = this.rawTrailers;
      const dst = this[kTrailersDistinct];
      for (let n = 0; n < this[kTrailersCount]; n += 2) {
        this._addHeaderLineDistinct(src[n + 0], src[n + 1], dst);
      }
    }
    return this[kTrailersDistinct];
  },
  set: function(val) {
    this[kTrailersDistinct] = val;
  },
});
// Forward the inactivity timeout to the underlying socket; `callback`, when
// given, is attached as a 'timeout' listener. Returns `this` for chaining.
IncomingMessage.prototype.setTimeout = function setTimeout(msecs, callback) {
  if (callback) this.on('timeout', callback);
  this.socket.setTimeout(msecs);
  return this;
};
// Argument n cannot be factored out due to the overhead of
// argument adaptor frame creation inside V8 in case that number of actual
// arguments is different from expected arguments.
// Ref: https://bugs.chromium.org/p/v8/issues/detail?id=10201
// NOTE: Argument adapt frame issue might be solved in V8 engine v8.9.
// Refactoring `n` out might be possible when V8 is upgraded to that
// version.
// Ref: https://v8.dev/blog/v8-release-89
// Readable._read hook; `n` is unused but kept for the reasons in the
// comment block above.
IncomingMessage.prototype._read = function _read(n) {
  // First read: stop suppressing read-ahead (set in the constructor).
  if (!this._consuming) {
    this._readableState.readingMore = false;
    this._consuming = true;
  }
  // We actually do almost nothing here, because the parserOnBody
  // function fills up our internal buffer directly. However, we
  // do need to unpause the underlying socket so that it flows.
  if (this.socket.readable)
    readStart(this.socket);
};
// It's possible that the socket will be destroyed, and removed from
// any messages, before ever calling this. In that case, just skip
// it, since something else is destroying this connection anyway.
// Tear the message down. A message destroyed before its body completed is
// flagged `aborted` and emits 'aborted' for backward compatibility; the
// callback is always deferred via process.nextTick through onError.
IncomingMessage.prototype._destroy = function _destroy(err, cb) {
  if (!this.readableEnded || !this.complete) {
    this.aborted = true;
    this.emit('aborted');
  }
  // If aborted and the underlying socket is not already destroyed,
  // destroy it.
  // We have to check if the socket is already destroyed because finished
  // does not call the callback when this method is invoked from `_http_client`
  // in `test/parallel/test-http-client-spurious-aborted.js`
  if (this.socket && !this.socket.destroyed && this.aborted) {
    this.socket.destroy(err);
    const cleanup = finished(this.socket, (e) => {
      // A premature close here is the expected outcome of our own destroy.
      if (e?.code === 'ERR_STREAM_PREMATURE_CLOSE') {
        e = null;
      }
      cleanup();
      process.nextTick(onError, this, e || err, cb);
    });
  } else {
    process.nextTick(onError, this, err, cb);
  }
};
IncomingMessage.prototype._addHeaderLines = _addHeaderLines;
// Store `n` raw entries (flat [name, value, ...] array) and, if the lazy
// header/trailer object was already materialized, fold the new entries into
// it. Entries are headers until the message is complete, trailers after.
function _addHeaderLines(headers, n) {
  if (!headers?.length) return;

  let target;
  if (this.complete) {
    this.rawTrailers = headers;
    this[kTrailersCount] = n;
    target = this[kTrailers];
  } else {
    this.rawHeaders = headers;
    this[kHeadersCount] = n;
    target = this[kHeaders];
  }

  // Nothing more to do while the joined object is still unmaterialized.
  if (!target) return;
  for (let idx = 0; idx < n; idx += 2) {
    this._addHeaderLine(headers[idx], headers[idx + 1], target);
  }
}
// This function is used to help avoid the lowercasing of a field name if it
// matches a 'traditional cased' version of a field name. It then returns the
// lowercased name to both avoid calling toLowerCase() a second time and to
// indicate whether the field was a 'no duplicates' field. If a field is not a
// 'no duplicates' field, a `0` byte is prepended as a flag. The one exception
// to this is the Set-Cookie header which is indicated by a `1` byte flag, since
// it is an 'array' field and thus is treated differently in _addHeaderLines().
// TODO: perhaps http_parser could be returning both raw and lowercased versions
// of known header names to avoid us having to call toLowerCase() for those
// headers.
// Tag `field` for _addHeaderLine: bare lowercased name = singleton
// (duplicates dropped), '\u0000' prefix = join duplicates with ', ',
// '\u0001' = Set-Cookie (array), '\u0002' prefix = Cookie (join with '; ').
// The switch is keyed on field length so each candidate costs at most a few
// string comparisons.
function matchKnownFields(field, lowercased) {
  switch (field.length) {
    case 3:
      if (field === 'Age' || field === 'age') return 'age';
      break;
    case 4:
      if (field === 'Host' || field === 'host') return 'host';
      if (field === 'From' || field === 'from') return 'from';
      if (field === 'ETag' || field === 'etag') return 'etag';
      if (field === 'Date' || field === 'date') return '\u0000date';
      if (field === 'Vary' || field === 'vary') return '\u0000vary';
      break;
    case 6:
      if (field === 'Server' || field === 'server') return 'server';
      if (field === 'Cookie' || field === 'cookie') return '\u0002cookie';
      if (field === 'Origin' || field === 'origin') return '\u0000origin';
      if (field === 'Expect' || field === 'expect') return '\u0000expect';
      if (field === 'Accept' || field === 'accept') return '\u0000accept';
      break;
    case 7:
      if (field === 'Referer' || field === 'referer') return 'referer';
      if (field === 'Expires' || field === 'expires') return 'expires';
      if (field === 'Upgrade' || field === 'upgrade') return '\u0000upgrade';
      break;
    case 8:
      if (field === 'Location' || field === 'location')
        return 'location';
      if (field === 'If-Match' || field === 'if-match')
        return '\u0000if-match';
      break;
    case 10:
      if (field === 'User-Agent' || field === 'user-agent')
        return 'user-agent';
      if (field === 'Set-Cookie' || field === 'set-cookie')
        return '\u0001';
      if (field === 'Connection' || field === 'connection')
        return '\u0000connection';
      break;
    case 11:
      if (field === 'Retry-After' || field === 'retry-after')
        return 'retry-after';
      break;
    case 12:
      if (field === 'Content-Type' || field === 'content-type')
        return 'content-type';
      if (field === 'Max-Forwards' || field === 'max-forwards')
        return 'max-forwards';
      break;
    case 13:
      if (field === 'Authorization' || field === 'authorization')
        return 'authorization';
      if (field === 'Last-Modified' || field === 'last-modified')
        return 'last-modified';
      if (field === 'Cache-Control' || field === 'cache-control')
        return '\u0000cache-control';
      if (field === 'If-None-Match' || field === 'if-none-match')
        return '\u0000if-none-match';
      break;
    case 14:
      if (field === 'Content-Length' || field === 'content-length')
        return 'content-length';
      break;
    case 15:
      if (field === 'Accept-Encoding' || field === 'accept-encoding')
        return '\u0000accept-encoding';
      if (field === 'Accept-Language' || field === 'accept-language')
        return '\u0000accept-language';
      if (field === 'X-Forwarded-For' || field === 'x-forwarded-for')
        return '\u0000x-forwarded-for';
      break;
    case 16:
      if (field === 'Content-Encoding' || field === 'content-encoding')
        return '\u0000content-encoding';
      if (field === 'X-Forwarded-Host' || field === 'x-forwarded-host')
        return '\u0000x-forwarded-host';
      break;
    case 17:
      if (field === 'If-Modified-Since' || field === 'if-modified-since')
        return 'if-modified-since';
      if (field === 'Transfer-Encoding' || field === 'transfer-encoding')
        return '\u0000transfer-encoding';
      if (field === 'X-Forwarded-Proto' || field === 'x-forwarded-proto')
        return '\u0000x-forwarded-proto';
      break;
    case 19:
      if (field === 'Proxy-Authorization' || field === 'proxy-authorization')
        return 'proxy-authorization';
      if (field === 'If-Unmodified-Since' || field === 'if-unmodified-since')
        return 'if-unmodified-since';
      break;
  }
  // Second pass: the field has already been lowercased, so it is unknown —
  // tag it joinable.
  if (lowercased) {
    return '\u0000' + field;
  }
  return matchKnownFields(field.toLowerCase(), true);
}
// Add the given (field, value) pair to the message
//
// Per RFC2616, section 4.2 it is acceptable to join multiple instances of the
// same header with a ', ' if the header in question supports specification of
// multiple values this way. The one exception to this is the Cookie header,
// which has multiple values joined with a '; ' instead. If a header's values
// cannot be joined in either of these ways, we declare the first instance the
// winner and drop the second. Extended header fields (those beginning with
// 'x-') are always joined.
IncomingMessage.prototype._addHeaderLine = _addHeaderLine;
// `field` is tagged by matchKnownFields (see the comment block above) and
// the flag byte selects the duplicate-merging strategy.
function _addHeaderLine(field, value, dest) {
  field = matchKnownFields(field);
  const flag = field.charCodeAt(0);
  if (flag === 0 || flag === 2) {
    // Strip the flag byte before using the name as a key.
    field = field.slice(1);
    // Make a delimited list
    if (typeof dest[field] === 'string') {
      dest[field] += (flag === 0 ? ', ' : '; ') + value;
    } else {
      dest[field] = value;
    }
  } else if (flag === 1) {
    // Array header -- only Set-Cookie at the moment
    if (dest['set-cookie'] !== undefined) {
      dest['set-cookie'].push(value);
    } else {
      dest['set-cookie'] = [value];
    }
  } else if (this.joinDuplicateHeaders) {
    // RFC 9110 https://www.rfc-editor.org/rfc/rfc9110#section-5.2
    // https://github.com/nodejs/node/issues/45699
    // allow authorization multiple fields
    // Make a delimited list
    if (dest[field] === undefined) {
      dest[field] = value;
    } else {
      dest[field] += ', ' + value;
    }
  } else if (dest[field] === undefined) {
    // Drop duplicates
    dest[field] = value;
  }
}
IncomingMessage.prototype._addHeaderLineDistinct = _addHeaderLineDistinct;
function _addHeaderLineDistinct(field, value, dest) {
field = field.toLowerCase();
if (!dest[field]) {
dest[field] = [value];
} else {
dest[field].push(value);
}
}
// Mark the stream fully consumed and closed without emitting any events by
// poking the internal ReadableState directly — these field names must track
// the internal streams implementation.
IncomingMessage.prototype._dumpAndCloseReadable = function _dumpAndCloseReadable() {
  this._dumped = true;
  this._readableState.ended = true;
  this._readableState.endEmitted = true;
  this._readableState.destroyed = true;
  this._readableState.closed = true;
  this._readableState.closeEmitted = true;
};
// Consume and discard the entire body instead of delivering it — used when
// the message will never be read. Idempotent.
IncomingMessage.prototype._dump = function _dump() {
  if (this._dumped) return;
  this._dumped = true;
  // Buffered chunks could still fire 'data'; drop the listeners first so
  // resume() discards the data instead of delivering it.
  this.removeAllListeners('data');
  this.resume();
};
function onError(self, error, cb) {
// This is to keep backward compatible behavior.
// An error is emitted only if there are listeners attached to the event.
if (self.listenerCount('error') === 0) {
cb();
} else {
cb(error);
}
}
module.exports = {
IncomingMessage,
readStart,
readStop,
}; | javascript | github | https://github.com/nodejs/node | lib/_http_incoming.js |
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2019 Linaro Ltd.
%YAML 1.2
---
$id: http://devicetree.org/schemas/misc/intel,ixp4xx-ahb-queue-manager.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Intel IXP4xx AHB Queue Manager
maintainers:
- Linus Walleij <linusw@kernel.org>
description: |
The IXP4xx AHB Queue Manager maintains queues as circular buffers in
an 8KB embedded SRAM along with hardware pointers. It is used by both
the XScale processor and the NPEs (Network Processing Units) in the
IXP4xx for accelerating queues, especially for networking. Clients pick
queues from the queue manager with foo-queue = <&qmgr N> where the
&qmgr is a phandle to the queue manager and N is the queue resource
number. The queue resources available and their specific purpose
on a certain IXP4xx system will vary.
properties:
compatible:
items:
- const: intel,ixp4xx-ahb-queue-manager
reg:
maxItems: 1
interrupts:
items:
- description: Interrupt for queues 0-31
- description: Interrupt for queues 32-63
required:
- compatible
- reg
- interrupts
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
qmgr: queue-manager@60000000 {
compatible = "intel,ixp4xx-ahb-queue-manager";
reg = <0x60000000 0x4000>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <4 IRQ_TYPE_LEVEL_HIGH>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml |
#!/usr/bin/env python3
import argparse
import tarfile
import os
import urllib2
import shutil
METAPHLAN2_URL = 'https://bitbucket.org/biobakery/metaphlan2/get/2.5.0.tar.gz'
def download_file(url):
"""Download a file from a URL
Fetches a file from the specified URL.
Returns the name that the file is saved with.
"""
print("Downloading %s" % url)
target = os.path.basename(url)
print("Saving to %s" % target)
open(target, 'wb').write(urllib2.urlopen(url).read())
return target
def unpack_tar_archive(tar_file):
"""Extract files from a TAR archive
Given a TAR archive (which optionally can be
compressed with either gzip or bz2), extract the
files it contains and return a list of the
resulting file names and paths.
Once all the files are extracted the TAR archive
file is deleted from the file system.
"""
file_list = []
if not tarfile.is_tarfile(tar_file):
print("%s: not TAR file")
return [tar_file]
t = tarfile.open(tar_file)
t.extractall(".")
print("Removing %s" % tar_file)
os.remove(tar_file)
return file_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download MetaPhlAn2 database')
parser.add_argument('--output', help="Installation directory")
args = parser.parse_args()
if args.output:
output = args.output
else:
output = os.path.dirname(os.path.realpath(__file__))
print(output)
if not os.path.exists(output):
os.makedirs(output)
metaphlan2_tarfile = download_file(METAPHLAN2_URL)
file_list = unpack_tar_archive(metaphlan2_tarfile)
print(file_list)
shutil.move("biobakery-metaphlan2-c43e40a443ed/db_v20", output) | unknown | codeparrot/codeparrot-clean | ||
package build
import (
"context"
"os"
"testing"
"github.com/moby/moby/v2/internal/testutil"
"github.com/moby/moby/v2/internal/testutil/environment"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
)
var (
testEnv *environment.Execution
baseContext context.Context
)
func TestMain(m *testing.M) {
shutdown := testutil.ConfigureTracing()
ctx, span := otel.Tracer("").Start(context.Background(), "integration/build/TestMain")
baseContext = ctx
var err error
testEnv, err = environment.New(ctx)
if err != nil {
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
err = environment.EnsureFrozenImagesLinux(ctx, testEnv)
if err != nil {
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
testEnv.Print()
code := m.Run()
if code != 0 {
span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
}
span.End()
shutdown(ctx)
os.Exit(code)
}
func setupTest(t *testing.T) context.Context {
ctx := testutil.StartSpan(baseContext, t)
environment.ProtectAll(ctx, t, testEnv)
t.Cleanup(func() { testEnv.Clean(ctx, t) })
return ctx
} | go | github | https://github.com/moby/moby | integration/build/main_test.go |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix || (js && wasm) || wasip1
package os
import (
"errors"
"syscall"
"time"
)
const (
// Special values for Process.Pid.
pidUnset = 0
pidReleased = -1
)
func (p *Process) wait() (ps *ProcessState, err error) {
// Which type of Process do we have?
if p.handle != nil {
// pidfd
return p.pidfdWait()
} else {
// Regular PID
return p.pidWait()
}
}
func (p *Process) pidWait() (*ProcessState, error) {
// TODO(go.dev/issue/67642): When there are concurrent Wait calls, one
// may wait on the wrong process if the PID is reused after the
// completes its wait.
//
// Checking for statusDone here would not be a complete fix, as the PID
// could still be waited on and reused prior to blockUntilWaitable.
switch p.pidStatus() {
case statusReleased:
return nil, syscall.EINVAL
}
// If we can block until Wait4 will succeed immediately, do so.
ready, err := p.blockUntilWaitable()
if err != nil {
return nil, err
}
if ready {
// Mark the process done now, before the call to Wait4,
// so that Process.pidSignal will not send a signal.
p.doRelease(statusDone)
// Acquire a write lock on sigMu to wait for any
// active call to the signal method to complete.
p.sigMu.Lock()
p.sigMu.Unlock()
}
var (
status syscall.WaitStatus
rusage syscall.Rusage
)
pid1, err := ignoringEINTR2(func() (int, error) {
return syscall.Wait4(p.Pid, &status, 0, &rusage)
})
if err != nil {
return nil, NewSyscallError("wait", err)
}
p.doRelease(statusDone)
return &ProcessState{
pid: pid1,
status: status,
rusage: &rusage,
}, nil
}
func (p *Process) signal(sig Signal) error {
s, ok := sig.(syscall.Signal)
if !ok {
return errors.New("os: unsupported signal type")
}
// Which type of Process do we have?
if p.handle != nil {
// pidfd
return p.pidfdSendSignal(s)
} else {
// Regular PID
return p.pidSignal(s)
}
}
func (p *Process) pidSignal(s syscall.Signal) error {
if p.Pid == pidReleased {
return errProcessReleased
}
if p.Pid == pidUnset {
return errors.New("os: process not initialized")
}
p.sigMu.RLock()
defer p.sigMu.RUnlock()
switch p.pidStatus() {
case statusDone:
return ErrProcessDone
case statusReleased:
return errProcessReleased
}
return convertESRCH(syscall.Kill(p.Pid, s))
}
func convertESRCH(err error) error {
if err == syscall.ESRCH {
return ErrProcessDone
}
return err
}
func findProcess(pid int) (p *Process, err error) {
h, err := pidfdFind(pid)
if err == ErrProcessDone {
// We can't return an error here since users are not expecting
// it. Instead, return a process with a "done" state already
// and let a subsequent Signal or Wait call catch that.
return newDoneProcess(pid), nil
} else if err != nil {
// Ignore other errors from pidfdFind, as the callers
// do not expect them. Fall back to using the PID.
return newPIDProcess(pid), nil
}
// Use the handle.
return newHandleProcess(pid, h), nil
}
func (p *ProcessState) userTime() time.Duration {
return time.Duration(p.rusage.Utime.Nano()) * time.Nanosecond
}
func (p *ProcessState) systemTime() time.Duration {
return time.Duration(p.rusage.Stime.Nano()) * time.Nanosecond
} | go | github | https://github.com/golang/go | src/os/exec_unix.go |
from scipy.stats import maxwell
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
mean, var, skew, kurt = maxwell.stats(moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(maxwell.ppf(0.01),
maxwell.ppf(0.99), 100)
ax.plot(x, maxwell.pdf(x),
'r-', lw=5, alpha=0.6, label='maxwell pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = maxwell()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = maxwell.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], maxwell.cdf(vals))
# True
# Generate random numbers:
r = maxwell.rvs(size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal;
import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin;
import org.elasticsearch.gradle.OS;
import org.elasticsearch.gradle.internal.conventions.util.Util;
import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener;
import org.elasticsearch.gradle.internal.test.SimpleCommandLineArgumentProvider;
import org.elasticsearch.gradle.internal.test.rerun.InternalTestRerunPlugin;
import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin;
import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider;
import org.gradle.api.Action;
import org.gradle.api.JavaVersion;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.configuration.BuildFeatures;
import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.testing.Test;
import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
import javax.inject.Inject;
import static java.util.stream.Collectors.joining;
import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;
import static org.elasticsearch.gradle.util.FileUtils.mkdirs;
import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure;
/**
* Applies commonly used settings to all Test tasks in the project
*/
public abstract class ElasticsearchTestBasePlugin implements Plugin<Project> {
public static final String DUMP_OUTPUT_ON_FAILURE_PROP_NAME = "dumpOutputOnFailure";
public static final Set<String> TEST_TASKS_WITH_ENTITLEMENTS = Set.of("test", "internalClusterTest");
@Inject
protected abstract ProviderFactory getProviderFactory();
@Inject
protected abstract BuildFeatures getBuildFeatures();
@Override
public void apply(Project project) {
project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
var buildParams = loadBuildParams(project);
project.getPluginManager().apply(InternalTestRerunPlugin.class);
project.getPluginManager().apply(GradleTestPolicySetupPlugin.class);
// for fips mode check
project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class);
// Default test task should run only unit tests
maybeConfigure(project.getTasks(), "test", Test.class, task -> task.include("**/*Tests.class"));
// none of this stuff is applicable to the `:buildSrc` project tests
File heapdumpDir = new File(project.getBuildDir(), "heapdump");
project.getTasks().withType(Test.class).configureEach(test -> {
File testOutputDir = new File(test.getReports().getJunitXml().getOutputLocation().getAsFile().get(), "output");
ErrorReportingTestListener listener = new ErrorReportingTestListener(test, testOutputDir);
test.getExtensions().getExtraProperties().set(DUMP_OUTPUT_ON_FAILURE_PROP_NAME, true);
test.getExtensions().add("errorReportingTestListener", listener);
test.addTestOutputListener(listener);
test.addTestListener(listener);
/*
* We use lazy-evaluated strings in order to configure system properties whose value will not be known until
* execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated
* as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due
* to the GStrings containing references to non-serializable objects.
*
* We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added
* side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the
* build cache key or up to date checking.
*/
SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider();
// We specifically use an anonymous inner class here because lambda task actions break Gradle cacheability
// See: https://docs.gradle.org/current/userguide/more_about_tasks.html#sec:how_does_it_work
test.doFirst(new Action<>() {
@Override
public void execute(Task t) {
mkdirs(testOutputDir);
mkdirs(heapdumpDir);
mkdirs(test.getWorkingDir());
mkdirs(test.getWorkingDir().toPath().resolve("temp").toFile());
// TODO remove once jvm.options are added to test system properties
test.systemProperty("java.locale.providers", "CLDR");
}
});
test.getJvmArgumentProviders().add(nonInputProperties);
test.getExtensions().add("nonInputProperties", nonInputProperties);
test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_")));
test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", buildParams.get().getDefaultParallel().toString())));
test.exclude("**/*$*.class");
test.jvmArgs(
"-Xmx" + System.getProperty("tests.heap.size", "512m"),
"-Xms" + System.getProperty("tests.heap.size", "512m"),
"-Dtests.testfeatures.enabled=true",
"--add-opens=java.base/java.util=ALL-UNNAMED",
// TODO: only open these for mockito when it is modularized
"--add-opens=java.base/java.security.cert=ALL-UNNAMED",
"--add-opens=java.base/java.nio.channels=ALL-UNNAMED",
"--add-opens=java.base/java.net=ALL-UNNAMED",
"--add-opens=java.base/javax.net.ssl=ALL-UNNAMED",
"--add-opens=java.base/java.nio.file=ALL-UNNAMED",
"--add-opens=java.base/java.time=ALL-UNNAMED",
"--add-opens=java.management/java.lang.management=ALL-UNNAMED",
"--enable-native-access=ALL-UNNAMED",
"--add-modules=jdk.incubator.vector",
"-XX:+HeapDumpOnOutOfMemoryError"
);
test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir));
test.getJvmArgumentProviders().add(() -> {
if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
return List.of("-Djava.security.manager=allow");
} else {
return List.of();
}
});
test.getJvmArgumentProviders()
.add(() -> List.of("-Dorg.apache.lucene.vectorization.upperJavaFeatureVersion=" + test.getJavaVersion().getMajorVersion()));
String argline = System.getProperty("tests.jvm.argline");
if (argline != null) {
test.jvmArgs((Object[]) argline.split(" "));
}
// Check if "tests.asserts" is false or "tests.jvm.argline" contains the "-da" flag.
boolean disableAssertions = Util.getBooleanProperty("tests.asserts", true) == false
|| (argline != null && (argline.contains("-da")))
|| (argline != null && (argline.contains("-disableassertions")));
if (disableAssertions) {
System.out.println("disable assertions");
test.setEnableAssertions(false);
}
Map<String, String> sysprops = Map.of(
"java.awt.headless",
"true",
"tests.artifact",
project.getName(),
"tests.security.manager",
"true",
"jna.nosys",
"true"
);
test.systemProperties(sysprops);
// ignore changing test seed when build is passed -Dignore.tests.seed for cacheability
// also ignore when configuration cache is on since the test seed as task input would break
// configuration cache reuse.
if (System.getProperty("ignore.tests.seed") != null || getBuildFeatures().getConfigurationCache().getActive().get()) {
nonInputProperties.systemProperty("tests.seed", buildParams.get().getTestSeedProvider());
} else {
test.systemProperty("tests.seed", buildParams.get().getTestSeed());
}
// don't track these as inputs since they contain absolute paths and break cache relocatability
File gradleUserHome = project.getGradle().getGradleUserHomeDir();
nonInputProperties.systemProperty("gradle.user.home", gradleUserHome);
nonInputProperties.systemProperty("workspace.dir", Util.locateElasticsearchWorkspace(project.getGradle()));
// we use 'temp' relative to CWD since this is per JVM and tests are forbidden from writing to CWD
nonInputProperties.systemProperty("java.io.tmpdir", test.getWorkingDir().toPath().resolve("temp"));
if (test.getName().equals("internalClusterTest")) {
// configure a node home directory independent of the Java temp dir so that entitlements can be properly enforced
nonInputProperties.systemProperty("tempDir", test.getWorkingDir().toPath().resolve("nodesTemp"));
}
SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
SourceSet mainSourceSet = sourceSets.findByName(SourceSet.MAIN_SOURCE_SET_NAME);
SourceSet testSourceSet = sourceSets.findByName(SourceSet.TEST_SOURCE_SET_NAME);
SourceSet internalClusterTestSourceSet = sourceSets.findByName("internalClusterTest");
if (TEST_TASKS_WITH_ENTITLEMENTS.contains(test.getName()) && mainSourceSet != null && testSourceSet != null) {
FileCollection mainRuntime = mainSourceSet.getRuntimeClasspath();
FileCollection testRuntime = testSourceSet.getRuntimeClasspath();
FileCollection internalClusterTestRuntime = ("internalClusterTest".equals(test.getName())
&& internalClusterTestSourceSet != null) ? internalClusterTestSourceSet.getRuntimeClasspath() : project.files();
FileCollection testOnlyFiles = testRuntime.plus(internalClusterTestRuntime).minus(mainRuntime);
test.doFirst(task -> test.environment("es.entitlement.testOnlyPath", testOnlyFiles.getAsPath()));
}
test.systemProperties(getProviderFactory().systemPropertiesPrefixedBy("tests.").get());
test.systemProperties(getProviderFactory().systemPropertiesPrefixedBy("es.").get());
// TODO: remove setting logging level via system property
test.systemProperty("tests.logger.level", "WARN");
// TODO: remove this once ctx isn't added to update script params in 7.0
test.systemProperty("es.scripting.update.ctx_in_params", "false");
// TODO: remove this property in 8.0
test.systemProperty("es.search.rewrite_sort", "true");
// TODO: remove this once cname is prepended to transport.publish_address by default in 8.0
test.systemProperty("es.transport.cname_in_publish_address", "true");
// Set netty system properties to the properties we configure in jvm.options
test.systemProperty("io.netty.noUnsafe", "true");
test.systemProperty("io.netty.noKeySetOptimization", "true");
test.systemProperty("io.netty.recycler.maxCapacityPerThread", "0");
test.testLogging(logging -> {
logging.setShowExceptions(true);
logging.setShowCauses(true);
logging.setExceptionFormat("full");
});
if (OS.current().equals(OS.WINDOWS) && System.getProperty("tests.timeoutSuite") == null) {
// override the suite timeout to 60 mins for windows, because it has the most inefficient filesystem known to man
test.systemProperty("tests.timeoutSuite", "3600000!");
}
/*
* If this project builds a shadow JAR then any unit tests should test against that artifact instead of
* compiled class output and dependency jars. This better emulates the runtime environment of consumers.
*/
project.getPluginManager().withPlugin("com.gradleup.shadow", p -> {
if (test.getName().equals(JavaPlugin.TEST_TASK_NAME)) {
// Remove output class files and any other dependencies from the test classpath, since the shadow JAR includes these
// Add any "shadow" dependencies. These are dependencies that are *not* bundled into the shadow JAR
Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.CONFIGURATION_NAME);
// Add the shadow JAR artifact itself
FileCollection shadowJar = project.files(project.getTasks().named("shadowJar"));
FileCollection mainRuntime = mainSourceSet.getRuntimeClasspath();
FileCollection testRuntime = testSourceSet.getRuntimeClasspath();
test.setClasspath(testRuntime.minus(mainRuntime).plus(shadowConfig).plus(shadowJar));
}
});
});
configureJavaBaseModuleOptions(project);
configureEntitlements(project);
}
/**
* Computes and sets the {@code --patch-module=java.base} and {@code --add-opens=java.base} JVM command line options.
*/
private void configureJavaBaseModuleOptions(Project project) {
project.getTasks().withType(Test.class).configureEach(test -> {
// patch immutable collections only for "test" task
FileCollection patchedImmutableCollections = test.getName().equals("test") ? patchedImmutableCollections(project) : null;
if (patchedImmutableCollections != null) {
test.getInputs().files(patchedImmutableCollections);
test.systemProperty("tests.hackImmutableCollections", "true");
}
FileCollection entitlementBridge = TEST_TASKS_WITH_ENTITLEMENTS.contains(test.getName()) ? entitlementBridge(project) : null;
if (entitlementBridge != null) {
test.getInputs().files(entitlementBridge);
}
test.getJvmArgumentProviders().add(() -> {
String javaBasePatch = Stream.concat(
singleFilePath(patchedImmutableCollections).map(str -> str + "/java.base"),
singleFilePath(entitlementBridge)
).collect(joining(File.pathSeparator));
return javaBasePatch.isEmpty()
? List.of()
: List.of("--patch-module=java.base=" + javaBasePatch, "--add-opens=java.base/java.util=ALL-UNNAMED");
});
});
}
private Stream<String> singleFilePath(FileCollection collection) {
return Stream.ofNullable(collection).filter(fc -> fc.isEmpty() == false).map(FileCollection::getSingleFile).map(File::toString);
}
private static FileCollection patchedImmutableCollections(Project project) {
String patchProject = ":test:immutable-collections-patch";
if (project.findProject(patchProject) == null) {
return null; // build tests may not have this project, just skip
}
String configurationName = "immutableCollectionsPatch";
FileCollection patchedFileCollection = project.getConfigurations()
.create(configurationName, config -> config.setCanBeConsumed(false));
var deps = project.getDependencies();
deps.add(configurationName, deps.project(Map.of("path", patchProject, "configuration", "patch")));
return patchedFileCollection;
}
private static FileCollection entitlementBridge(Project project) {
return project.getConfigurations().findByName("entitlementBridge");
}
/**
* Sets the required JVM options and system properties to enable entitlement enforcement on tests.
* <p>
* One command line option is set in {@link #configureJavaBaseModuleOptions} out of necessity,
* since the command line can have only one {@code --patch-module} option for a given module.
*/
private static void configureEntitlements(Project project) {
Configuration agentConfig = project.getConfigurations().create("entitlementAgent");
Project agent = project.findProject(":libs:entitlement:agent");
if (agent != null) {
agentConfig.defaultDependencies(
deps -> { deps.add(project.getDependencies().project(Map.of("path", ":libs:entitlement:agent"))); }
);
}
FileCollection agentFiles = agentConfig;
Configuration bridgeConfig = project.getConfigurations().create("entitlementBridge");
Project bridge = project.findProject(":libs:entitlement:bridge");
if (bridge != null) {
bridgeConfig.defaultDependencies(
deps -> { deps.add(project.getDependencies().project(Map.of("path", ":libs:entitlement:bridge"))); }
);
}
FileCollection bridgeFiles = bridgeConfig;
project.getTasks()
.withType(Test.class)
.matching(test -> TEST_TASKS_WITH_ENTITLEMENTS.contains(test.getName()))
.configureEach(test -> {
// See also SystemJvmOptions.maybeAttachEntitlementAgent.
SystemPropertyCommandLineArgumentProvider nonInputSystemProperties = test.getExtensions()
.getByType(SystemPropertyCommandLineArgumentProvider.class);
// Agent
test.getInputs().files(agentFiles).optional(true);
nonInputSystemProperties.systemProperty("es.entitlement.agentJar", agentFiles::getAsPath);
nonInputSystemProperties.systemProperty("jdk.attach.allowAttachSelf", () -> agentFiles.isEmpty() ? "false" : "true");
// Bridge
String modulesContainingEntitlementInstrumentation = "java.logging,java.net.http,java.naming,jdk.net";
test.getInputs().files(bridgeFiles).optional(true);
// Tests may not be modular, but the JDK still is
test.jvmArgs(
"--add-exports=java.base/org.elasticsearch.entitlement.bridge=ALL-UNNAMED,"
+ modulesContainingEntitlementInstrumentation
);
// Export internal JDK packages that are required (temporarily) to declare instrumentation
test.jvmArgs("--add-exports=jdk.jlink/jdk.tools.jlink.internal=ALL-UNNAMED");
test.jvmArgs("--add-exports=jdk.internal.vm.ci/jdk.vm.ci.services=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.ftp=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.file=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.jar=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.http=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.https=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.net.www.protocol.mailto=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/sun.nio.ch=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/jdk.internal.foreign=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/jdk.internal.foreign.abi=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.base/jdk.internal.foreign.layout=ALL-UNNAMED");
test.jvmArgs("--add-exports=java.net.http/jdk.internal.net.http=ALL-UNNAMED");
test.jvmArgs("--add-exports=jdk.jdi/com.sun.tools.jdi=ALL-UNNAMED");
});
}
} | java | github | https://github.com/elastic/elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.testing.mockmvc.assertj.mockmvctesterassertionsjson;
import org.assertj.core.api.InstanceOfAssertFactories;
import org.springframework.test.web.servlet.assertj.MockMvcTester;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.entry;
class FamilyControllerTests {
private final MockMvcTester mockMvc = MockMvcTester.of(new FamilyController());
void extractingPathAsMap() {
// tag::extract-asmap[]
assertThat(mockMvc.get().uri("/family")).bodyJson()
.extractingPath("$.members[0]")
.asMap()
.contains(entry("name", "Homer"));
// end::extract-asmap[]
}
void extractingPathAndConvertWithType() {
// tag::extract-convert[]
assertThat(mockMvc.get().uri("/family")).bodyJson()
.extractingPath("$.members[0]")
.convertTo(Member.class)
.satisfies(member -> assertThat(member.name).isEqualTo("Homer"));
// end::extract-convert[]
}
void extractingPathAndConvertWithAssertFactory() {
// tag::extract-convert-assert-factory[]
assertThat(mockMvc.get().uri("/family")).bodyJson()
.extractingPath("$.members")
.convertTo(InstanceOfAssertFactories.list(Member.class))
.hasSize(5)
.element(0).satisfies(member -> assertThat(member.name).isEqualTo("Homer"));
// end::extract-convert-assert-factory[]
}
void assertTheSimpsons() {
// tag::assert-file[]
assertThat(mockMvc.get().uri("/family")).bodyJson()
.isStrictlyEqualTo("sample/simpsons.json");
// end::assert-file[]
}
static class FamilyController {}
record Member(String name) {}
} | java | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/java/org/springframework/docs/testing/mockmvc/assertj/mockmvctesterassertionsjson/FamilyControllerTests.java |
import sys
_gui_libs = []
try:
import gi
gi.require_version('Gtk', '3.0') # nopep8
_gui_libs.append("gi")
except Exception as e:
print("import dans_pymodules: Exception caught when trying to import gi: {}".format(e))
print("label_combo, and mpl_canvas_wrapper will not be available!")
try:
import PyQt5
_gui_libs.append("qt")
except Exception as e:
print("import dans_pymodules: Exception caught when trying to import PyQt5: {}".format(e))
if sys.version_info.major == 3:
from .particles import *
from .mycolors import *
from .pylatex import *
from .particle_distribution import *
from .vector2d import *
from .vector import *
from .read_igun import *
from .power_of_two import power_of_two
from .coordinate_transformation_3d import *
from .field import *
from .particle_pusher import *
try:
import tkinter
_gui_libs.append("tk")
except Exception as e:
print("import dans_pymodules: Exception caught when trying to import tkinter: {}".format(e))
if "qt" in _gui_libs:
from .filedialog_qt import *
elif "gi" in _gui_libs:
from .filedialog_gtk import *
else:
from .filedialog_tk import *
if "gi" in _gui_libs:
from .label_combo import *
from .mpl_canvas_wrapper import *
elif sys.version_info.major == 2:
from particles import *
from mycolors import *
from pylatex import *
from particle_distribution import *
from vector2d import *
from vector import *
from read_igun import *
from power_of_two import *
from coordinate_transformation_3d import *
from field import *
from particle_pusher import *
try:
import Tkinter
_gui_libs.append("tk")
except Exception as e:
print("import dans_pymodules: Exception caught when trying to import Tkinter: {}".format(e))
if "qt" in _gui_libs:
from filedialog_qt import *
elif "gi" in _gui_libs:
from filedialog_gtk import *
else:
from filedialog_tk import *
if "gi" in _gui_libs:
from label_combo import *
from mpl_canvas_wrapper import *
else:
raise Exception("This version of python is not supported!") | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import os
import sys
import logging
import crontab
from website import settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def app_prefix(path):
return os.path.join(settings.APP_PATH, path)
def ensure_item(cron, command):
items = list(cron.find_command(command))
return items[0] if items else cron.new(command)
def main(dry_run=True):
cron = crontab.CronTab(user=settings.CRON_USER)
analytics = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/analytics.sh')))
analytics.hour.on(2)
analytics.minute.on(0) # Daily 2:00 a.m.
box = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/refresh_box_tokens.sh')))
box.hour.on(2)
box.minute.on(0) # Daily 2:00 a.m.
retractions = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/retract_registrations.sh')))
retractions.hour.on(0)
retractions.minute.on(0) # Daily 12 a.m.
embargoes = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/embargo_registrations.sh')))
embargoes.hour.on(0)
embargoes.minute.on(0) # Daily 12 a.m.
registration_approvals = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/approve_registrations.sh')))
registration_approvals.hour.on(0)
registration_approvals.minute.on(0) # Daily 12 a.m.
files_audit = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/osfstorage/files_audit.sh')))
files_audit.dow.on(0)
files_audit.hour.on(2)
files_audit.minute.on(0) # Sunday 2:00 a.m.
glacier_inventory = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/osfstorage/glacier_inventory.sh')))
glacier_inventory.dow.on(0)
glacier_inventory.hour.on(0)
glacier_inventory.minute.on(0) # Sunday 12:00 a.m.
glacier_audit = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/osfstorage/glacier_audit.sh')))
glacier_audit.dow.on(0)
glacier_audit.hour.on(6)
glacier_audit.minute.on(0) # Sunday 6:00 a.m.
triggered_mails = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/triggered_mails.sh')))
triggered_mails.hour.on(0)
triggered_mails.minute.on(0) # Daily 12 a.m.
send_queued_mails = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/send_queued_mails.sh')))
send_queued_mails.hour.on(12)
send_queued_mails.minute.on(0) # Daily 12 p.m.
usage_audit = ensure_item(cron, 'bash {}'.format(app_prefix('scripts/osfstorage/usage_audit.sh')))
usage_audit.hour.on(0)
usage_audit.minute.on(0) # Daily 12 a.m.
logger.info('Updating crontab file:')
logger.info(cron.render())
if not dry_run:
cron.write_to_user(settings.CRON_USER)
if __name__ == '__main__':
dry_run = 'dry' in sys.argv
main(dry_run=dry_run) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2007 - Nando Vieira
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# 2007-10-25 - Alexandre da Silva <simpsomboy@gmail.com>
# Obtained original program from Nando Vieira, and changed need use only
# python and mozembed, removed not used code
from gettext import gettext as _
import gedit
import gconf
import gtk
import gtk.gdk
import os
import pygtk
import webkit
import re
import urllib
from todo import parse_directory
# Environment variable that enables debug output (its integer value is the
# verbosity threshold) and the tag prefixed to every debug line; see debug().
DEBUG_NAME = 'TODO_DEBUG'
DEBUG_TITLE = 'todo'
ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="ViewMenu" action="View">
<menuitem name="ToDo" action="ToDo"/>
</menu>
</menubar>
</ui>
"""
class BrowserPage(webkit.WebView):
    """WebKit view used to render the generated TODO report.

    Adds no behavior beyond webkit.WebView; it is embedded in the popup
    window created by TodoWindowHelper.show_todo_marks().
    """
    def __init__(self):
        webkit.WebView.__init__(self)
def debug(text, level=1):
    """Print *text* to stdout when debugging is enabled.

    Debug output is controlled by the environment variable named by
    DEBUG_NAME: its integer value is the verbosity threshold, and the
    message is printed only when that threshold is >= *level*.  A
    non-integer value reports a debug error instead of crashing.
    """
    # `in` replaces the Python-2-only dict.has_key(); works on 2 and 3.
    if DEBUG_NAME in os.environ:
        try:
            required_level = int(os.environ[DEBUG_NAME])
        except ValueError:
            # Narrowed from a bare `except:`: only the int() conversion of a
            # malformed variable is expected to fail here.
            print("[%s] debug error" % DEBUG_TITLE)
        else:
            if required_level >= level:
                # Parenthesized single-argument print is valid and identical
                # under both Python 2 (expression) and Python 3 (function).
                print("[%s] %s" % (DEBUG_TITLE, text))
# TODO: Create a Configuration dialog
class TodoPlugin(gedit.Plugin):
    """gedit plugin entry point: one TodoWindowHelper per gedit window."""
    def __init__(self):
        gedit.Plugin.__init__(self)
        # Maps each gedit window to its TodoWindowHelper instance.
        self.instances = {}
    def activate(self, window):
        debug('activating plugin')
        self.instances[window] = TodoWindowHelper(self, window)
    def deactivate(self, window):
        debug('deactivating plugin')
        # Let the helper drop its references, then forget it entirely.
        self.instances[window].deactivate()
        del self.instances[window]
    def update_ui(self, window):
        debug('updating ui')
        self.instances[window].update_ui()
class TodoWindowHelper:
    """Per-window helper: adds the View -> TODO-List menu entry and shows
    a WebKit popup listing TODO marks found under the project root."""
    # NOTE(review): class-level mutable attribute shared by all instances;
    # it is never referenced anywhere in this file.
    handlers = {}
    # Matches report links of the form gedit://<file>?line=<line>
    # (consumed by on_navigation_request below).
    mt = re.compile(r'(?P<protocol>^gedit:\/\/)(?P<file>.*?)\?line=(?P<line>.*?)$')
    def __init__(self, plugin, window):
        self.window = window
        self.plugin = plugin
        # Created lazily on the first show_todo_marks() call.
        self.todo_window = None
        self._browser = None
        self.client = gconf.client_get_default()
        self.add_menu()
    def deactivate(self):
        # Drop every reference so the window and widgets can be collected.
        debug('deactivate function called')
        self._browser = None
        self.todo_window = None
        self.window = None
        self.plugin = None
    def add_menu(self):
        # Register the "ToDo" action (Ctrl+Alt+T) and merge the menu XML
        # (ui_str) into this window's UIManager.
        actions = [
            ('ToDo', gtk.STOCK_EDIT, _('TODO-List'), '<Control><Alt>t', _("List all TODO marks from your current project"), self.show_todo_marks)
        ]
        action_group = gtk.ActionGroup("ToDoActions")
        action_group.add_actions(actions, self.window)
        self.manager = self.window.get_ui_manager()
        self.manager.insert_action_group(action_group, -1)
        self.manager.add_ui_from_string(ui_str)
    def get_root_directory(self):
        """Return (root_path, window_title).

        Preference order: filebrowser plugin's virtual root, then EDDT's
        repository, then this plugin's own directory.
        """
        # get filebrowser plugin root
        fb_root = self.get_filebrowser_root()
        # get eddt plugin root
        eddt_root = self.get_eddt_root()
        if fb_root and fb_root != "" and fb_root is not None:
            title = "TODO List (Filebrowser integration)"
            root = fb_root
        elif eddt_root and eddt_root != "" and eddt_root is not None:
            title = "TODO List (EDDT integration)"
            root = eddt_root
        else:
            title = "TODO List (current directory)"
            root = os.path.dirname(__file__)
        # Roots come back as file:// URIs; convert to a plain local path.
        rt_path = urllib.unquote(root.replace("file://", ""))
        return (rt_path, title)
    # taken from snapopen plugin
    def get_filebrowser_root(self):
        # Returns the filebrowser plugin's virtual-root URI from gconf,
        # or None (implicitly) when it is not set.
        base = u'/apps/gedit-2/plugins/filebrowser/on_load'
        client = gconf.client_get_default()
        client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
        path = os.path.join(base, u'virtual_root')
        val = client.get(path)
        if val is not None:
            base = u'/apps/gedit-2/plugins/filebrowser'
            client = gconf.client_get_default()
            client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
            path = os.path.join(base, u'filter_mode')
            # NOTE(review): fbfilter is read but never used.
            fbfilter = client.get(path).get_string()
            return val.get_string()
    # taken from snapopen plugin
    def get_eddt_root(self):
        # Returns the EDDT plugin's repository URI from gconf, or None.
        base = u'/apps/gedit-2/plugins/eddt'
        client = gconf.client_get_default()
        client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
        path = os.path.join(base, u'repository')
        val = client.get(path)
        if val is not None:
            return val.get_string()
    def show_todo_marks(self, *args):
        """Menu-action callback: build the TODO report HTML and display it
        in a (reused) WebKit popup window."""
        debug("opening list of todo marks")
        # getting variables
        root, title = self.get_root_directory()
        debug("title: %s" % title)
        debug("root: %s" % root)
        html_str = parse_directory(root)
        if self.todo_window:
            # Window already exists (it is hidden, not destroyed, on close):
            # just bring it back.
            self.todo_window.show()
            self.todo_window.grab_focus()
        else:
            self._browser = BrowserPage()
            self._browser.connect('navigation-requested', self.on_navigation_request)
            self.todo_window = gtk.Window()
            self.todo_window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
            self.todo_window.resize(700,510)
            self.todo_window.connect('delete_event', self.on_todo_close)
            self.todo_window.connect("key-release-event", self.on_window_key)
            self.todo_window.set_destroy_with_parent(True)
            self.todo_window.add(self._browser)
            self.todo_window.show_all()
        self.todo_window.set_title(title)
        self._browser.load_string(html_str, "text/html", "utf-8", "about:")
    def on_todo_close(self, *args):
        # Hide instead of destroying so the window can be reused; returning
        # True suppresses the default delete handler.
        self.todo_window.hide()
        return True
    def on_window_key(self, widget, event):
        # Escape hides the TODO window.
        if event.keyval == gtk.keysyms.Escape:
            self.todo_window.hide()
    def on_navigation_request(self, page, frame, request):
        """Intercept clicks on gedit://<file>?line=<n> links and open the
        file at that line in gedit; return 1 to cancel navigation, 0 to
        let WebKit handle the URI."""
        file_uri = None
        uri = request.get_uri()
        #if uri == 'about:':
        #    return 0
        gp = self.mt.search(uri)
        if gp:
            file_uri = 'file:///%s' % gp.group('file')
            line_number = gp.group('line')
        if file_uri:
            # Test if document is not already open
            for doc in self.window.get_documents():
                if doc.get_uri() == file_uri:
                    tab = gedit.tab_get_from_document(doc)
                    view = tab.get_view()
                    self.window.set_active_tab(tab)
                    doc.goto_line(int(line_number))
                    view.scroll_to_cursor()
                    self.todo_window.hide()
                    return 1
            # Document isn't open, create a new tab from uri
            self.window.create_tab_from_uri(file_uri,
                    gedit.encoding_get_current(),
                    int(line_number), False, True)
            self.todo_window.hide()
            return 1
        else:
            debug("(%s) not found" % file_uri)
            #self.todo_window.hide()
            return 0
    def update(self, text=None):
        pass
    def update_ui(self):
        pass
    def set_data(self, name, value):
        # Attach arbitrary data to the active tab's view widget.
        self.window.get_active_tab().get_view().set_data(name, value)
    def get_data(self, name):
        return self.window.get_active_tab().get_view().get_data(name)
import re
# Compiled once at import time instead of on every call.
_STARTS_WITH_5 = re.compile(r"^5")


def match_num(string):
    """Return True if ``string`` begins with the digit '5', else False.

    Behaviorally identical to the original, but avoids recompiling the
    pattern per call and the redundant if/else around boolean literals.
    """
    return _STARTS_WITH_5.match(string) is not None
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
# Public metadata API of the package; everything below is re-exported.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.1"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
# Derived from __author__ so the two can never drift apart.
__copyright__ = "Copyright 2014 %s" % __author__
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import sep
import re
import logging
log = logging.getLogger(__name__)
def process(mtree):
    """Partition every unidentified leaf of *mtree* around dash separators.

    For each unidentified node, collects the start/end offsets of every
    '<sep>-<sep>' occurrence in its value and, if any were found, asks the
    node to partition itself at those offsets.
    """
    # Compile once: the pattern does not depend on the node (the original
    # recompiled it for every leaf, and carried an unused local `didx`).
    pattern = re.compile(sep + '-' + sep)
    for node in mtree.unidentified_leaves():
        indices = []
        # finditer yields the same non-overlapping matches that the original
        # search/advance-past-span loop produced.
        for match in pattern.finditer(node.value):
            indices.extend(match.span())
        if indices:
            node.partition(indices)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides API for application-defined metadata attached to Kafka records.
*/
package org.apache.kafka.common.header; | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/header/package-info.java |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package terraform
import (
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs/configschema"
)
// ResourceProvider is a legacy interface for providers.
//
// This is retained only for compatibility with legacy code. The current
// interface for providers is providers.Interface, in the sibling directory
// named "providers".
//
// NOTE(review): providers that hold closable connections additionally
// implement ResourceProviderCloser (declared below).
type ResourceProvider interface {
	/*********************************************************************
	 * Functions related to the provider
	 *********************************************************************/
	// ProviderSchema returns the config schema for the main provider
	// configuration, as would appear in a "provider" block in the
	// configuration files.
	// Currently not all providers support schema. Callers must therefore
	// first call Resources and DataSources and ensure that at least one
	// resource or data source has the SchemaAvailable flag set.
	GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
	// Validate is called once at the beginning with the raw configuration
	// (no interpolation done) and can return a list of warnings and/or
	// errors. (The first return value is the warnings, the second the errors.)
	// This is called once with the provider configuration only. It may not
	// be called at all if no provider configuration is given.
	// This should not assume that any values of the configurations are valid.
	// The primary use case of this call is to check that required keys are
	// set.
	Validate(*ResourceConfig) ([]string, []error)
	// Configure configures the provider itself with the configuration
	// given. This is useful for setting things like access keys.
	// This won't be called at all if no provider configuration is given.
	// Configure returns an error if it occurred.
	Configure(*ResourceConfig) error
	// Resources returns all the available resource types that this provider
	// knows how to manage.
	Resources() []ResourceType
	// Stop is called when the provider should halt any in-flight actions.
	// This can be used to make a nicer Ctrl-C experience for Terraform.
	// Even if this isn't implemented to do anything (just returns nil),
	// Terraform will still cleanly stop after the currently executing
	// graph node is complete. However, this API can be used to make more
	// efficient halts.
	// Stop doesn't have to and shouldn't block waiting for in-flight actions
	// to complete. It should take any action it wants and return immediately
	// acknowledging it has received the stop request. Terraform core will
	// automatically not make any further API calls to the provider soon
	// after Stop is called (technically exactly once the currently executing
	// graph nodes are complete).
	// The error returned, if non-nil, is assumed to mean that signaling the
	// stop somehow failed and that the user should expect potentially waiting
	// a longer period of time.
	Stop() error
	/*********************************************************************
	 * Functions related to individual resources
	 *********************************************************************/
	// ValidateResource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	// This is called once per resource.
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateResource(string, *ResourceConfig) ([]string, []error)
	// Apply applies a diff to a specific resource and returns the new
	// resource state along with an error.
	// If the resource state given has an empty ID, then a new resource
	// is expected to be created.
	Apply(
		*InstanceInfo,
		*InstanceState,
		*InstanceDiff) (*InstanceState, error)
	// Diff diffs a resource versus a desired state and returns
	// a diff.
	Diff(
		*InstanceInfo,
		*InstanceState,
		*ResourceConfig) (*InstanceDiff, error)
	// Refresh refreshes a resource and updates all of its attributes
	// with the latest information.
	Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
	/*********************************************************************
	 * Functions related to importing
	 *********************************************************************/
	// ImportState requests that the given resource be imported.
	// The returned InstanceState only requires ID be set. Importing
	// will always call Refresh after the state to complete it.
	// IMPORTANT: InstanceState doesn't have the resource type attached
	// to it. A type must be specified on the state via the Ephemeral
	// field on the state.
	// This function can return multiple states. Normally, an import
	// will map 1:1 to a physical resource. However, some resources map
	// to multiple. For example, an AWS security group may contain many rules.
	// Each rule is represented by a separate resource in Terraform,
	// therefore multiple states are returned.
	ImportState(*InstanceInfo, string) ([]*InstanceState, error)
	/*********************************************************************
	 * Functions related to data resources
	 *********************************************************************/
	// ValidateDataSource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	// This is called once per data source instance.
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateDataSource(string, *ResourceConfig) ([]string, []error)
	// DataSources returns all of the available data sources that this
	// provider implements.
	DataSources() []DataSource
	// ReadDataDiff produces a diff that represents the state that will
	// be produced when the given data source is read using a later call
	// to ReadDataApply.
	ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
	// ReadDataApply initializes a data instance using the configuration
	// in a diff produced by ReadDataDiff.
	ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
}
// ResourceProviderCloser is an interface that providers that can close
// connections that aren't needed anymore must implement.
//
// NOTE(review): presumably implemented by plugin-backed providers to shut
// down their RPC connection — confirm against call sites.
type ResourceProviderCloser interface {
	Close() error
}
// ProviderSchemaRequest is used to describe to a ResourceProvider which
// aspects of schema are required, when calling the GetSchema method.
type ProviderSchemaRequest struct {
	ResourceTypes []string // names of the managed resource types to include
	DataSources   []string // names of the data sources to include
}
// ProviderSchema represents the schema for a provider's own configuration
// and the configuration for some or all of its resources and data sources.
//
// The completeness of this structure depends on how it was constructed.
// When constructed for a configuration, it will generally include only
// resource types and data sources used by that configuration.
type ProviderSchema struct {
	Provider      *configschema.Block // schema of the "provider" block itself
	ProviderMeta  *configschema.Block // NOTE(review): presumably the provider_meta block — confirm
	ResourceTypes map[string]*configschema.Block // keyed by resource type name
	DataSources   map[string]*configschema.Block // keyed by data source name
	// Schema versions for managed resource types only; data sources carry
	// no versions (see SchemaForResourceType below).
	ResourceTypeSchemaVersions map[string]uint64
}
// SchemaForResourceType attempts to find a schema for the given mode and
// type. Returns nil if no such schema is available.
func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) {
	if mode == addrs.ManagedResourceMode {
		return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName]
	}
	if mode == addrs.DataResourceMode {
		// Data resources don't have schema versions right now, since state
		// is discarded for each refresh.
		return ps.DataSources[typeName], 0
	}
	// Any other mode is unexpected; report that no schema is available.
	return nil, 0
}
// SchemaForResourceAddr attempts to find a schema for the mode and type from
// the given resource address. Returns nil if no such schema is available.
func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
	// Convenience wrapper: delegates to SchemaForResourceType with the
	// address's mode and type.
	return ps.SchemaForResourceType(addr.Mode, addr.Type)
}
// ResourceType is a type of resource that a resource provider can manage.
// Returned by ResourceProvider.Resources.
type ResourceType struct {
	Name       string // Name of the resource, example "instance" (no provider prefix)
	Importable bool   // Whether this resource supports importing
	// SchemaAvailable is set if the provider supports the ProviderSchema,
	// ResourceTypeSchema and DataSourceSchema methods. Although it is
	// included on each resource type, it's actually a provider-wide setting
	// that's smuggled here only because that avoids a breaking change to
	// the plugin protocol.
	SchemaAvailable bool
}
// DataSource is a data source that a resource provider implements.
// Returned by ResourceProvider.DataSources.
type DataSource struct {
	Name string
	// SchemaAvailable is set if the provider supports the ProviderSchema,
	// ResourceTypeSchema and DataSourceSchema methods. Although it is
	// included on each resource type, it's actually a provider-wide setting
	// that's smuggled here only because that avoids a breaking change to
	// the plugin protocol.
	SchemaAvailable bool
}
// ResourceProviderFactory is a function type that creates a new instance
// of a resource provider. Factories allow callers to defer (and retry)
// provider construction, since each call may fail independently.
type ResourceProviderFactory func() (ResourceProvider, error)
// ResourceProviderFactoryFixed is a helper that creates a
// ResourceProviderFactory that just returns some fixed provider.
func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
	return func() (ResourceProvider, error) {
		// Never fails: the provider instance already exists.
		return p, nil
	}
}
// ProviderHasResource reports whether provider p manages a resource type
// with the given name n.
func ProviderHasResource(p ResourceProvider, n string) bool {
	resourceTypes := p.Resources()
	for i := range resourceTypes {
		if resourceTypes[i].Name == n {
			return true
		}
	}
	return false
}
// ProviderHasDataSource reports whether provider p implements a data source
// with the given name n.
func ProviderHasDataSource(p ResourceProvider, n string) bool {
	dataSources := p.DataSources()
	for i := range dataSources {
		if dataSources[i].Name == n {
			return true
		}
	}
	return false
}
// errPluginInit is the user-facing message shown when required provider
// plugins cannot be located or are incompatible; the trailing %s verb is
// filled with the detailed reason.
const errPluginInit = `
Plugin reinitialization required. Please run "terraform init".
Plugins are external binaries that Terraform uses to access and manipulate
resources. The configuration provided requires plugins which can't be located,
don't satisfy the version constraints, or are otherwise incompatible.
Terraform automatically discovers provider requirements from your
configuration, including providers used in child modules. To see the
requirements and constraints, run "terraform providers".
%s
`
// React Compiler test fixture: the useFragment() hook call appears inside a
// larger expression (its result feeds into `?? {}`), exercising hook calls
// in non-statement position. Do not "simplify" — the shape is the test.
function Component(props) {
  const user =
    useFragment(
      graphql`
        fragment F on T {
          id
        }
      `,
      props.user
    ) ?? {};
  return user.name;
}
from chainer.training.triggers import EarlyStoppingTrigger
from chainermn.extensions import ObservationAggregator
class MultiNodeEarlyStoppingTrigger(object):
    """__init__(\
    self, comm, check_trigger=(1, 'epoch'), monitor='main/loss', \
    patience=3, mode='auto', verbose=False, \
    max_trigger=(100, 'epoch'))
    Trigger for Early Stopping in Multiple Node Environments
    It serves almost the same as
    :class:`~chainer.training.triggers.EarlyStoppingTrigger`,
    but it can correctly work in multiple node environments.
    The difference between it and
    :class:`~chainer.training.triggers.EarlyStoppingTrigger` is that,
    in each check interval, it computes the mean of the accumulated
    values *across all nodes*. In this way, all nodes will have the same
    value to determine the timing at which the trigger fires so that
    they will stop at the same time.
    Args:
        comm : ChainerMN communicator
        check_trigger: Trigger that decides the comparison
            interval between current best value and new value.
            This must be a tuple in the form of ``<int>,
            'epoch'`` or ``<int>, 'iteration'`` which is passed to
            :class:`~chainer.training.triggers.IntervalTrigger`.
        monitor (str) : The metric you want to monitor
        patience (int) : Counts to let the trigger be patient.
            The trigger will not fire until the condition is met
            for successive ``patience`` checks.
        mode (str) : ``'max'``, ``'min'``, or ``'auto'``.
            It is used to determine how to compare the monitored values.
        verbose (bool) : Enable verbose output.
            If verbose is true, you can get more information
        max_trigger: Upper bound of the number of training loops
        suffix (str): Suffix added to the name of the monitored
            metric after aggregation.
    .. note::
        ``patients`` is also available as an alias of ``patience`` for
        historical reason.
    """
    def __init__(self, comm,
                 *, check_trigger=(1, 'epoch'), monitor='main/loss',
                 patience=None, mode='auto', verbose=False,
                 max_trigger=(100, 'epoch'), suffix='_aggregated', **kwargs):
        # `patients` as an alias of `patience`
        # The aggregated value is stored under a distinct key so it does not
        # clobber the raw per-process observation.
        monitor_aggregated = monitor + suffix
        # The wrapped single-node trigger watches the *aggregated* key.
        self.actual_trigger = EarlyStoppingTrigger(check_trigger=check_trigger,
                                                   monitor=monitor_aggregated,
                                                   patience=patience,
                                                   mode=mode, verbose=verbose,
                                                   max_trigger=max_trigger,
                                                   **kwargs)
        # Averages `monitor` across the communicator at every check interval.
        self.aggregator = ObservationAggregator(
            comm, monitor,
            aggregated_key=monitor_aggregated,
            comm_trigger=check_trigger)
    def __call__(self, trainer):
        # Aggregate across workers first, then let the wrapped trigger
        # decide based on the aggregated observation.
        self.aggregator(trainer)
        return self.actual_trigger(trainer)
    def _stop_condition(self):
        # Delegates to the wrapped trigger.
        return self.actual_trigger._stop_condition()
    def _init_summary(self):
        # Delegates to the wrapped trigger.
        return self.actual_trigger._init_summary()
    def get_training_length(self):
        # Delegates to the wrapped trigger.
        return self.actual_trigger.get_training_length()
"""
Tools for sending email.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import \
EmailMessage, EmailMultiAlternatives, \
SafeMIMEText, SafeMIMEMultipart, \
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid, \
BadHeaderError, forbid_multi_line_headers
from django.core.mail.backends.smtp import EmailBackend as _SMTPConnection
def get_connection(backend=None, fail_silently=False, **kwds):
    """Load an e-mail backend and return an instance of it.
    If backend is None (default) settings.EMAIL_BACKEND is used.
    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    # Dotted path "package.module.ClassName" naming the backend class.
    path = backend or settings.EMAIL_BACKEND
    try:
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    # NOTE(review): Python 2-only "except E, e" syntax; would need
    # "except ImportError as e" to run on Python 2.6+/3.
    except ImportError, e:
        raise ImproperlyConfigured(('Error importing email backend module %s: "%s"'
                                    % (mod_name, e)))
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise ImproperlyConfigured(('Module "%s" does not define a '
                                    '"%s" class' % (mod_name, klass_name)))
    return klass(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None):
    """
    Easy wrapper for sending a single message to a recipient list. Every
    member of recipient_list will see the other recipients in the 'To' field.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    if not connection:
        connection = get_connection(username=auth_user,
                                    password=auth_password,
                                    fail_silently=fail_silently)
    mail = EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection)
    return mail.send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    one message per entry to its recipient list. Returns the number of
    e-mails sent.
    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    if not connection:
        connection = get_connection(username=auth_user,
                                    password=auth_password,
                                    fail_silently=fail_silently)
    messages = []
    for subject, message, sender, recipient in datatuple:
        messages.append(EmailMessage(subject, message, sender, recipient))
    return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    if not settings.ADMINS:
        return
    # ADMINS entries are (name, address) pairs; only addresses are needed.
    recipients = [admin[1] for admin in settings.ADMINS]
    mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
                                  message, settings.SERVER_EMAIL, recipients,
                                  connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    if not settings.MANAGERS:
        return
    # MANAGERS entries are (name, address) pairs; only addresses are needed.
    recipients = [manager[1] for manager in settings.MANAGERS]
    mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
                                  message, settings.SERVER_EMAIL, recipients,
                                  connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
class SMTPConnection(_SMTPConnection):
    # Deprecated alias kept for backwards compatibility: warns, then behaves
    # exactly like the SMTP backend it subclasses.
    def __init__(self, *args, **kwds):
        import warnings
        warnings.warn(
            'mail.SMTPConnection is deprecated; use mail.get_connection() instead.',
            DeprecationWarning
        )
        super(SMTPConnection, self).__init__(*args, **kwds)
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForMultipleChoice,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithFlattening,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
) | python | github | https://github.com/huggingface/transformers | src/transformers/data/__init__.py |
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
"""Targets class describes which languages/platforms we support."""
__author__ = 'wclarkso@google.com (Will Clarkson)'
import logging
import os
from googleapis.codegen.filesys import files
from googleapis.codegen.utilities import json_expander
from googleapis.codegen.utilities import json_with_comments
class Targets(object):
  """Targets maintains the list of possible target options.
  Reads targets.json file in local directory. This file is formatted
  as:
  {
    'languages': {
      'languageA': {
        'surface_option1': {
          'path': 'stable',
          'description': 'something about language A',
          'displayName': 'SurfaceOption1',
        },
        'surface_option2': {
          'path': 'experimental',
          'description': 'something about language A',
          'displayName': 'SurfaceOption2',
          'platforms': ['cmd-line'],
        }
      },
      'languageB': {
        ...
      }, ...
    },
    'platforms': {
      'cmd-line': {
        'displayName': 'Pretty Platform Name'
      }
    }
  }
  """

  def __init__(self, targets_path=None, template_root=None, targets_dict=None):
    """Constructor.
    Loads targets file.
    Args:
      targets_path: (str) Path to targets file. Defaults to './targets.json'
      template_root: (str) Path to template root. Defaults to '.'
      targets_dict: (dict) Initial data, if not supplied from a file.
    Raises:
      ValueError: if the targets file does not contain the required sections.
    """
    # Fall back to the class-level default when no root is supplied.
    self.template_root = template_root or Targets._default_template_root
    self.targets_path = targets_path or os.path.join(self.template_root,
                                                     'targets.json')
    # An explicit dict wins over reading targets.json from disk.
    if targets_dict:
      self._targets_dict = targets_dict
    else:
      self._targets_dict = json_with_comments.Loads(
          files.GetFileContents(self.targets_path))
    # Do some basic validation that this has the required fields
    if 'languages' not in self._targets_dict:
      raise ValueError('languages not in targets.json')
  def Dict(self):
    """The targets.json file as a dictionary."""
    return self._targets_dict
  def VariationsForLanguage(self, language):
    # Returns None for an unknown language rather than raising.
    language_def = self._targets_dict['languages'].get(language)
    if not language_def:
      return None
    return Variations(self, language, language_def['variations'])
  def GetLanguage(self, language):
    # Unlike VariationsForLanguage, raises KeyError for unknown languages.
    return self._targets_dict['languages'][language]
  def Languages(self):
    return self._targets_dict['languages']
  def Platforms(self):
    # The 'platforms' section is optional in targets.json.
    return self._targets_dict.get('platforms', {})
  @staticmethod
  def SetDefaultTemplateRoot(path):
    """Sets a new default full path to the templates directory.
    Args:
      path: (str) full path to templates directory.
    """
    # This is not a classmethod because we don't want subclasses
    # to shadow this value.
    logging.info('setting default template root to %s', path)
    Targets._default_template_root = path
  @staticmethod
  def GetDefaultTemplateRoot():
    return Targets._default_template_root
  # Set the initial template root.
  _default_template_root = os.path.join(os.path.dirname(__file__),
                                        'languages')
  # Whether to use variation release versions when calculating template paths.
  use_versioned_paths = False
  @staticmethod
  def SetUseVersionedPaths(use_versioned_paths):
    """Sets whether versions are used in the template path."""
    # This is not a classmethod because we don't want subclasses
    # to shadow this value.
    Targets.use_versioned_paths = use_versioned_paths
class Variations(dict):
  """A set of variations available for a particular language."""

  def __init__(self, targets, language, variations_dict):
    super(Variations, self).__init__(variations_dict)
    self._targets = targets
    self._language = language

  def IsValid(self, variation):
    """Test whether a variation exists."""
    return variation in self

  def _RelativeTemplateDir(self, variation):
    """Returns the path to template dir for the selected variation.

    By default, the path is the same as the variation name. It can be
    overridden in two ways, of descending precedence:
    1. by the 'releaseVersion' element, if use_versioned_paths is set.
    2. with an explicit 'path' statement.

    Args:
      variation: (str) A target variation name.
    Returns:
      (str) Relative path to template directory.
    """
    if self._targets.use_versioned_paths:
      # NOTE(review): direct indexing raises KeyError for an unknown
      # variation here, while the fallback below uses .get() and tolerates
      # one -- confirm whether that asymmetry is intended.
      path = self[variation].get('releaseVersion') or variation
    else:
      path = None
    if not path:
      path = self.get(variation, {}).get('path') or variation
    return os.path.join(self._language, path)

  def AbsoluteTemplateDir(self, variation):
    """Returns the path to template dir for the selected variation.

    Args:
      variation: (str) A target variation name.
    Returns:
      (str) Absolute path to template directory.
    """
    return os.path.join(self._targets.template_root,
                        self._RelativeTemplateDir(variation))

  def GetFeaturesForReleaseVersion(self, release_version):
    """Return the Features of the variation matching release_version, or None."""
    for name in self:
      features = self.GetFeatures(name)
      if release_version == features.get('releaseVersion'):
        return features
    return None

  def GetFeatures(self, variation):
    """Returns the features dictionary for a specific variation.

    This is the basic dictionary information plus any specific overrides in
    the per-template-tree features.json file.

    Args:
      variation: (str) A target variation name.
    Returns:
      (Features) features dictionary
    """
    if not variation:
      return None
    template_dir = self.AbsoluteTemplateDir(variation)
    features = Features(template_dir, self.get(variation), variation)
    json_path = os.path.join(template_dir, 'features.json')
    try:
      features_json = files.GetFileContents(json_path)
    except files.FileDoesNotExist:
      # for backwards compatibility, we forgive this.
      # TODO(user): be stricter about this and
      # fix/remove any tests that fail as a result.
      return features
    features.update(json_expander.ExpandJsonTemplate(
        json_with_comments.Loads(features_json)))
    # If not specified, the releaseVersion matches the variation
    if not features.get('releaseVersion'):
      features['releaseVersion'] = variation
    return features
class Features(dict):
  """A dictionary describing the features of a particular API variation."""

  # TODO(user): Do we need initial_content? The only thing we see in it is
  # path, which should be set explicitly to the dirname of the real file path.
  def __init__(self, template_dir, initial_content=None, name=None):
    super(Features, self).__init__(initial_content or {})
    self.name = name
    self.template_dir = template_dir
    if 'path' not in self:
      self['path'] = os.path.basename(template_dir)

  def DependenciesForEnvironment(self, environment=None):
    """Returns the list of dependencies for an environment.

    Given an environment:
    build the list of dependencies required for that environment. This
    includes elements marked as all (platform='*') and ones specifically
    mentioning that environment.
    build the list of optional packages which might be useful to your app.
    That is, everything marked generic, but not in the first list.
    build the list of everything excluded from the first two sets

    Args:
      environment: (str) An environment (as per platforms.PLATFORMS). If None,
        the optional packages list will include everything that is not
        mandatory (i.e. marked with platform='*').
    Returns:
      list(dict), list(dict), list(dict): required_packages, optional_packages,
      packages we do not want.
    """
    required = []
    optional = []
    excluded = []
    for r in self.get('requires', []):
      environments = r['environments']
      if '*' in environments or environment in environments:
        required.append(r)
      elif 'generic' in environments or not environment:
        optional.append(r)
      else:
        excluded.append(r)
    return required, optional, excluded

  def ExtractPathsFromDependencies(self, dependencies, file_type=None):
    """Extract the file paths from a list of dependencies.

    Args:
      dependencies: (list(dict)) list of dependency entries (as produced by
        DependenciesForEnvironment); each may carry a 'files' list.
      file_type: (str) If specified, only extract paths for that file type.
    Returns:
      set(str): The set of file paths required for this dependency set.
    """
    ret = set()
    for d in dependencies or []:
      for f in d.get('files') or []:
        p = f.get('path')
        if p and (file_type is None or file_type == f.get('type')):
          ret.add(p)
    return ret

  def AllDependencyPaths(self):
    """Returns the set of all file paths mentioned as dependencies.

    Returns:
      set(str)
    """
    ret = set()
    for dependency in self.get('requires', []):
      for f in dependency.get('files') or []:
        p = f.get('path')
        if p:
          ret.add(p)
    return ret

  def FilePathsWeDoNotDependOn(self, environment=None, file_type=None):
    """Returns the file paths which are NOT required for an environment.

    Figure out the files we need for an environment and reduce that by the
    kind of files (if we only want source or binary), then invert that list
    w.r.t. all the files mentioned in the features requirements list.

    The rationale for this function is to make it easy to find the set of
    files which should be stripped from a download, while leaving all files
    not explicitly mentioned in the features.

    Args:
      environment: (str) An environment (as per platforms.PLATFORMS). If None,
        everything that is not required is treated as optional.
      file_type: (str) If specified, only extract paths for that file type.
    Returns:
      set(str): The paths which are NOT required for that platform.
      NOTE(review): the quick-exit path below returns a list ([]) while the
      normal path returns a set -- confirm callers only iterate the result.
    """
    if not environment and not file_type:  # quick exit for common case
      return []
    req, _, _ = self.DependenciesForEnvironment(environment=environment)
    req_paths = self.ExtractPathsFromDependencies(req, file_type=file_type)
    all_paths = self.AllDependencyPaths()
    return all_paths - req_paths
{
"backfill": {
"affected_many": "Se activarán {{count}} ejecuciones.",
"affected_one": "1 ejecución será activada.",
"affected_other": "Se activarán {{count}} ejecuciones.",
"affectedNone": "No hay ejecuciones que coincidan con los criterios seleccionados.",
"allRuns": "Todas las Ejecuciones",
"backwards": "Ejecutar Hacia Atrás",
"dateRange": "Rango de Fechas",
"errorStartDateBeforeEndDate": "La Fecha Inicial debe ser antes de la Fecha Final",
"maxRuns": "Máximo de Ejecuciones Activas",
"missingAndErroredRuns": "Ejecutaciones Faltantes y con Errores",
"missingRuns": "Ejecutaciones Faltantes",
"reprocessBehavior": "Comportamiento de Reprocesamiento",
"run": "Ejecutar Backfill",
"selectDescription": "Ejecutar este Dag para un rango de fechas",
"selectLabel": "Backfill",
"title": "Ejecutar Backfill",
"toaster": {
"success": {
"description": "Backfill jobs han sido activados exitosamente.",
"title": "Backfill generado"
}
},
"tooltip": "Backfill requiere una programación",
"unpause": "Reanudar {{dag_display_name}} al activarse",
"validation": {
"datesRequired": "Ambos intervalos de Fecha Inicial y Fecha Final deben ser proporcionados.",
"startBeforeEnd": "El intervalo de Fecha Inicial debe ser menor o igual a la Fecha Final."
}
},
"banner": {
"backfillInProgress": "Backfill en progreso",
"cancel": "Cancelar backfill",
"pause": "Pausar backfill",
"unpause": "Reanudar backfill"
},
"clipboard": {
"copy": "Copiar"
},
"close": "Cerrar",
"configForm": {
"advancedOptions": "Opciones Avanzadas",
"configJson": "Configuración JSON",
"invalidJson": "Formato JSON inválido: {{errorMessage}}"
},
"dagWarnings": {
"error_many": "Errores",
"error_one": "1 Error",
"error_other": "Errores",
"errorAndWarning": "1 Error y {{warningText}}",
"warning_many": "{{count}} Advertencias",
"warning_one": "1 Advertencia",
"warning_other": "{{count}} Advertencias"
},
"durationChart": {
"duration": "Duración (segundos)",
"lastDagRun_many": "Últimas {{count}} Ejecuciones de Dag",
"lastDagRun_one": "Última Ejecución de Dag",
"lastDagRun_other": "Últimas {{count}} Ejecuciones de Dag",
"lastTaskInstance_many": "Últimas {{count}} Instancias de Tarea",
"lastTaskInstance_one": "Última Instancia de Tarea",
"lastTaskInstance_other": "Últimas {{count}} Instancias de Tarea",
"queuedDuration": "Duración en Cola",
"runAfter": "Ejecutar Después",
"runDuration": "Duración de la Ejecución"
},
"fileUpload": {
"files_many": "{{count}} archivos",
"files_one": "{{count}} archivo",
"files_other": "{{count}} archivos"
},
"flexibleForm": {
"placeholder": "Seleccionar Valor",
"placeholderArray": "Ingrese cada cadena en una nueva línea",
"placeholderExamples": "Comience a escribir para ver opciones",
"placeholderMulti": "Seleccionar uno o múltiples valores",
"validationErrorArrayNotArray": "El valor debe ser un array.",
"validationErrorArrayNotNumbers": "Todos los elementos en el array deben ser números.",
"validationErrorArrayNotObject": "Todos los elementos en el array deben ser objetos.",
"validationErrorRequired": "Este campo es requerido"
},
"graph": {
"directionDown": "De arriba a abajo",
"directionLeft": "De derecha a izquierda",
"directionRight": "De izquierda a derecha",
"directionUp": "De abajo a arriba",
"downloadImage": "Descargar imagen",
"downloadImageError": "Error al descargar la imagen.",
"downloadImageErrorTitle": "Descarga Fallida",
"otherDagRuns": "+Otras Ejecuciones de Dag",
"taskCount_many": "{{count}} Tareas",
"taskCount_one": "{{count}} Tarea",
"taskCount_other": "{{count}} Tareas",
"taskGroup": "Grupo de Tareas"
},
"limitedList": "+{{count}} más",
"limitedList.allItems": "Todos los {{count}} elementos:",
"limitedList.allTags_many": "Todas las etiquetas ({{count}})",
"limitedList.allTags_one": "Todas las etiquetas (1)",
"limitedList.allTags_other": "Todas las etiquetas ({{count}})",
"limitedList.clickToInteract": "Haz clic en una etiqueta para filtrar Dags",
"limitedList.clickToOpenFull": "Haz clic en \"+{{count}} más\" para ver la lista completa",
"limitedList.copyPasteText": "Puedes copiar y pegar el texto de arriba",
"limitedList.showingItems_many": "Mostrando {{count}} elementos",
"limitedList.showingItems_one": "Mostrando 1 elemento",
"limitedList.showingItems_other": "Mostrando {{count}} elementos",
"logs": {
"file": "Archivo",
"location": "línea {{line}} en {{name}}"
},
"reparseDag": "Reparar Dag",
"sortedAscending": "ordenado ascendente",
"sortedDescending": "ordenado descendente",
"sortedUnsorted": "sin ordenar",
"taskTries": "Intentos de Tarea",
"toggleCardView": "Mostrar vista de tarjeta",
"toggleTableView": "Mostrar vista de tabla",
"triggerDag": {
"button": "Trigger",
"loading": "Cargando información del Dag...",
"loadingFailed": "Error al cargar la información del Dag. Por favor, inténtelo de nuevo.",
"runIdHelp": "Opcional - se generará si no se proporciona",
"selectDescription": "Activar una ejecución única de este Dag",
"selectLabel": "Ejecución Única",
"title": "Activar Dag",
"toaster": {
"success": {
"description": "La ejecución del Dag ha sido activada exitosamente.",
"title": "Ejecución del Dag Activada"
}
},
"unpause": "Reanudar {{dagDisplayName}} al activarse"
},
"trimText": {
"details": "Detalles",
"empty": "Vacío",
"noContent": "No hay contenido disponible."
},
"versionDetails": {
"bundleLink": "Enlace del Bundle",
"bundleName": "Nombre del Bundle",
"bundleVersion": "Versión del Bundle",
"createdAt": "Creado en",
"versionId": "ID de la Versión"
},
"versionSelect": {
"dagVersion": "Versión del Dag",
"versionCode": "v{{versionCode}}"
}
} | json | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/public/i18n/locales/es/components.json |
from .gpu import *
from .staffboundary import distance_transform_kernel
from pyopencl import LocalMemory
from . import bitimage, staffsize
from .page import Page
from scipy.optimize import minimize
import glob
import metaomr
prg = build_program(['clrand32', 'kanungo'])
def normalized_page(page):
    """Return a 512x512 bit image around the page center, rescaled so that
    the staff line spacing is 8 pixels.

    NOTE(review): Python 2 module -- `h/2` below relies on integer
    division (and `xrange` is used elsewhere in this file).
    """
    if not hasattr(page, 'staff_dist'):
        staffsize.staffsize(page)
    # Rescale so one staff-line spacing maps to 8 pixels
    scale = 8.0 / page.staff_dist
    img = bitimage.scale(page.img, scale, align=64)
    # Grab a 512x512 area around the center of the page
    h = int(page.orig_size[0] * scale)
    w = int(page.orig_size[1] * scale)
    img = bitimage.as_hostimage(img)[h/2-256:h/2+256, w/2-256:w/2+256]
    return bitimage.as_bitimage(img)
def normalized_staves(page):
    """Extract every staff from the page after rescaling it so the staff
    line spacing is 8 pixels; returns one image per detected staff."""
    # Assume page is not rotated; scanned pages must be preprocessed first
    staffsize.staffsize(page)
    img = bitimage.scale(page.img, 8.0 / page.staff_dist, align=64)
    new_page = Page(img)
    # Re-run staff detection on the rescaled image
    staffsize.staffsize(new_page)
    num_staves = len(new_page.staves())
    return [new_page.staves.extract_staff(i) for i in xrange(num_staves)]
IDEAL_SET = None
def load_ideal_set():
    """Lazily load and cache the set of ideal (clean) reference scores.

    The images are read once from resources/ideal_set/*.png and wrapped
    in KanungoImage objects; subsequent calls return the cached list.
    """
    global IDEAL_SET
    if IDEAL_SET is None:
        IDEAL_SET = []
        for img_path in glob.glob('resources/ideal_set/*.png'):
            page, = metaomr.open(img_path)
            IDEAL_SET.append(KanungoImage(normalized_page(page)))
    return IDEAL_SET
# Source: T. Kanungo, R. M. Haralick, and I. Phillips. "Global and local
# document degradation models." In Proceedings of the Second International
# Conference on Document Analysis and Recognition, pages 730-734. IEEE, 1993.
class KanungoImage:
    """Degrades a bitonal score image with the Kanungo noise model.

    NOTE(review): Python 2 only -- noise() and closing() use tuple
    parameter unpacking, which is invalid syntax in Python 3.
    """
    img = None      # bit-packed image on the compute device
    fg_dist = None  # distance transform w.r.t. foreground pixels
    bg_dist = None  # distance transform w.r.t. background pixels
    seed = None     # two 32-bit RNG seeds for the OpenCL kernel
    def __init__(self, img):
        self.img = img
        img = img.get()
        # Distance transforms (capped at 2**10) used to modulate the
        # flip probability of pixels near the fg/bg boundary.
        self.fg_dist = thr.to_device(np.where(np.unpackbits(img),0,2**10).astype(np.int32))
        distance_transform_kernel(self.fg_dist, 10)
        self.bg_dist = thr.to_device(np.where(np.unpackbits(~img),0,2**10).astype(np.int32))
        distance_transform_kernel(self.bg_dist, 10)
        self.seed = thr.to_device(np.random.randint(1, 2**32, 2).astype(np.uint32))
    def degrade(self, params):
        """Apply Kanungo noise then morphological closing.

        params is (nu, a0, a, b0, b, k); probabilities are clipped to
        [0, 1], decay rates to [0, inf), and k is rounded to an int >= 0.
        """
        # Values from scipy.optimize are real-valued and potentially negative
        params = np.array(params)
        params[[0, 1, 3]] = np.clip(params[[0, 1, 3]], 0, 1)
        params[[2, 4]] = np.clip(params[[2, 4]], 0, np.inf)
        params = list(params)
        params[5] = max(0, np.rint(params[5]).astype(int))
        img = self.noise(params)
        img = self.closing(img, params)
        return img
    def noise(self, (nu, a0, a, b0, b, k)):
        # Run the Kanungo pixel-flip kernel on a copy of the image;
        # k (closing iterations) is unused here and applied in closing().
        new_img = self.img.copy()
        prg.kanungo_noise(new_img, self.fg_dist, self.bg_dist,
                          np.float32(nu),
                          np.float32(a0), np.float32(a),
                          np.float32(b0), np.float32(b),
                          self.seed,
                          LocalMemory(4),
                          global_size=new_img.shape[::-1])
        return new_img
    def closing(self, img, (nu, a0, a, b0, b, k)):
        # Morphological closing with k iterations; only k is used.
        return bitimage.closing(img, numiter=k)
# Source: Kanungo, Tapas, and Qigong Zheng. "Estimation of morphological
# degradation model parameters." In 2001 IEEE International Conference on
# Acoustics, Speech, and Signal Processing. Vol. 3. IEEE, 2001.
# To reduce temporary memory usage, just return the list of patterns
# for each image, which can then be concatenated and bincounted
def pattern_list(img):
    """Return a flat array of per-pixel pattern codes for the bit image.

    Each uint16 presumably encodes the 3x3 binary neighborhood of one
    pixel (kernel `patterns_3x3`) -- confirm against the OpenCL source.
    The bit-packed image has 8 pixels per byte, hence shape[1]*8 columns.
    """
    patterns = thr.empty_like(Type(np.uint16, (img.shape[0], img.shape[1]*8)))
    prg.patterns_3x3(img, patterns, global_size=img.shape[::-1])
    return patterns.get().ravel()
def test_hists_ks(hist1, hist2):
cdf1 = np.cumsum(hist1).astype(float) / hist1.sum()
cdf2 = np.cumsum(hist2).astype(float) / hist2.sum()
ks = np.abs(cdf1 - cdf2).max()
p = None
return ks, p
def test_hists_euc(hist1, hist2):
# Normalize bins excluding all-white pattern
hist1 = hist1.astype(float)
hist2 = hist2.astype(float)
hist1 = hist1[1:] / hist1[1:].sum()
hist2 = hist2[1:] / hist2[1:].sum()
return np.sqrt(np.sum((hist1 - hist2)[1:] ** 2)), None
import scipy.stats
test_hists_chisq = scipy.stats.chisquare
VI = None
def test_hists_mahalanobis(hist1, hist2):
    """Mahalanobis distance between two histograms.

    The inverse covariance matrix is lazily loaded once from
    results/kanungo_covar_inv.npz and cached in the module-level VI.
    Returns (distance, None); no p-value is computed.
    """
    global VI
    if VI is None:
        VI = np.load('results/kanungo_covar_inv.npz')['VI']
    diffs = hist1 - hist2
    return np.sqrt(diffs.T.dot(VI).dot(diffs)), None
def est_parameters(page, ideal_set=None, opt_method='nelder-mead', test_fn=test_hists_mahalanobis, maxfev=50):
    """Estimate Kanungo degradation parameters for a scanned page.

    Minimizes the distance (test_fn) between the 3x3 pattern histogram of
    the page center and the combined histogram of synthetically degraded
    ideal images, restarting the optimizer from 10 random initial guesses
    and returning the best scipy result object.

    NOTE(review): `bounds` is ignored by Nelder-Mead in older SciPy
    versions -- degrade() clips the parameters instead; confirm intended.
    """
    if ideal_set is None:
        ideal_set = load_ideal_set()
    page_center = normalized_page(page)
    patterns = pattern_list(page_center)
    # Drop bin 0 (all-white pattern), pad to the full pattern count,
    # and normalize to a probability distribution.
    page_hist = np.bincount(patterns).astype(float)[1:].copy()
    page_hist.resize(2 ** (3*3) - 1)
    page_hist /= page_hist.sum()
    def objective(params):
        # Histogram distance between degraded ideal images and the page.
        degraded = [ideal_img.degrade(params) for ideal_img in ideal_set]
        patterns = np.concatenate([pattern_list(degraded_img) for degraded_img in degraded])
        combined_hist = np.bincount(patterns).astype(float)[1:].copy()
        combined_hist.resize(2 ** (3*3) - 1)
        combined_hist /= combined_hist.sum()
        res = test_fn(combined_hist, page_hist)[0]
        return res
    minim_results = []
    for i in xrange(10):
        # Random restart: base point plus a random perturbation per parameter
        params_0 = np.array([0.01, 0.01, 0.5, 0.01, 0.5, 0]
                            + np.random.random(6)
                            * [0.09, 0.09, 3, 0.09, 3, 2])
        minim_results.append(minimize(objective, params_0, method=opt_method,
                                      options=dict(xtol=1e-4, maxfev=maxfev),
                                      bounds=[(0,0.5), (0,0.5), (0,10), (0,0.5), (0,10), (0,5)]))
    best_result = np.argmin([res.fun for res in minim_results])
    return minim_results[best_result]
<?php
namespace Illuminate\Database;
use Illuminate\Contracts\Events\Dispatcher;
use Illuminate\Contracts\Support\DeferrableProvider;
use Illuminate\Database\Console\Migrations\FreshCommand;
use Illuminate\Database\Console\Migrations\InstallCommand;
use Illuminate\Database\Console\Migrations\MigrateCommand;
use Illuminate\Database\Console\Migrations\MigrateMakeCommand;
use Illuminate\Database\Console\Migrations\RefreshCommand;
use Illuminate\Database\Console\Migrations\ResetCommand;
use Illuminate\Database\Console\Migrations\RollbackCommand;
use Illuminate\Database\Console\Migrations\StatusCommand;
use Illuminate\Database\Migrations\DatabaseMigrationRepository;
use Illuminate\Database\Migrations\MigrationCreator;
use Illuminate\Database\Migrations\Migrator;
use Illuminate\Support\ServiceProvider;
/**
 * Registers the database migration services and console commands.
 *
 * Implements DeferrableProvider, so these bindings are only registered
 * when one of the provided services is actually resolved.
 */
class MigrationServiceProvider extends ServiceProvider implements DeferrableProvider
{
    /**
     * The commands to be registered.
     *
     * Keys name the register{Key}Command() method invoked for each entry;
     * values are the command class names bound as singletons.
     *
     * @var array
     */
    protected $commands = [
        'Migrate' => MigrateCommand::class,
        'MigrateFresh' => FreshCommand::class,
        'MigrateInstall' => InstallCommand::class,
        'MigrateRefresh' => RefreshCommand::class,
        'MigrateReset' => ResetCommand::class,
        'MigrateRollback' => RollbackCommand::class,
        'MigrateStatus' => StatusCommand::class,
        'MigrateMake' => MigrateMakeCommand::class,
    ];

    /**
     * Register the service provider.
     *
     * @return void
     */
    public function register()
    {
        $this->registerRepository();

        $this->registerMigrator();

        $this->registerCreator();

        $this->registerCommands($this->commands);
    }

    /**
     * Register the migration repository service.
     *
     * The repository tracks which migrations have already run. Its table
     * name comes from config 'database.migrations', which may be either a
     * plain string or an array with a 'table' key.
     *
     * @return void
     */
    protected function registerRepository()
    {
        $this->app->singleton('migration.repository', function ($app) {
            $migrations = $app['config']['database.migrations'];

            $table = is_array($migrations) ? ($migrations['table'] ?? null) : $migrations;

            return new DatabaseMigrationRepository($app['db'], $table);
        });
    }

    /**
     * Register the migrator service.
     *
     * @return void
     */
    protected function registerMigrator()
    {
        // The migrator is responsible for actually running and rollback the migration
        // files in the application. We'll pass in our database connection resolver
        // so the migrator can resolve any of these connections when it needs to.
        $this->app->singleton('migrator', function ($app) {
            $repository = $app['migration.repository'];

            return new Migrator($repository, $app['db'], $app['files'], $app['events']);
        });

        // Also expose the same singleton under its class name for type-hinting.
        $this->app->bind(Migrator::class, fn ($app) => $app['migrator']);
    }

    /**
     * Register the migration creator.
     *
     * @return void
     */
    protected function registerCreator()
    {
        $this->app->singleton('migration.creator', function ($app) {
            return new MigrationCreator($app['files'], $app->basePath('stubs'));
        });
    }

    /**
     * Register the given commands.
     *
     * Calls register{Key}Command() for each entry, then registers the
     * command classes with Artisan.
     *
     * @param  array  $commands
     * @return void
     */
    protected function registerCommands(array $commands)
    {
        foreach (array_keys($commands) as $command) {
            $this->{"register{$command}Command"}();
        }

        $this->commands(array_values($commands));
    }

    /**
     * Register the MigrateCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateCommand()
    {
        $this->app->singleton(MigrateCommand::class, function ($app) {
            return new MigrateCommand($app['migrator'], $app[Dispatcher::class]);
        });
    }

    /**
     * Register the FreshCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateFreshCommand()
    {
        $this->app->singleton(FreshCommand::class, function ($app) {
            return new FreshCommand($app['migrator']);
        });
    }

    /**
     * Register the InstallCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateInstallCommand()
    {
        $this->app->singleton(InstallCommand::class, function ($app) {
            return new InstallCommand($app['migration.repository']);
        });
    }

    /**
     * Register the MigrateMakeCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateMakeCommand()
    {
        $this->app->singleton(MigrateMakeCommand::class, function ($app) {
            // Once we have the migration creator registered, we will create the command
            // and inject the creator. The creator is responsible for the actual file
            // creation of the migrations, and may be extended by these developers.
            $creator = $app['migration.creator'];

            $composer = $app['composer'];

            return new MigrateMakeCommand($creator, $composer);
        });
    }

    /**
     * Register the RefreshCommand singleton.
     *
     * No closure is needed: the container auto-resolves its dependencies.
     *
     * @return void
     */
    protected function registerMigrateRefreshCommand()
    {
        $this->app->singleton(RefreshCommand::class);
    }

    /**
     * Register the ResetCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateResetCommand()
    {
        $this->app->singleton(ResetCommand::class, function ($app) {
            return new ResetCommand($app['migrator']);
        });
    }

    /**
     * Register the RollbackCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateRollbackCommand()
    {
        $this->app->singleton(RollbackCommand::class, function ($app) {
            return new RollbackCommand($app['migrator']);
        });
    }

    /**
     * Register the StatusCommand singleton.
     *
     * @return void
     */
    protected function registerMigrateStatusCommand()
    {
        $this->app->singleton(StatusCommand::class, function ($app) {
            return new StatusCommand($app['migrator']);
        });
    }

    /**
     * Get the services provided by the provider.
     *
     * @return array
     */
    public function provides()
    {
        return array_merge([
            'migrator', 'migration.repository', 'migration.creator', Migrator::class,
        ], array_values($this->commands));
    }
}
# -*- coding: utf-8 -*-
# # Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import hashlib
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from scipy import sparse
from mne import Epochs, read_events, pick_info, pick_types, Annotations
from mne.event import make_fixed_length_events
from mne.datasets import testing
from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc,
_loc_to_coil_trans, read_raw_fif, read_info, write_info,
anonymize_info)
from mne.io.constants import FIFF
from mne.io.write import DATE_NONE
from mne.io.meas_info import (Info, create_info, _write_dig_points,
_read_dig_points, _make_dig_points, _merge_info,
_force_update_info, RAW_INFO_FIELDS,
_bad_chans_comp, _get_valid_units)
from mne.io import read_raw_ctf
from mne.utils import _TempDir, run_tests_if_main, catch_logging
from mne.channels.montage import read_montage, read_dig_montage
base_dir = op.join(op.dirname(__file__), 'data')
fiducials_fname = op.join(base_dir, 'fsaverage-fiducials.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
chpi_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
event_name = op.join(base_dir, 'test-eve.fif')
kit_data_dir = op.join(op.dirname(__file__), '..', 'kit', 'tests', 'data')
hsp_fname = op.join(kit_data_dir, 'test_hsp.txt')
elp_fname = op.join(kit_data_dir, 'test_elp.txt')
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
sss_ctc_fname = pre + 'crossTalk_raw_sss.fif'
ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf.ds')
def test_get_valid_units():
    """Test that _get_valid_units returns a tuple of unit strings."""
    units = _get_valid_units()
    assert isinstance(units, tuple)
    for unit in units:
        assert isinstance(unit, str)
    assert "n/a" in units
def test_coil_trans():
    """Test that loc vectors and coil transforms round-trip exactly."""
    rng = np.random.RandomState(0)
    trans = rng.randn(4, 4)
    trans[3] = [0, 0, 0, 1]  # make the last row a valid affine row
    assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(trans)), trans)
    loc = rng.randn(12)
    assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(loc)), loc)
def test_make_info():
    """Test some create_info properties."""
    n_ch = np.longlong(1)
    info = create_info(n_ch, 1000., 'eeg')
    assert set(info.keys()) == set(RAW_INFO_FIELDS)
    coil_types = {ch['coil_type'] for ch in info['chs']}
    assert FIFF.FIFFV_COIL_EEG in coil_types
    # Invalid argument combinations must raise the documented errors
    pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=['eeg', 'eeg'])
    pytest.raises(TypeError, create_info, ch_names=[np.array([1])],
                  sfreq=1000)
    pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=np.array([1]))
    pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types='awesome')
    pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000,
                  ch_types=None, montage=np.array([1]))
    # A standard montage object sets the channel locations
    m = read_montage('biosemi32')
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=m)
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)
    # A digitized montage populates info['dig'] with fiducials
    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
    d = read_dig_montage(hsp_fname, None, elp_fname, names, unit='m',
                         transform=False)
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=d)
    idents = [p['ident'] for p in info['dig']]
    assert FIFF.FIFFV_POINT_NASION in idents
    # A list of montages applies all of them (positions and dig points)
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=[d, m])
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)
    idents = [p['ident'] for p in info['dig']]
    assert (FIFF.FIFFV_POINT_NASION in idents)
    # Montages in the list may also be given by name
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=[d, 'biosemi32'])
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)
    idents = [p['ident'] for p in info['dig']]
    assert (FIFF.FIFFV_POINT_NASION in idents)
    # create_info leaves the measurement date unset
    assert info['meas_date'] is None
def test_duplicate_name_correction():
    """Test that duplicate channel names get running-number suffixes."""
    # Suffixing with a running number works here ...
    info = create_info(['A', 'A', 'A'], 1000., verbose='error')
    assert info['ch_names'] == ['A-0', 'A-1', 'A-2']
    # ... but must fail when a suffixed name already exists
    with pytest.raises(ValueError, match='Adding a running number'):
        create_info(['A', 'A', 'A-0'], 1000., verbose='error')
def test_fiducials_io():
    """Test reading and writing fiducial points."""
    tempdir = _TempDir()
    pts, coord_frame = read_fiducials(fiducials_fname)
    assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI
    assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL

    # Round-trip through a temporary file must preserve every point
    temp_fname = op.join(tempdir, 'test.fif')
    write_fiducials(temp_fname, pts, coord_frame)
    pts_read, frame_read = read_fiducials(temp_fname)
    assert frame_read == coord_frame
    for pt_orig, pt_read in zip(pts, pts_read):
        for key in ('kind', 'ident', 'coord_frame'):
            assert pt_orig[key] == pt_read[key]
        assert_array_equal(pt_orig['r'], pt_read['r'])

    # An inconsistent coord_frame must be rejected on write
    pts[0]['coord_frame'] += 1
    pytest.raises(ValueError, write_fiducials, temp_fname, pts, coord_frame)
def test_info():
    """Test info object."""
    raw = read_raw_fif(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))
    evoked = epochs.average()
    # Test subclassing was successful: Info behaves like a plain dict
    info = Info(a=7, b='aaaaa')
    assert ('a' in info)
    assert ('b' in info)
    info[42] = 'foo'
    assert (info[42] == 'foo')
    # Test info attribute in API objects; str/repr must expose every key
    # plus the fixed measurement date and dig summary of the test file
    for obj in [raw, epochs, evoked]:
        assert (isinstance(obj.info, Info))
        info_str = '%s' % obj.info
        assert len(info_str.split('\n')) == len(obj.info.keys()) + 2
        assert all(k in info_str for k in obj.info.keys())
        rep = repr(obj.info)
        assert '2002-12-03 19:01:10 GMT' in rep, rep
        assert '146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)' in rep
        dig_rep = repr(obj.info['dig'][0])
        assert 'LPA' in dig_rep, dig_rep
        assert '(-71.4, 0.0, 0.0) mm' in dig_rep, dig_rep
        assert 'head frame' in dig_rep, dig_rep
        # Test our BunchConstNamed support
        for func in (str, repr):
            assert '4 (FIFFV_COORD_HEAD)' == \
                func(obj.info['dig'][0]['coord_frame'])
    # Test read-only fields: nchan/ch_names mirror the chs list
    info = raw.info.copy()
    nchan = len(info['chs'])
    ch_names = [ch['ch_name'] for ch in info['chs']]
    assert info['nchan'] == nchan
    assert list(info['ch_names']) == ch_names
    # Deleting of regular fields should work
    info['foo'] = 'bar'
    del info['foo']
    # Test updating of fields: _update_redundant resyncs nchan/ch_names
    del info['chs'][-1]
    info._update_redundant()
    assert info['nchan'] == nchan - 1
    assert list(info['ch_names']) == ch_names[:-1]
    info['chs'][0]['ch_name'] = 'foo'
    info._update_redundant()
    assert info['ch_names'][0] == 'foo'
    # Test casting to and from a dict preserves equality
    info_dict = dict(info)
    info2 = Info(info_dict)
    assert info == info2
def test_read_write_info():
    """Test IO of info."""
    tempdir = _TempDir()
    info = read_info(raw_fname)
    temp_file = op.join(tempdir, 'info.fif')
    # check for bug `#1198`: dev_head_t must survive a write/read cycle
    info['dev_head_t']['trans'] = np.eye(4)
    t1 = info['dev_head_t']['trans']
    write_info(temp_file, info)
    info2 = read_info(temp_file)
    t2 = info2['dev_head_t']['trans']
    assert (len(info['chs']) == len(info2['chs']))
    assert_array_equal(t1, t2)
    # proc_history (e.g., GH#1875): non-ASCII strings must round-trip
    creator = u'é'
    info = read_info(chpi_fname)
    info['proc_history'][0]['creator'] = creator
    info['hpi_meas'][0]['creator'] = creator
    info['subject_info']['his_id'] = creator
    info['subject_info']['weight'] = 11.1
    info['subject_info']['height'] = 2.3
    if info['gantry_angle'] is None:  # future testing data may include it
        info['gantry_angle'] = 0.  # Elekta supine position
    gantry_angle = info['gantry_angle']
    meas_id = info['meas_id']
    write_info(temp_file, info)
    info = read_info(temp_file)
    assert info['proc_history'][0]['creator'] == creator
    assert info['hpi_meas'][0]['creator'] == creator
    assert info['subject_info']['his_id'] == creator
    assert info['gantry_angle'] == gantry_angle
    assert info['subject_info']['height'] == 2.3
    assert info['subject_info']['weight'] == 11.1
    for key in ['secs', 'usecs', 'version']:
        assert info['meas_id'][key] == meas_id[key]
    assert_array_equal(info['meas_id']['machid'], meas_id['machid'])
    # Test that writing twice produces the same file
    # (md5 is used only to compare file contents, not for security)
    m1 = hashlib.md5()
    with open(temp_file, 'rb') as fid:
        m1.update(fid.read())
    m1 = m1.hexdigest()
    temp_file_2 = op.join(tempdir, 'info2.fif')
    assert temp_file_2 != temp_file
    write_info(temp_file_2, info)
    m2 = hashlib.md5()
    with open(temp_file_2, 'rb') as fid:
        m2.update(fid.read())
    m2 = m2.hexdigest()
    assert m1 == m2
def test_io_dig_points():
    """Test writing and re-reading digitizer point files."""
    tempdir = _TempDir()
    points = _read_dig_points(hsp_fname)

    dest = op.join(tempdir, 'test.txt')
    dest_bad = op.join(tempdir, 'test.mne')
    # Wrong shape and unsupported extension are both rejected on write
    pytest.raises(ValueError, _write_dig_points, dest, points[:, :2])
    pytest.raises(ValueError, _write_dig_points, dest_bad, points)

    # A write/read round trip must preserve the points exactly
    _write_dig_points(dest, points)
    points_read = _read_dig_points(dest, unit='m')
    assert_array_equal(points, points_read,
                       "Dig points diverged after writing and reading.")

    # A two-column file cannot be parsed as dig points
    bad_points = np.array([[-106.93, 99.80], [99.80, 68.81]])
    np.savetxt(dest, bad_points, delimiter='\t', newline='\n')
    pytest.raises(ValueError, _read_dig_points, dest)
def test_make_dig_points():
    """Test application of Polhemus HSP to info."""
    # Extra (head-shape) points alone are enough to build a dig list
    extra_points = _read_dig_points(hsp_fname)
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert info['dig'] is None
    info['dig'] = _make_dig_points(extra_points=extra_points)
    assert (info['dig'])
    assert_allclose(info['dig'][0]['r'], [-.10693, .09980, .06881])
    # Fiducials plus ELP points: nasion must carry the expected position
    elp_points = _read_dig_points(elp_fname)
    nasion, lpa, rpa = elp_points[:3]
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert info['dig'] is None
    info['dig'] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None)
    assert (info['dig'])
    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
    assert_array_equal(info['dig'][idx]['r'],
                       np.array([.0013930, .0131613, -.0046967]))
    # Any argument with the wrong number of columns must raise
    pytest.raises(ValueError, _make_dig_points, nasion[:2])
    pytest.raises(ValueError, _make_dig_points, None, lpa[:2])
    pytest.raises(ValueError, _make_dig_points, None, None, rpa[:2])
    pytest.raises(ValueError, _make_dig_points, None, None, None,
                  elp_points[:, :2])
    pytest.raises(ValueError, _make_dig_points, None, None, None, None,
                  elp_points[:, :2])
def test_redundant():
    """Test some of the redundant properties of info."""
    # Indexing
    info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000., ch_types=None)
    for idx, expected in enumerate(['a', 'b', 'c']):
        assert info['ch_names'][idx] == expected
    # Equality
    assert info['ch_names'] == info['ch_names']
    assert info['ch_names'] == ['a', 'b', 'c']
    # No channels in info
    info = create_info(ch_names=[], sfreq=1000., ch_types=None)
    assert info['ch_names'] == []
    # List should be read-only
    # NOTE(review): no assertion follows this comment in the original --
    # the read-only check appears to be missing; confirm intent.
    info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000., ch_types=None)
def test_merge_info():
    """Test merging of multiple Info objects."""
    info_a = create_info(ch_names=['a', 'b', 'c'], sfreq=1000., ch_types=None)
    info_b = create_info(ch_names=['d', 'e', 'f'], sfreq=1000., ch_types=None)
    info_merged = _merge_info([info_a, info_b])
    # BUG FIX: these used to read ``assert info_merged['nchan'], 6`` --
    # the ``6`` (and the name list) was parsed as the assert *message*,
    # so the checks could never fail. Compare explicitly instead.
    assert info_merged['nchan'] == 6
    assert info_merged['ch_names'] == ['a', 'b', 'c', 'd', 'e', 'f']
    pytest.raises(ValueError, _merge_info, [info_a, info_a])
    # Testing for force updates before merging
    info_c = create_info(ch_names=['g', 'h', 'i'], sfreq=500., ch_types=None)
    # This will break because sfreq is not equal
    pytest.raises(RuntimeError, _merge_info, [info_a, info_c])
    _force_update_info(info_a, info_c)
    assert info_c['sfreq'] == info_a['sfreq']
    assert info_c['ch_names'][0] != info_a['ch_names'][0]
    # Make sure it works now
    _merge_info([info_a, info_c])
    # Check that you must supply Info
    pytest.raises(ValueError, _force_update_info, info_a,
                  dict([('sfreq', 1000.)]))
    # KIT System-ID: equal or one-sided ids merge, conflicting ids raise.
    info_a['kit_system_id'] = 50
    assert _merge_info((info_a, info_b))['kit_system_id'] == 50
    info_b['kit_system_id'] = 50
    assert _merge_info((info_a, info_b))['kit_system_id'] == 50
    info_b['kit_system_id'] = 60
    pytest.raises(ValueError, _merge_info, (info_a, info_b))
    # hpi infos: equal or one-sided measurements merge, conflicts raise.
    info_d = create_info(ch_names=['d', 'e', 'f'], sfreq=1000., ch_types=None)
    info_merged = _merge_info([info_a, info_d])
    assert not info_merged['hpi_meas']
    assert not info_merged['hpi_results']
    info_a['hpi_meas'] = [{'f1': 3, 'f2': 4}]
    assert _merge_info([info_a, info_d])['hpi_meas'] == info_a['hpi_meas']
    info_d['hpi_meas'] = [{'f1': 3, 'f2': 4}]
    assert _merge_info([info_a, info_d])['hpi_meas'] == info_d['hpi_meas']
    # This will break because of inconsistency
    info_d['hpi_meas'] = [{'f1': 3, 'f2': 5}]
    pytest.raises(ValueError, _merge_info, [info_a, info_d])
    # Merging a real measurement info with a synthetic stim channel.
    info_0 = read_info(raw_fname)
    info_0['bads'] = ['MEG 2443', 'EEG 053']
    assert len(info_0['chs']) == 376
    assert len(info_0['dig']) == 146
    info_1 = create_info(["STI XXX"], info_0['sfreq'], ['stim'])
    assert info_1['bads'] == []
    info_out = _merge_info([info_0, info_1], force_update_to_first=True)
    assert len(info_out['chs']) == 377
    assert len(info_out['bads']) == 2
    assert len(info_out['dig']) == 146
    # The inputs must be left untouched by the merge.
    assert len(info_0['chs']) == 376
    assert len(info_0['bads']) == 2
    assert len(info_0['dig']) == 146
def test_check_consistency():
    """Test consistency check of Info objects."""
    info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.)
    info._check_consistency()  # a fresh info should pass
    # Info without any channels
    info_empty = create_info(ch_names=[], sfreq=1000.)
    info_empty._check_consistency()
    # Bad channels that are not in the info object
    broken = info.copy()
    broken['bads'] = ['b', 'foo', 'bar']
    with pytest.raises(RuntimeError):
        broken._check_consistency()
    # Bad data types: each numeric field rejects a string.
    for key in ('sfreq', 'highpass', 'lowpass'):
        broken = info.copy()
        broken[key] = 'foo'
        with pytest.raises(ValueError):
            broken._check_consistency()
    broken = info.copy()
    broken['filename'] = 'foo'
    with pytest.warns(RuntimeWarning, match='filename'):
        broken._check_consistency()
    # Silent type conversion to float
    coerced = info.copy()
    coerced['sfreq'] = 1
    coerced['highpass'] = 2
    coerced['lowpass'] = 2
    coerced._check_consistency()
    for key in ('sfreq', 'highpass', 'lowpass'):
        assert isinstance(coerced[key], float)
    # Duplicate channel names
    duped = info.copy()
    duped['chs'][2]['ch_name'] = 'b'
    with pytest.raises(RuntimeError):
        duped._check_consistency()
    # Duplicates appended with running numbers
    with pytest.warns(RuntimeWarning, match='Channel names are not'):
        info3 = create_info(ch_names=['a', 'b', 'b', 'c', 'b'], sfreq=1000.)
    assert_array_equal(info3['ch_names'], ['a', 'b-0', 'b-1', 'c', 'b-2'])
def _is_anonymous(inst):
    """Check all the anonymity fields.

    inst is either a raw or epochs object.
    """
    from collections import namedtuple
    anonymity_checks = namedtuple("anonymity_checks",
                                  ["missing_subject_info",
                                   "anonymous_file_id_secs",
                                   "anonymous_file_id_usecs",
                                   "anonymous_meas_id_secs",
                                   "anonymous_meas_id_usecs",
                                   "anonymous_meas_date",
                                   "anonymous_annotations"])
    info = inst.info
    # subject_info counts as "missing" when absent or explicitly None.
    no_subject = ('subject_info' not in info.keys() or
                  info['subject_info'] is None)
    file_id_secs = info['file_id']['secs'] == DATE_NONE[0]
    file_id_usecs = info['file_id']['usecs'] == DATE_NONE[1]
    meas_id_secs = info['meas_id']['secs'] == DATE_NONE[0]
    meas_id_usecs = info['meas_id']['usecs'] == DATE_NONE[1]
    meas_date = info['meas_date']
    if meas_date is None:
        date_anon = True
    else:
        assert isinstance(meas_date, tuple)
        date_anon = meas_date == DATE_NONE
    annot_anon = (hasattr(inst, 'annotations') and
                  inst.annotations.orig_time is None)
    return anonymity_checks(no_subject, file_id_secs, file_id_usecs,
                            meas_id_secs, meas_id_usecs, date_anon,
                            annot_anon)
def test_anonymize():
    """Test that sensitive information can be anonymized."""
    with pytest.raises(TypeError):
        anonymize_info('foo')
    # Fake some subject data
    raw = read_raw_fif(raw_fname)
    annot = Annotations(onset=[0, 1], duration=[1, 1],
                        description='dummy', orig_time=None)
    raw.set_annotations(annot)
    raw.info['subject_info'] = dict(id=1, his_id='foobar', last_name='bar',
                                    first_name='bar', birthday=(1987, 4, 8),
                                    sex=0, hand=1)
    # Anonymizing an incomplete info (no file_id) must not error out.
    partial = raw.info.copy()
    partial.pop('file_id')
    anonymize_info(partial)
    # Test the instance method on both raw and epochs.
    events = read_events(event_name)
    epochs = Epochs(raw, events[:1], 2, 0., 0.1)
    assert not any(_is_anonymous(raw))
    raw.anonymize()
    assert all(_is_anonymous(raw))
    assert not any(_is_anonymous(epochs)[:-1])  # epochs has no annotations
    epochs.anonymize()
    assert all(_is_anonymous(epochs)[:-1])
    # When we write out with raw.save, these get overwritten with the
    # new save time.
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
    raw.save(out_fname, overwrite=True)
    assert all(_is_anonymous(read_raw_fif(out_fname)))
@testing.requires_testing_data
def test_csr_csc():
    """Test CSR and CSC."""
    info = read_info(sss_ctc_fname)
    info = pick_info(info, pick_types(info, meg=True, exclude=[]))
    sss_ctc = info['proc_history'][0]['max_info']['sss_ctc']
    ct = sss_ctc['decoupler'].copy()
    assert isinstance(ct, sparse.csc_matrix)  # stored as CSC
    tempdir = _TempDir()

    def _round_trip(fname):
        # Write the current info and return the decoupler read back.
        write_info(fname, info)
        reread = read_info(fname)
        return reread['proc_history'][0]['max_info']['sss_ctc']['decoupler']

    loaded = _round_trip(op.join(tempdir, 'test.fif'))
    assert isinstance(loaded, sparse.csc_matrix)
    assert_array_equal(loaded.toarray(), ct.toarray())
    # Now CSR
    csr = ct.tocsr()
    assert isinstance(csr, sparse.csr_matrix)
    assert_array_equal(csr.toarray(), ct.toarray())
    info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr
    loaded = _round_trip(op.join(tempdir, 'test1.fif'))
    assert isinstance(loaded, sparse.csc_matrix)  # this gets cast to CSC
    assert_array_equal(loaded.toarray(), ct.toarray())
@testing.requires_testing_data
def test_check_compensation_consistency():
    """Test check picks compensation."""
    raw = read_raw_ctf(ctf_fname, preload=False)
    events = make_fixed_length_events(raw, 99999)
    # With the CTF reference channels included in the picks, no
    # compensation grade reports a mismatch.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=True)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, False]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 0
        # Epoching must work for both grades with these picks.
        Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks)
    # Without the reference channels, grade-1 compensation becomes
    # inconsistent and 17 channels are reported missing.
    picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False)
    pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]
    for (comp, expected_result) in zip([0, 1], [False, True]):
        raw.apply_gradient_compensation(comp)
        ret, missing = _bad_chans_comp(raw.info, pick_ch_names)
        assert ret == expected_result
        assert len(missing) == 17
    # Picking without the reference channels drops the compensators;
    # the log should say so.
    with catch_logging() as log:
        Epochs(raw, events, None, -0.2, 0.2, preload=False,
               picks=picks, verbose=True)
        assert 'Removing 5 compensators' in log.getvalue()
run_tests_if_main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, os, subprocess
from PIL import Image
def parse_argument():
    """Parse the command line; expects one positional image filename."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filename', metavar='filename', action='store')
    return arg_parser.parse_args()
def convert_to_gba(filename):
    """Convert an image into a self-contained GBA C program, write a
    makefile for devkitARM, and invoke `make` to build the ROM.

    The generated program switches to video mode 3 (240x160, 15-bit
    color), copies the pixel table into VRAM and loops forever.
    """
    img = Image.open(filename)
    # Normalize to RGB so pix[x, y] always yields a 3-tuple; palette or
    # RGBA images would otherwise break the unpacking below.
    img = img.convert('RGB')
    width, height = img.size
    if width > 240 or height > 160:
        # BUG FIX: the message used to claim the limit was 240x120, but
        # the GBA screen (and the check above) is 240x160.
        print('error: resolution of image is higher than 240x160')
        return
    pix = img.load()
    data = []
    c_head = '''int main(){const unsigned int targetbmp[%d]={\n''' % (width * height)
    # BUG FIX: the inner loop used "x<=%d", reading one element past the
    # end of each row of targetbmp; use a strict comparison like the y loop.
    c_eof = '''
};*(unsigned int*)0x04000000=0x0403;int x,y;for(y=0;y<%d;y++){for(x=0;x<%d;x++){
((unsigned short*)0x06000000)[240*y+x]=targetbmp[%d*y+x];}}while(1);return 0;}
''' % (height, width, width)
    # Row-major pixel dump, one "0x....," token per pixel.
    for y in range(height):
        for x in range(width):
            r, g, b = pix[x, y]
            data.append(convert_to_16color(r, g, b))
    ofilename = os.path.splitext(filename)[0] + '.c'
    with open(ofilename, 'w') as filehandle:
        filehandle.write(c_head)
        for item in data:
            filehandle.write(item)
        filehandle.write(c_eof)
    # Emit a devkitARM makefile next to the generated C file. Recipe
    # lines use "\t" escapes, which become real tabs in the output.
    with open('makefile', 'w') as filehandle:
        makefile = '''
PATH := $(DEVKITARM)/bin:$(PATH)
PROJ := {0}
TARGET := $(PROJ)
OBJS := $(PROJ).o
PREFIX := arm-none-eabi-
CC := $(PREFIX)gcc
LD := $(PREFIX)gcc
OBJCOPY := $(PREFIX)objcopy
ARCH := -mthumb-interwork -mthumb
SPECS := -specs=gba.specs
CFLAGS := $(ARCH) -O2 -Wall -fno-strict-aliasing
LDFLAGS := $(ARCH) $(SPECS)
.PHONY : build clean
build: $(TARGET).gba
$(TARGET).gba : $(TARGET).elf
\t$(OBJCOPY) -v -O binary $< $@
\t-@gbafix $@
$(TARGET).elf : $(OBJS)
\t$(LD) $^ $(LDFLAGS) -o $@
$(OBJS) : %.o : %.c
\t$(CC) -c $< $(CFLAGS) -o $@
clean :
\t@rm -fv *.gba
\t@rm -fv *.elf
\t@rm -fv *.o
'''.format(os.path.splitext(filename)[0])
        filehandle.write(makefile)
    subp = subprocess.Popen(
        ['make'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = subp.communicate()
    if err:
        print(err)
    else:
        print(out)
def convert_to_16color(r, g, b):
    """Scale 8-bit RGB down to 5 bits per channel and pack the result
    as a BGR555 value, returned as a comma-terminated hex token."""
    r, g, b = (int(c / 256.0 * 32) for c in (r, g, b))
    packed = (b << 10) | (g << 5) | r
    return "0x{:04x},".format(packed)
# Script entry point: convert the image named on the command line.
if __name__ == '__main__':
    args = parse_argument()
    gbaimg = convert_to_gba(args.filename)
# -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
import json
import random
from openerp import tools
from openerp.exceptions import Warning
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.tools import ustr
from openerp.osv import osv, fields
class MassMailingCategory(osv.Model):
    """Model of categories of mass mailing, i.e. marketing, newsletter, ... """
    _name = 'mail.mass_mailing.category'
    _description = 'Mass Mailing Category'
    _order = 'name'  # categories are listed alphabetically

    _columns = {
        'name': fields.char('Name', required=True),
    }
class MassMailingList(osv.Model):
    """Model of a contact list. """
    _name = 'mail.mass_mailing.list'
    _order = 'name'
    _description = 'Mailing List'

    def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None):
        # Function-field getter: count, per list, the contacts that have
        # not opted out. Lists with no matching contact keep the default 0.
        result = dict.fromkeys(ids, 0)
        Contacts = self.pool.get('mail.mass_mailing.contact')
        for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context):
            result[group['list_id'][0]] = group['list_id_count']
        return result

    _columns = {
        'name': fields.char('Mailing List', required=True),
        'contact_nbr': fields.function(
            _get_contact_nbr, type='integer',
            string='Number of Contacts',
        ),
    }
class MassMailingContact(osv.Model):
    """Model of a contact. This model is different from the partner model
    because it holds only some basic information: name, email. The purpose is to
    be able to deal with large contact list to email without bloating the partner
    base."""
    _name = 'mail.mass_mailing.contact'
    _inherit = 'mail.thread'
    _description = 'Mass Mailing Contact'
    _order = 'email'
    _rec_name = 'email'

    _columns = {
        'name': fields.char('Name'),
        'email': fields.char('Email', required=True),
        'create_date': fields.datetime('Create Date'),
        'list_id': fields.many2one(
            'mail.mass_mailing.list', string='Mailing List',
            ondelete='cascade', required=True,
        ),
        'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'),
    }

    def _get_latest_list(self, cr, uid, context=None):
        """Return the id of the most recently created mailing list, or False.

        BUG FIX: the default used to be the mutable ``context={}`` -- a
        shared dict across calls; use the conventional ``context=None``.
        """
        lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context)
        return lid[0] if lid else False

    _defaults = {
        'list_id': _get_latest_list
    }

    def get_name_email(self, name, context):
        """Split a free-form contact string into (name, email).

        When only one of the two parts is found, it is reused for the
        other so both are always set.
        """
        name, email = self.pool['res.partner']._parse_partner_name(name, context=context)
        if name and not email:
            email = name
        if email and not name:
            name = email
        return name, email

    def name_create(self, cr, uid, name, context=None):
        # Quick-create from a "Name <email>" string.
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def add_to_list(self, cr, uid, name, list_id, context=None):
        # Same as name_create, but attached to an explicit mailing list.
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def message_get_default_recipients(self, cr, uid, ids, context=None):
        # mail.thread hook: a contact is reached via its email only.
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False}
        return res
class MassMailingStage(osv.Model):
    """Stage for mass mailing campaigns. """
    _name = 'mail.mass_mailing.stage'
    _description = 'Mass Mailing Campaign Stage'
    _order = 'sequence'  # stages ordered by their manual sequence

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        # Manual ordering of stages (e.g. kanban columns).
        'sequence': fields.integer('Sequence'),
    }

    _defaults = {
        'sequence': 0,
    }
class MassMailingCampaign(osv.Model):
    """Model of mass mailing campaigns. """
    _name = "mail.mass_mailing.campaign"
    _description = 'Mass Mailing Campaign'

    def _get_statistics(self, cr, uid, ids, name, arg, context=None):
        """ Compute statistics of the mass mailing campaign """
        results = {}
        # One aggregate row per campaign; the RIGHT JOIN keeps campaigns
        # that have no mail statistics yet (their counters are all 0).
        cr.execute("""
            SELECT
                c.id as campaign_id,
                COUNT(s.id) AS total,
                COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
                COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
                COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
                COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
                COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
                COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied ,
                COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
            FROM
                mail_mail_statistics s
            RIGHT JOIN
                mail_mass_mailing_campaign c
                ON (c.id = s.mass_mailing_campaign_id)
            WHERE
                c.id IN %s
            GROUP BY
                c.id
        """, (tuple(ids), ))
        for row in cr.dictfetchall():
            results[row.pop('campaign_id')] = row
            total = row['total'] or 1  # guard against division by zero
            # NOTE(review): this overwrites the SQL-computed 'delivered'
            # (id-and-not-bounced) with sent - bounced; confirm which
            # definition is the intended one.
            row['delivered'] = row['sent'] - row['bounced']
            row['received_ratio'] = 100.0 * row['delivered'] / total
            row['opened_ratio'] = 100.0 * row['opened'] / total
            row['replied_ratio'] = 100.0 * row['replied'] / total
        return results

    _columns = {
        'name': fields.char('Name', required=True),
        'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True),
        'user_id': fields.many2one(
            'res.users', 'Responsible',
            required=True,
        ),
        'category_ids': fields.many2many(
            'mail.mass_mailing.category', 'mail_mass_mailing_category_rel',
            'category_id', 'campaign_id', string='Categories'),
        'mass_mailing_ids': fields.one2many(
            'mail.mass_mailing', 'mass_mailing_campaign_id',
            'Mass Mailings',
        ),
        'unique_ab_testing': fields.boolean(
            'AB Testing',
            help='If checked, recipients will be mailed only once, allowing to send'
                 'various mailings in a single campaign to test the effectiveness'
                 'of the mailings.'),
        'color': fields.integer('Color Index'),
        # stat fields (all served by the single _get_statistics query)
        'total': fields.function(
            _get_statistics, string='Total',
            type='integer', multi='_get_statistics'
        ),
        'scheduled': fields.function(
            _get_statistics, string='Scheduled',
            type='integer', multi='_get_statistics'
        ),
        'failed': fields.function(
            _get_statistics, string='Failed',
            type='integer', multi='_get_statistics'
        ),
        'sent': fields.function(
            _get_statistics, string='Sent Emails',
            type='integer', multi='_get_statistics'
        ),
        'delivered': fields.function(
            _get_statistics, string='Delivered',
            type='integer', multi='_get_statistics',
        ),
        'opened': fields.function(
            _get_statistics, string='Opened',
            type='integer', multi='_get_statistics',
        ),
        'replied': fields.function(
            _get_statistics, string='Replied',
            type='integer', multi='_get_statistics'
        ),
        'bounced': fields.function(
            _get_statistics, string='Bounced',
            type='integer', multi='_get_statistics'
        ),
        'received_ratio': fields.function(
            _get_statistics, string='Received Ratio',
            type='integer', multi='_get_statistics',
        ),
        'opened_ratio': fields.function(
            _get_statistics, string='Opened Ratio',
            type='integer', multi='_get_statistics',
        ),
        'replied_ratio': fields.function(
            _get_statistics, string='Replied Ratio',
            type='integer', multi='_get_statistics',
        ),
    }

    def _get_default_stage_id(self, cr, uid, context=None):
        # First stage by _order ('sequence') is the default one.
        stage_ids = self.pool['mail.mass_mailing.stage'].search(cr, uid, [], limit=1, context=context)
        return stage_ids and stage_ids[0] or False

    _defaults = {
        'user_id': lambda self, cr, uid, ctx=None: uid,
        'stage_id': lambda self, *args: self._get_default_stage_id(*args),
    }

    def get_recipients(self, cr, uid, ids, model=None, context=None):
        """Return the recipients of a mailing campaign. This is based on the statistics
        build for each mailing. """
        Statistics = self.pool['mail.mail.statistics']
        res = dict.fromkeys(ids, False)
        for cid in ids:
            domain = [('mass_mailing_campaign_id', '=', cid)]
            if model:
                domain += [('model', '=', model)]
            stat_ids = Statistics.search(cr, uid, domain, context=context)
            # Set of target record ids already mailed in this campaign.
            res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context))
        return res
class MassMailing(osv.Model):
""" MassMailing models a wave of emails for a mass mailign campaign.
A mass mailing is an occurence of sending emails. """
_name = 'mail.mass_mailing'
_description = 'Mass Mailing'
# number of periods for tracking mail_mail statistics
_period_number = 6
_order = 'sent_date DESC'
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, date_begin, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
'tootip': (str) bar_column_tooltip,
}
]
"""
date_begin = date_begin.date()
section_result = [{'value': 0,
'tooltip': ustr((date_begin + relativedelta.relativedelta(days=i)).strftime('%d %B %Y')),
} for i in range(0, self._period_number)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
field = obj._fields.get(groupby_field.split(':')[0])
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if field.type == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern).date()
timedelta = relativedelta.relativedelta(group_begin_date, date_begin)
section_result[timedelta.days] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field)}
return section_result
def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None):
""" Get the daily statistics of the mass mailing. This is done by a grouping
on opened and replied fields. Using custom format in context, we obtain
results for the next 6 days following the mass mailing date. """
obj = self.pool['mail.mail.statistics']
res = {}
for mailing in self.browse(cr, uid, ids, context=context):
res[mailing.id] = {}
date = mailing.sent_date if mailing.sent_date else mailing.create_date
date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1)
date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)]
res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context))
domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)]
res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context))
return res
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing """
results = {}
cr.execute("""
SELECT
m.id as mailing_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.sent is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing m
ON (m.id = s.mass_mailing_id)
WHERE
m.id IN %s
GROUP BY
m.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('mailing_id')] = row
total = row['total'] or 1
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
def _get_mailing_model(self, cr, uid, context=None):
res = []
for model_name in self.pool:
model = self.pool[model_name]
if hasattr(model, '_mail_mass_mailing') and getattr(model, '_mail_mass_mailing'):
res.append((model._name, getattr(model, '_mail_mass_mailing')))
res.append(('mail.mass_mailing.contact', _('Mailing List')))
return res
# indirections for inheritance
_mailing_model = lambda self, *args, **kwargs: self._get_mailing_model(*args, **kwargs)
_columns = {
'name': fields.char('Subject', required=True),
'email_from': fields.char('From', required=True),
'create_date': fields.datetime('Creation Date'),
'sent_date': fields.datetime('Sent Date', oldname='date', copy=False),
'body_html': fields.html('Body'),
'attachment_ids': fields.many2many(
'ir.attachment', 'mass_mailing_ir_attachments_rel',
'mass_mailing_id', 'attachment_id', 'Attachments'
),
'mass_mailing_campaign_id': fields.many2one(
'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
ondelete='set null',
),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', required=True, copy=False,
),
'color': fields.related(
'mass_mailing_campaign_id', 'color',
type='integer', string='Color Index',
),
# mailing options
'reply_to_mode': fields.selection(
[('thread', 'In Document'), ('email', 'Specified Email Address')],
string='Reply-To Mode', required=True,
),
'reply_to': fields.char('Reply To', help='Preferred Reply-To Address'),
# recipients
'mailing_model': fields.selection(_mailing_model, string='Recipients Model', required=True),
'mailing_domain': fields.char('Domain', oldname='domain'),
'contact_list_ids': fields.many2many(
'mail.mass_mailing.list', 'mail_mass_mailing_list_rel',
string='Mailing Lists',
),
'contact_ab_pc': fields.integer(
'AB Testing percentage',
help='Percentage of the contacts that will be mailed. Recipients will be taken randomly.'
),
# statistics data
'statistics_ids': fields.one2many(
'mail.mail.statistics', 'mass_mailing_id',
'Emails Statistics',
),
'total': fields.function(
_get_statistics, string='Total',
type='integer', multi='_get_statistics',
),
'scheduled': fields.function(
_get_statistics, string='Scheduled',
type='integer', multi='_get_statistics',
),
'failed': fields.function(
_get_statistics, string='Failed',
type='integer', multi='_get_statistics',
),
'sent': fields.function(
_get_statistics, string='Sent',
type='integer', multi='_get_statistics',
),
'delivered': fields.function(
_get_statistics, string='Delivered',
type='integer', multi='_get_statistics',
),
'opened': fields.function(
_get_statistics, string='Opened',
type='integer', multi='_get_statistics',
),
'replied': fields.function(
_get_statistics, string='Replied',
type='integer', multi='_get_statistics',
),
'bounced': fields.function(
_get_statistics, string='Bounced',
type='integer', multi='_get_statistics',
),
'received_ratio': fields.function(
_get_statistics, string='Received Ratio',
type='integer', multi='_get_statistics',
),
'opened_ratio': fields.function(
_get_statistics, string='Opened Ratio',
type='integer', multi='_get_statistics',
),
'replied_ratio': fields.function(
_get_statistics, string='Replied Ratio',
type='integer', multi='_get_statistics',
),
# daily ratio
'opened_daily': fields.function(
_get_daily_statistics, string='Opened',
type='char', multi='_get_daily_statistics',
),
'replied_daily': fields.function(
_get_daily_statistics, string='Replied',
type='char', multi='_get_daily_statistics',
)
}
def default_get(self, cr, uid, fields, context=None):
res = super(MassMailing, self).default_get(cr, uid, fields, context=context)
if 'reply_to_mode' in fields and not 'reply_to_mode' in res and res.get('mailing_model'):
if res['mailing_model'] in ['res.partner', 'mail.mass_mailing.contact']:
res['reply_to_mode'] = 'email'
else:
res['reply_to_mode'] = 'thread'
return res
_defaults = {
'state': 'draft',
'email_from': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'reply_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'mailing_model': 'mail.mass_mailing.contact',
'contact_ab_pc': 100,
'mailing_domain': [],
}
#------------------------------------------------------
# Technical stuff
#------------------------------------------------------
def copy_data(self, cr, uid, id, default=None, context=None):
mailing = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_('%s (copy)') % mailing.name)
return super(MassMailing, self).copy_data(cr, uid, id, default, context=context)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
""" Override read_group to always display all states. """
if groupby and groupby[0] == "state":
# Default result structure
# states = self._get_state_list(cr, uid, context=context)
states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')]
read_group_all_states = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('state', '=', state_value)],
'state': state_value,
'state_count': 0,
} for state_value, state_name in states]
# Get standard results
read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
# Update standard results with default results
result = []
for state_value, state_name in states:
res = filter(lambda x: x['state'] == state_value, read_group_res)
if not res:
res = filter(lambda x: x['state'] == state_value, read_group_all_states)
res[0]['state'] = [state_value, state_name]
result.append(res[0])
return result
else:
return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
#------------------------------------------------------
# Views & Actions
#------------------------------------------------------
def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None):
value = {}
if mailing_model == 'mail.mass_mailing.contact':
mailing_list_ids = set()
for item in list_ids:
if isinstance(item, (int, long)):
mailing_list_ids.add(item)
elif len(item) == 3:
mailing_list_ids |= set(item[2])
if mailing_list_ids:
value['mailing_domain'] = "[('list_id', 'in', %s), ('opt_out', '=', False)]" % list(mailing_list_ids)
else:
value['mailing_domain'] = "[('list_id', '=', False)]"
elif mailing_model in ['res.partner']:
value['mailing_domain'] = "[('opt_out', '=', False)]"
else:
value['mailing_domain'] = []
return {'value': value}
def action_duplicate(self, cr, uid, ids, context=None):
copy_id = None
for mid in ids:
copy_id = self.copy(cr, uid, mid, context=context)
if copy_id:
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.mass_mailing',
'res_id': copy_id,
'context': context,
}
return False
def action_test_mailing(self, cr, uid, ids, context=None):
ctx = dict(context, default_mass_mailing_id=ids[0])
return {
'name': _('Test Mailing'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.mass_mailing.test',
'target': 'new',
'context': ctx,
}
def action_edit_html(self, cr, uid, ids, context=None):
if not len(ids) == 1:
raise ValueError('One and only one ID allowed for this action')
mail = self.browse(cr, uid, ids[0], context=context)
url = '/website_mail/email_designer?model=mail.mass_mailing&res_id=%d&template_model=%s&return_action=%d&enable_editor=1' % (ids[0], mail.mailing_model, context['params']['action'])
return {
'name': _('Open with Visual Editor'),
'type': 'ir.actions.act_url',
'url': url,
'target': 'self',
}
#------------------------------------------------------
# Email Sending
#------------------------------------------------------
def get_recipients(self, cr, uid, mailing, context=None):
if mailing.mailing_domain:
domain = eval(mailing.mailing_domain)
res_ids = self.pool[mailing.mailing_model].search(cr, uid, domain, context=context)
else:
res_ids = []
domain = [('id', 'in', res_ids)]
# randomly choose a fragment
if mailing.contact_ab_pc < 100:
contact_nbr = self.pool[mailing.mailing_model].search(cr, uid, domain, count=True, context=context)
topick = int(contact_nbr / 100.0 * mailing.contact_ab_pc)
if mailing.mass_mailing_campaign_id and mailing.mass_mailing_campaign_id.unique_ab_testing:
already_mailed = self.pool['mail.mass_mailing.campaign'].get_recipients(cr, uid, [mailing.mass_mailing_campaign_id.id], context=context)[mailing.mass_mailing_campaign_id.id]
else:
already_mailed = set([])
remaining = set(res_ids).difference(already_mailed)
if topick > len(remaining):
topick = len(remaining)
res_ids = random.sample(remaining, topick)
return res_ids
    def send_mail(self, cr, uid, ids, context=None):
        """Send each mailing in ``ids`` to its computed recipients.

        For every mailing: resolve recipients, build a mass-mail
        ``mail.compose.message`` wizard mirroring the mailing's content,
        trigger its send, then stamp the mailing as done.
        """
        author_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id
        for mailing in self.browse(cr, uid, ids, context=context):
            # instantiate an email composer + send emails
            res_ids = self.get_recipients(cr, uid, mailing, context=context)
            if not res_ids:
                # NOTE(review): resolves to the builtin Warning unless an
                # exceptions Warning is imported above — confirm at file top.
                raise Warning('Please select recipients.')
            # active_ids drives the composer's mass_mail rendering loop.
            comp_ctx = dict(context, active_ids=res_ids)
            composer_values = {
                'author_id': author_id,
                'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
                'body': mailing.body_html,
                'subject': mailing.name,
                'model': mailing.mailing_model,
                'email_from': mailing.email_from,
                'record_name': False,
                'composition_mode': 'mass_mail',
                'mass_mailing_id': mailing.id,
                'mailing_list_ids': [(4, l.id) for l in mailing.contact_list_ids],
                # replies open a new thread unless reply_to_mode == 'thread'
                'no_auto_thread': mailing.reply_to_mode != 'thread',
            }
            if mailing.reply_to_mode == 'email':
                composer_values['reply_to'] = mailing.reply_to
            composer_id = self.pool['mail.compose.message'].create(cr, uid, composer_values, context=comp_ctx)
            self.pool['mail.compose.message'].send_mail(cr, uid, [composer_id], context=comp_ctx)
            self.write(cr, uid, [mailing.id], {'sent_date': fields.datetime.now(), 'state': 'done'}, context=context)
        return True
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationsample.generic;
/**
 * Properties with unresolved generic types that use identical generic parameter names but
 * differ in their positions.
 *
 * @param <B> mapping value type
 * @param <C> mapping name type
 * @author Dmytro Nosan
 */
public class MixGenericNameProperties<B, C extends Number> extends AbstractGenericProperties<String, C, B> {
}
import numbers
from copy import copy
import theano.tensor as tt
from ..model import modelcontext
from .. import distributions as pm_dists
__all__ = ['Normal', 'StudentT', 'Binomial', 'Poisson', 'NegativeBinomial']
# Define link functions
# Hack as assigning a function in the class definition automatically binds
# it as a method.
class Identity():
    """Identity link: returns its argument unchanged.

    Implemented as a callable class (instead of a plain function) so that
    assigning it as a class attribute does not bind it as a method.
    """

    def __call__(self, value):
        return value
# Link-function instances referenced by the Family subclasses below.
identity = Identity()
# NOTE(review): named ``logit`` but bound to the sigmoid — these objects are
# applied as ``self.link(y_est)``, i.e. as the *inverse* link (mean function).
logit = tt.nnet.sigmoid
inverse = tt.inv
exp = tt.exp
class Family(object):
    """Base class for Family of likelihood distribution and link functions.

    Subclasses define ``likelihood`` (a pymc3 distribution class), ``link``
    (the mean function applied to the linear predictor), ``parent`` (the
    likelihood parameter that receives the linked estimate) and ``priors``
    (defaults for the remaining likelihood parameters).
    """
    priors = {}
    link = None
    def __init__(self, **kwargs):
        # Overwrite defaults
        for key, val in kwargs.items():
            if key == 'priors':
                # Copy first so the class-level default dict is never
                # mutated across instances.
                self.priors = copy(self.priors)
                self.priors.update(val)
            else:
                setattr(self, key, val)
    def _get_priors(self, model=None, name=''):
        """Return prior distributions of the likelihood.

        Plain numbers in ``self.priors`` pass through as fixed values;
        distributions are registered as variables on the model.

        Returns
        -------
        dict : mapping name -> pymc3 distribution
        """
        if name:
            name = '{}_'.format(name)
        model = modelcontext(model)
        priors = {}
        for key, val in self.priors.items():
            if isinstance(val, numbers.Number):
                priors[key] = val
            else:
                priors[key] = model.Var('{}{}'.format(name, key), val)
        return priors
    def create_likelihood(self, name, y_est, y_data, model=None):
        """Create likelihood distribution of observed data.

        Parameters
        ----------
        y_est : theano.tensor
            Estimate of dependent variable
        y_data : array
            Observed dependent variable
        """
        priors = self._get_priors(model=model, name=name)
        # Wrap y_est in link function
        # (note: this overwrites any prior registered under ``self.parent``)
        priors[self.parent] = self.link(y_est)
        if name:
            name = '{}_'.format(name)
        return self.likelihood('{}y'.format(name), observed=y_data, **priors)
    def __repr__(self):
        # Human-readable summary of the family's configuration.
        return """Family {klass}:
    Likelihood : {likelihood}({parent})
    Priors : {priors}
    Link function: {link}.""".format(klass=self.__class__, likelihood=self.likelihood.__name__, parent=self.parent, priors=self.priors, link=self.link)
class StudentT(Family):
    # Heavy-tailed (outlier-robust) alternative to Normal regression.
    link = identity
    likelihood = pm_dists.StudentT
    parent = 'mu'
    priors = {'lam': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
              'nu': 1}
class Normal(Family):
    # Ordinary linear regression: identity mean function, HalfCauchy sd prior.
    link = identity
    likelihood = pm_dists.Normal
    parent = 'mu'
    priors = {'sd': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
class Binomial(Family):
    # Logistic regression: Bernoulli likelihood with a sigmoid mean function
    # (the object named ``logit`` above is tt.nnet.sigmoid).
    link = logit
    likelihood = pm_dists.Bernoulli
    parent = 'p'
class Poisson(Family):
    # Poisson regression with a log link (exp mean function).
    link = exp
    likelihood = pm_dists.Poisson
    parent = 'mu'
    # NOTE(review): create_likelihood() overwrites priors[parent] with the
    # linked estimate, so this 'mu' prior appears to be unused — confirm.
    priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
class NegativeBinomial(Family):
    # Overdispersed count regression with a log link.
    link = exp
    likelihood = pm_dists.NegativeBinomial
    parent = 'mu'
    # NOTE(review): the 'mu' entry is overwritten by create_likelihood()
    # (priors[parent] = link(y_est)); only 'alpha' takes effect — confirm.
    priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
              'alpha': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
import datetime
from django.core.exceptions import ValidationError
from django.forms import TimeField
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class TimeFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    """Unit tests for forms.TimeField parsing and change detection."""

    def test_timefield_1(self):
        # Default input formats: accepts time objects, HH:MM and HH:MM:SS.
        f = TimeField()
        self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
        self.assertEqual(datetime.time(14, 25), f.clean("14:25"))
        self.assertEqual(datetime.time(14, 25, 59), f.clean("14:25:59"))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean("hello")
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean("1:24 p.m.")
    def test_timefield_2(self):
        # Custom 12-hour format replaces (not extends) the default formats,
        # so 24-hour strings must now be rejected.
        f = TimeField(input_formats=["%I:%M %p"])
        self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
        self.assertEqual(datetime.time(4, 25), f.clean("4:25 AM"))
        self.assertEqual(datetime.time(16, 25), f.clean("4:25 PM"))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean("14:30:45")
    def test_timefield_3(self):
        f = TimeField()
        # Test whitespace stripping behavior (#5714)
        self.assertEqual(datetime.time(14, 25), f.clean(" 14:25 "))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(" 14:25:59 "))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean(" ")
    def test_timefield_changed(self):
        # has_changed compares the initial value against the re-parsed input;
        # microseconds in t1 make it differ from the parsed "12:51".
        t1 = datetime.time(12, 51, 34, 482548)
        t2 = datetime.time(12, 51)
        f = TimeField(input_formats=["%H:%M", "%H:%M %p"])
        self.assertTrue(f.has_changed(t1, "12:51"))
        self.assertFalse(f.has_changed(t2, "12:51"))
        self.assertFalse(f.has_changed(t2, "12:51 PM"))
import org.gradle.api.*
import org.jetbrains.kotlin.gradle.plugin.*
import org.gradle.kotlin.dsl.*
/**
 * Maps this source set's name onto the project's on-disk layout.
 *
 * Multiplatform projects use `<platform>/src` and `<platform>/test`
 * (with JVM resources under `<platform>/resources` and
 * `<platform>/test-resources`); plain JVM projects use top-level
 * `src`/`resources` and `test`/`test-resources`.
 */
fun KotlinSourceSet.configureDirectoryPaths() {
    if (project.isMultiplatform) {
        // Names look like `jvmMain`/`jvmTest`: the 4-char suffix selects
        // src vs test, the remaining prefix is the platform.
        val srcDir = if (name.endsWith("Main")) "src" else "test"
        val platform = name.dropLast(4)
        kotlin.srcDir("$platform/$srcDir")
        if (name == "jvmMain") {
            resources.srcDir("$platform/resources")
        } else if (name == "jvmTest") {
            resources.srcDir("$platform/test-resources")
        }
    } else if (platformOf(project) == "jvm") {
        when (name) {
            "main" -> {
                kotlin.srcDir("src")
                resources.srcDir("resources")
            }
            "test" -> {
                kotlin.srcDir("test")
                resources.srcDir("test-resources")
            }
        }
    } else {
        throw IllegalArgumentException("Unclear how to configure source sets for ${project.name}")
    }
}
/**
 * Creates shared "Main"/"Test" source-set pairs named after [groupName].
 *
 * Each new source set depends on the source sets whose name prefixes are
 * listed in [dependencies], and every source set whose name prefix is in
 * [reverseDependencies] is wired to depend on the new one.
 */
fun NamedDomainObjectContainer<KotlinSourceSet>.groupSourceSets(
    groupName: String,
    reverseDependencies: List<String>,
    dependencies: List<String>
) {
    for (suffix in listOf("Main", "Test")) {
        register(groupName + suffix) {
            for (upstream in dependencies) {
                dependsOn(get(upstream + suffix))
            }
            for (downstream in reverseDependencies) {
                get(downstream + suffix).dependsOn(this)
            }
        }
    }
}
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.template.defaultfilters import register # noqa
from django.utils import html
from django.utils import safestring
import six
import six.moves.urllib.parse as urlparse
from openstack_dashboard.api import swift
# Module-level logger for this template-tag module.
LOG = logging.getLogger(__name__)
# Maps a heat resource type to the Horizon view used for its detail link.
# Each entry carries a 'link' (a URL name reversed with the physical
# resource id) and optionally a 'format_pattern' applied to the id first
# (used by resource_to_url below).
resource_urls = {
    "AWS::AutoScaling::AutoScalingGroup": {
        'link': 'horizon:project:stacks:detail'},
    "AWS::CloudFormation::Stack": {
        'link': 'horizon:project:stacks:detail'},
    "AWS::EC2::Instance": {
        'link': 'horizon:project:instances:detail'},
    "AWS::EC2::InternetGateway": {
        'link': 'horizon:project:networks:ports:detail'},
    "AWS::EC2::NetworkInterface": {
        'link': 'horizon:project:networks:ports:detail'},
    "AWS::EC2::RouteTable": {
        'link': 'horizon:project:routers:detail'},
    "AWS::EC2::SecurityGroup": {
        'link': 'horizon:project:access_and_security:index'},
    "AWS::EC2::Subnet": {
        'link': 'horizon:project:networks:subnets:detail'},
    "AWS::EC2::Volume": {
        'link': 'horizon:project:volumes:volumes:detail'},
    "AWS::EC2::VPC": {
        'link': 'horizon:project:networks:detail'},
    "AWS::S3::Bucket": {
        'link': 'horizon:project:containers:index'},
    "OS::Cinder::Volume": {
        'link': 'horizon:project:volumes:volumes:detail'},
    "OS::Heat::AccessPolicy": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::AutoScalingGroup": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::CloudConfig": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Neutron::Firewall": {
        'link': 'horizon:project:firewalls:firewalldetails'},
    "OS::Neutron::FirewallPolicy": {
        'link': 'horizon:project:firewalls:policydetails'},
    "OS::Neutron::FirewallRule": {
        'link': 'horizon:project:firewalls:ruledetails'},
    "OS::Heat::HARestarter": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::InstanceGroup": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::MultipartMime": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::ResourceGroup": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::SoftwareConfig": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::StructuredConfig": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::StructuredDeployment": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::Stack": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::WaitCondition": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Heat::WaitConditionHandle": {
        'link': 'horizon:project:stacks:detail'},
    "OS::Neutron::HealthMonitor": {
        'link': 'horizon:project:loadbalancers:monitordetails'},
    "OS::Neutron::IKEPolicy": {
        'link': 'horizon:project:vpn:ikepolicydetails'},
    "OS::Neutron::IPsecPolicy": {
        'link': 'horizon:project:vpn:ipsecpolicydetails'},
    "OS::Neutron::IPsecSiteConnection": {
        'link': 'horizon:project:vpn:ipsecsiteconnectiondetails'},
    "OS::Neutron::Net": {
        'link': 'horizon:project:networks:detail'},
    "OS::Neutron::Pool": {
        'link': 'horizon:project:loadbalancers:pooldetails'},
    "OS::Neutron::PoolMember": {
        'link': 'horizon:project:loadbalancers:memberdetails'},
    "OS::Neutron::Port": {
        'link': 'horizon:project:networks:ports:detail'},
    "OS::Neutron::Router": {
        'link': 'horizon:project:routers:detail'},
    "OS::Neutron::Subnet": {
        'link': 'horizon:project:networks:subnets:detail'},
    "OS::Neutron::VPNService": {
        'link': 'horizon:project:vpn:vpnservicedetails'},
    "OS::Nova::KeyPair": {
        'link': 'horizon:project:access_and_security:index'},
    "OS::Nova::Server": {
        'link': 'horizon:project:instances:detail'},
    "OS::Swift::Container": {
        'link': 'horizon:project:containers:index',
        # Swift containers need a trailing folder delimiter in the URL arg.
        'format_pattern': '%s' + swift.FOLDER_DELIMITER},
}
def resource_to_url(resource):
    """Best-effort Horizon detail URL for a heat resource, or None."""
    if not resource or not resource.physical_resource_id:
        return None
    mapping = resource_urls.get(resource.resource_type, {})
    if 'link' not in mapping:
        return None
    pattern = mapping.get('format_pattern') or '%s'
    try:
        rid = pattern % resource.physical_resource_id
        return reverse(mapping['link'], args=(rid,))
    except Exception as e:
        # reverse() may fail (e.g. view not registered); treat as "no URL".
        LOG.exception(e)
        return None
@register.filter
def stack_output(output):
    """Render a stack output value as safe HTML for templates.

    dict/list values are pretty-printed as JSON inside <pre>; http(s)
    URLs become links opening in a new tab; anything else is returned
    as escaped-free text.
    """
    if not output:
        return u''
    if isinstance(output, (dict, list)):
        json_string = json.dumps(output, indent=2)
        safe_output = u'<pre>%s</pre>' % html.escape(json_string)
        return safestring.mark_safe(safe_output)
    # six keeps this working on both Python 2 (basestring/unicode) and 3;
    # the original used the py2-only builtins directly.
    if isinstance(output, six.string_types):
        parts = urlparse.urlsplit(output)
        if parts.netloc and parts.scheme in ('http', 'https'):
            url = html.escape(output)
            safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url)
            return safestring.mark_safe(safe_link)
    return six.text_type(output)
# Status images follow one naming scheme:
#   '<TYPE>_<STATUS>' -> '/static/dashboard/img/<type>-<color>.<ext>'
# IN_PROGRESS states use animated GIFs; every other state is an SVG.
_IMAGE_TYPES = (
    'lb', 'db', 'stack', 'server', 'alarm', 'volume', 'image', 'wait',
    'firewall', 'floatingip', 'router', 'policy', 'config', 'network',
    'port', 'securitygroup', 'vpn', 'flavor', 'keypair', 'unknown',
)
_IMAGE_STATUSES = (
    ('FAILED', 'red', 'svg'),
    ('DELETE', 'red', 'svg'),
    ('IN_PROGRESS', 'gray', 'gif'),
    ('INIT', 'gray', 'svg'),
    ('COMPLETE', 'green', 'svg'),
)
resource_images = {}
for _type in _IMAGE_TYPES:
    for _status, _color, _ext in _IMAGE_STATUSES:
        resource_images['%s_%s' % (_type.upper(), _status)] = (
            '/static/dashboard/img/%s-%s.%s' % (_type, _color, _ext))
# Substring markers used by get_resource_type() to bucket a full heat
# resource-type string (e.g. "OS::Nova::Server") into an image category
# (keys are matched with the `in` operator, not equality).
resource_types = {
    # LB
    'LoadBalance': 'LB',
    'HealthMonitor': 'LB',
    'PoolMember': 'LB',
    'Pool': 'LB',
    # DB
    'DBInstance': 'DB',
    'Database': 'DB',
    # SERVER
    'Instance': 'SERVER',
    'Server': 'SERVER',
    # ALARM
    'Alarm': 'ALARM',
    'CombinationAlarm': 'ALARM',
    'CWLiteAlarm': 'ALARM',
    # VOLUME
    'Volume': 'VOLUME',
    'VolumeAttachment': 'VOLUME',
    # STACK
    'stack': 'STACK',
    'AutoScalingGroup': 'STACK',
    'InstanceGroup': 'STACK',
    'ServerGroup': 'STACK',
    'ResourceGroup': 'STACK',
    # IMAGE
    'Image': 'IMAGE',
    # WAIT
    'WaitCondition': 'WAIT',
    'WaitConditionHandle': 'WAIT',
    'UpdateWaitConditionHandle': 'WAIT',
    # FIREWALL
    'Firewall': 'FIREWALL',
    'FirewallPolicy': 'FIREWALL',
    'FirewallRule': 'FIREWALL',
    # FLOATINGIP
    'FloatingIP': 'FLOATINGIP',
    'FloatingIPAssociation': 'FLOATINGIP',
    # ROUTER
    'Router': 'ROUTER',
    'RouterGateway': 'ROUTER',
    'RouterInterface': 'ROUTER',
    # POLICY
    'ScalingPolicy': 'POLICY',
    # CONFIG
    'CloudConfig': 'CONFIG',
    'MultipartMime': 'CONFIG',
    'SoftwareConfig': 'CONFIG',
    'SoftwareDeployment': 'CONFIG',
    'StructuredConfig': 'CONFIG',
    'StructuredDeployment': 'CONFIG',
    # NETWORK
    'Net': 'NETWORK',
    'Subnet': 'NETWORK',
    'NetworkGateway': 'NETWORK',
    'ProviderNet': 'NETWORK',
    # PORT
    'Port': 'PORT',
    # SECURITYGROUP
    'SecurityGroup': 'SECURITYGROUP',
    # VPN
    'VPNService': 'VPN',
    # FLAVOR
    'Flavor': 'FLAVOR',
    # KEYPAIR
    'KeyPair': 'KEYPAIR',
}
def get_resource_type(type):
    """Map a heat resource type string to an image category.

    Matching is by substring.  Longer (more specific) keys are tried
    first so the result is deterministic when several keys match: e.g.
    'ServerGroup' must win over 'Server' (plain dict iteration order
    made the previous version pick whichever came first).
    """
    for key in sorted(resource_types, key=len, reverse=True):
        if key in type:
            return resource_types[key]
    return 'UNKNOWN'
def get_resource_status(status):
    """Collapse a raw heat status string into one of five buckets.

    Markers are checked in precedence order: e.g. 'DELETE_IN_PROGRESS'
    classifies as IN_PROGRESS, not DELETE; anything with no marker is
    treated as COMPLETE.
    """
    for marker in ('IN_PROGRESS', 'FAILED', 'DELETE', 'INIT'):
        if marker in status:
            return marker
    return 'COMPLETE'
def get_resource_image(status, type):
    """Return the status image URL for a resource, or None if unknown.

    The previous version linearly scanned resource_images comparing each
    key for equality — a direct dict lookup is equivalent and O(1).
    """
    resource_state = '%s_%s' % (get_resource_type(type),
                                get_resource_status(status))
    return resource_images.get(resource_state)
"""
WSGI config for votainteligente project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "votainteligente.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "votainteligente.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
import djcelery
# Let django-celery auto-discover task modules from INSTALLED_APPS.
djcelery.setup_loader()
# -*- coding: utf-8 -*-
# This file is part of Invenio Demosite.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OaiHarvest module web tests."""
from invenio.config import CFG_SITE_SECURE_URL
from invenio.testsuite import make_test_suite, \
run_test_suite, \
InvenioWebTestCase
class InvenioOaiHarvestWebTest(InvenioWebTestCase):
    """OaiHarvest web tests."""
    def test_insert_oai_source(self):
        """oaiharvest - web test insert oai source"""
        self.browser.get(CFG_SITE_SECURE_URL)
        # login as admin
        self.login(username="admin", password="")
        # Navigate to the OAI Harvest admin page and open the "add" form.
        self.find_element_by_link_text_with_timeout("Administration")
        self.browser.find_element_by_link_text("Administration").click()
        self.find_element_by_link_text_with_timeout("Configure OAI Harvest")
        self.browser.find_element_by_link_text("Configure OAI Harvest").click()
        self.find_element_by_link_text_with_timeout("add new OAI source")
        self.browser.find_element_by_link_text("add new OAI source").click()
        # Validate the base URL, then fill in the new source's settings.
        self.fill_textbox(textbox_name="oai_src_baseurl", text="invenio-demo.cern.ch/oai2d")
        self.find_element_by_xpath_with_timeout("//input[@value='Validate']")
        self.browser.find_element_by_xpath("//input[@value='Validate']").click()
        self.fill_textbox(textbox_name="oai_src_name", text="AtlantisOAI")
        self.choose_selectbox_option_by_label(selectbox_name="oai_src_prefix", label="marcxml")
        self.find_element_by_id_with_timeout("cern:theory1")
        self.browser.find_element_by_id("cern:theory1").click()
        self.choose_selectbox_option_by_label(selectbox_name="oai_src_frequency", label="never")
        self.choose_selectbox_option_by_label(selectbox_name="oai_src_lastrun", label="from beginning")
        self.find_element_by_xpath_with_timeout("//label[text()='convert (c)']")
        self.browser.find_element_by_xpath("//label[text()='convert (c)']").click()
        # Submitting with 'convert' selected but no config file must show a
        # validation error first.
        self.find_element_by_xpath_with_timeout("//input[@value='Add OAI Source']", timeout=60)
        self.browser.find_element_by_xpath("//input[@value='Add OAI Source']").click()
        self.page_source_test(expected_text='Please enter a valid name of or a full path to a BibConvert config file or change postprocess mode.')
        # Provide the config file and submit again; the source should appear.
        self.fill_textbox(textbox_name="oai_src_config", text="oaimarc2marcxml.xsl")
        self.find_element_by_xpath_with_timeout("//input[@value='Add OAI Source']")
        self.browser.find_element_by_xpath("//input[@value='Add OAI Source']").click()
        self.find_element_by_link_text_with_timeout("Go back to the OAI sources overview")
        self.browser.find_element_by_link_text("Go back to the OAI sources overview").click()
        self.page_source_test(expected_text='AtlantisOAI')
        # Clean up: delete the source so the test leaves no state behind.
        self.find_element_by_link_text_with_timeout("delete")
        self.browser.find_element_by_link_text("delete").click()
        self.browser.find_element_by_class_name("adminbutton").click()
        self.find_element_by_link_text_with_timeout("Go back to the OAI sources overview")
        self.browser.find_element_by_link_text("Go back to the OAI sources overview").click()
        self.logout()
# Aggregate the web tests so Invenio's test runner can discover them.
TEST_SUITE = make_test_suite(InvenioOaiHarvestWebTest, )
if __name__ == '__main__':
    # warn_user presumably prompts before running against a live instance
    # (these Selenium tests create and delete admin records) — see runner docs.
    run_test_suite(TEST_SUITE, warn_user=True)
#define TORCH_ASSERT_NO_OPERATORS
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <ATen/native/Activation.h>
#include <cmath>
#include <functional>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/core/TensorBase.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cpu/Elu.h>
#include <ATen/native/cpu/Gelu.h>
#include <ATen/native/cpu/Loops.h>
#include <ATen/Parallel.h>
#include <c10/core/Scalar.h>
namespace at::native {
namespace {
#if defined(__GNUC__) && __GNUC__ == 14 && defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
// Workaround for gcc-14.2.0 ICE during RTL pass: expand when compiling for NEON
__attribute__((optimize("no-tree-vectorize")))
#endif
// Elementwise log(sigmoid(x)) into `output`, also storing exp(-|x|) into
// `buffer` (reused by the backward kernel below).  Uses the numerically
// stable identity: log(sigmoid(x)) = min(x, 0) - log1p(exp(-|x|)).
void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const TensorBase &input) {
  if (at::isReducedFloatingType(input.scalar_type())) {
    // Reduced-precision path (Half/BFloat16): each vector load is widened
    // into two float vectors, computed in float, then narrowed on store.
    AT_DISPATCH_REDUCED_FLOATING_TYPES(input.scalar_type(), "log_sigmoid_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      scalar_t* output_data = output.data_ptr<scalar_t>();
      scalar_t* buffer_data = buffer.data_ptr<scalar_t>();
      const scalar_t* input_data = input.const_data_ptr<scalar_t>();
      parallel_for(0, input.numel(), 1, [&] (int64_t begin, int64_t end) {
        int64_t size = end - begin;
        int64_t d = 0;
        // Main loop over full vector widths.
        for (; d < size - (size % Vec::size()); d += Vec::size()) {
          Vec data_vec = Vec::loadu(input_data + begin+ d);
          auto [data_vec0, data_vec1] = convert_to_float<scalar_t>(data_vec);
          Vectorized<float> min_vec = minimum(data_vec0, Vectorized<float>(float(0)));
          Vectorized<float> buffer_vec0 = data_vec0.abs().neg().exp();  // exp(-|x|)
          Vectorized<float> output_vec0 = min_vec - buffer_vec0.log1p();
          min_vec = minimum(data_vec1, Vectorized<float>(float(0)));
          Vectorized<float> buffer_vec1 = data_vec1.abs().neg().exp();
          Vectorized<float> output_vec1 = min_vec - buffer_vec1.log1p();
          convert_from_float<scalar_t>(buffer_vec0, buffer_vec1).store(buffer_data + begin + d);
          convert_from_float<scalar_t>(output_vec0, output_vec1).store(output_data + begin + d);
        }
        // Tail: same computation with partial loads/stores of size - d lanes.
        if (size - d > 0) {
          Vec data_vec = Vec::loadu(input_data + begin + d, size - d);
          auto [data_vec0, data_vec1] = convert_to_float<scalar_t>(data_vec);
          Vectorized<float> min_vec = minimum(data_vec0, Vectorized<float>(float(0)));
          Vectorized<float> buffer_vec0 = data_vec0.abs().neg().exp();
          Vectorized<float> output_vec0 = min_vec - buffer_vec0.log1p();
          min_vec = minimum(data_vec1, Vectorized<float>(float(0)));
          Vectorized<float> buffer_vec1 = data_vec1.abs().neg().exp();
          Vectorized<float> output_vec1 = min_vec - buffer_vec1.log1p();
          convert_from_float<scalar_t>(buffer_vec0, buffer_vec1).store(buffer_data + begin + d, size - d);
          convert_from_float<scalar_t>(output_vec0, output_vec1).store(output_data + begin + d, size - d);
        }
      });
    });
  } else {
    // Full-precision path: same formula computed directly in scalar_t lanes.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "log_sigmoid_cpu", [&] {
      using Vec = Vectorized<scalar_t>;
      scalar_t* output_data = output.data_ptr<scalar_t>();
      scalar_t* buffer_data = buffer.data_ptr<scalar_t>();
      const scalar_t* input_data = input.const_data_ptr<scalar_t>();
      parallel_for(0, input.numel(), 1, [&] (int64_t begin, int64_t end) {
        int64_t size = end - begin;
        int64_t d = 0;
        for (; d < size - (size % Vec::size()); d += Vec::size()) {
          Vec data_vec = Vec::loadu(input_data + begin+ d);
          Vec min_vec = vec::minimum(data_vec, Vec(scalar_t(0)));
          Vec buffer_vec = data_vec.abs().neg().exp();  // exp(-|x|)
          Vec output_vec = min_vec - buffer_vec.log1p();
          buffer_vec.store(buffer_data + begin + d);
          output_vec.store(output_data + begin + d);
        }
        // Masked tail for the remaining elements.
        if (size - d > 0) {
          Vec data_vec = Vec::loadu(input_data + begin + d, size - d);
          Vec min_vec = vec::minimum(data_vec, Vec(scalar_t(0)));
          Vec buffer_vec = data_vec.abs().neg().exp();
          Vec output_vec = min_vec - buffer_vec.log1p();
          buffer_vec.store(buffer_data + begin + d, size - d);
          output_vec.store(output_data + begin + d, size - d);
        }
      });
    });
  }
}
// Backward of log_sigmoid.  Per-element operands (inferred from the forward
// kernel's saved buffer — confirm against the TensorIterator setup):
//   a = forward input x, b = saved exp(-|x|), c = incoming gradient.
// Gradient: (max_deriv - sign * b / (1 + b)) * c, where for x < 0
// max_deriv = 1, sign = 1, and otherwise max_deriv = 0, sign = -1.
void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    // Reduced-precision path: widen each operand to float pairs, compute,
    // then narrow the result back.
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "log_sigmoid_backward_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      auto zero_val = float(0);
      auto zero_vec = Vectorized<float>(zero_val);
      auto one_val = float(1);
      auto one_vec = Vectorized<float>(one_val);
      cpu_kernel_vec(iter,
        [=](scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
          auto in_negative = float(a) < float(0);
          auto max_deriv = in_negative ? float(1) : float(0);
          auto sign = in_negative ? float(1) : -float(1);
          return (max_deriv - sign * (float(b) / (float(1) + b))) * float(c);
        },
        [=](Vec a, Vec b, Vec c) -> Vec {
          auto [a0, a1] = convert_to_float<scalar_t>(a);
          auto [b0, b1] = convert_to_float<scalar_t>(b);
          auto [c0, c1] = convert_to_float<scalar_t>(c);
          // blendv selects the negative-branch constants lane-wise.
          auto mask = a0 < zero_vec;
          auto max_deriv_vec = Vectorized<float>::blendv(zero_vec, one_vec, mask);
          auto sign_vec = Vectorized<float>::blendv(one_vec.neg(), one_vec, mask);
          a0 = (max_deriv_vec - sign_vec * (b0 / (one_vec + b0))) * c0;
          mask = a1 < zero_vec;
          max_deriv_vec = Vectorized<float>::blendv(zero_vec, one_vec, mask);
          sign_vec = Vectorized<float>::blendv(one_vec.neg(), one_vec, mask);
          a1 = (max_deriv_vec - sign_vec * (b1 / (one_vec + b1))) * c1;
          return convert_from_float<scalar_t>(a0, a1);
        });
    });
  } else {
    // Full-precision path: identical math directly in scalar_t.
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "log_sigmoid_backward_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      auto zero_val = scalar_t(0);
      auto zero_vec = Vec(zero_val);
      auto one_val = scalar_t(1);
      auto one_vec = Vec(one_val);
      cpu_kernel_vec(iter,
        [=](scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
          auto in_negative = a < scalar_t(0);
          auto max_deriv = in_negative ? scalar_t(1) : scalar_t(0);
          auto sign = in_negative ? scalar_t(1) : -scalar_t(1);
          return (max_deriv - sign * (b / (scalar_t(1) + b))) * c;
        },
        [=](Vec a, Vec b, Vec c) -> Vec {
          auto mask = a < zero_vec;
          auto max_deriv_vec = Vec::blendv(zero_vec, one_vec, mask);
          auto sign_vec = Vec::blendv(one_vec.neg(), one_vec, mask);
          return (max_deriv_vec - sign_vec * (b / (one_vec + b))) * c;
        });
    });
  }
}
// Elementwise threshold: out = (x <= threshold) ? value : other.
// `other` is the second input of the iterator (for threshold_() it is x
// itself; for threshold_backward it is the gradient — determined by caller).
// Reduced-precision types compare in float; all other types (note:
// ALL_TYPES, including integral) compute natively.
void threshold_kernel(
    TensorIteratorBase& iter,
    const Scalar& threshold_scalar,
    const Scalar& value_scalar) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "threshold_cpu", [&]() {
      using Vec = Vectorized<float>;
      float threshold = threshold_scalar.to<float>();
      Vec threshold_v = Vec(threshold);
      // `value` stays in scalar_t; only the comparison is done in float.
      scalar_t value = value_scalar.to<scalar_t>();
      Vec value_v = Vec(float(value));
      cpu_kernel_vec(
          iter,
          [&](scalar_t x, scalar_t other) -> scalar_t {
            return float(x) <= threshold ? value : other;
          },
          [&](Vectorized<scalar_t> x, Vectorized<scalar_t> other) -> Vectorized<scalar_t> {
            auto [x0, x1] = convert_to_float<scalar_t>(x);
            auto [other0, other1] = convert_to_float<scalar_t>(other);
            return convert_from_float<scalar_t>(Vec::blendv(other0, value_v, x0 <= threshold_v),
              Vec::blendv(other1, value_v, x1 <= threshold_v));
          });
    });
  } else {
    AT_DISPATCH_ALL_TYPES(iter.dtype(), "threshold_cpu", [&] {
      using Vec = Vectorized<scalar_t>;
      scalar_t threshold = threshold_scalar.to<scalar_t>();
      Vec threshold_v = Vec(threshold);
      scalar_t value = value_scalar.to<scalar_t>();
      Vec value_v = Vec(value);
      cpu_kernel_vec(
          iter,
          [&](scalar_t x, scalar_t other) -> scalar_t {
            return x <= threshold ? value : other;
          },
          [&](Vec x, Vec other) -> Vec {
            return Vec::blendv(other, value_v, x <= threshold_v);
          });
    });
  }
}
// ELU forward kernel: delegates the elementwise math to the shared
// get_scalar_elu_elementwise_func / get_vectorized_elu_elementwise_func
// helpers (defined elsewhere in this file), parameterized by alpha, scale
// and input_scale. Reduced-precision types compute in float.
void elu_kernel(TensorIteratorBase& it, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
  if (at::isReducedFloatingType(it.common_dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(it.common_dtype(), "elu_cpu", [&]() {
      cpu_kernel_vec(
          it,
          get_scalar_elu_elementwise_func<scalar_t, float>(alpha.to<float>(), scale.to<float>(), input_scale.to<float>()),
          get_vectorized_elu_elementwise_func<scalar_t>(alpha.to<float>(), scale.to<float>(), input_scale.to<float>()));
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(it.common_dtype(), "elu_cpu", [&]() {
      cpu_kernel_vec(
          it,
          get_scalar_elu_elementwise_func<scalar_t>(alpha.to<scalar_t>(), scale.to<scalar_t>(), input_scale.to<scalar_t>()),
          get_vectorized_elu_elementwise_func<scalar_t>(alpha.to<scalar_t>(), scale.to<scalar_t>(), input_scale.to<scalar_t>()));
    });
  }
}
// ELU backward kernel. Operands: a = incoming gradient, b = either the
// forward *result* (is_result == true) or the forward *input*
// (is_result == false) — inferred from the two formulas below:
//   is_result:  b <= 0 ? a * input_scale * (b + alpha*scale) : a * scale
//   otherwise:  b <= 0 ? a * input_scale * alpha*scale * exp(b*input_scale)
//                      : a * scale
// NOTE(review): the reduced-precision branch dispatches on common_dtype()
// while the else branch uses it.dtype() — looks intentional upstream, but
// worth confirming.
void elu_backward_kernel(TensorIteratorBase& it, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
  if (at::isReducedFloatingType(it.common_dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(it.common_dtype(), "elu_backward_cpu", [&]() {
      auto negcoef = alpha.to<float>() * scale.to<float>();
      auto poscoef = scale.to<float>();
      auto negiptcoef = input_scale.to<float>();
      const Vectorized<float> negcoef_vec(negcoef);
      const Vectorized<float> negiptcoef_vec(negiptcoef);
      const Vectorized<float> poscoef_vec(poscoef);
      const Vectorized<float> zero_vec(static_cast<float>(0));
      cpu_kernel_vec(
          it,
          [negcoef, negiptcoef, poscoef, is_result](scalar_t a, scalar_t b) -> scalar_t {
            if (is_result) {
              return float(b) <= float(0) ? float(a) * negiptcoef * (float(b) + negcoef) : float(a) * poscoef;
            } else {
              return float(b) <= float(0) ? float(a) * negiptcoef * negcoef * std::exp(float(b) * negiptcoef): float(a) * poscoef;
            }
          },
          [&negcoef_vec, &negiptcoef_vec, &poscoef_vec, &zero_vec, is_result](Vectorized<scalar_t> a, Vectorized<scalar_t> b) -> Vectorized<scalar_t> {
            auto [a0, a1] = convert_to_float<scalar_t>(a);
            auto [b0, b1] = convert_to_float<scalar_t>(b);
            auto cmp0 = (b0 > zero_vec);
            auto cmp1 = (b1 > zero_vec);
            auto get_res_masked = [&](Vectorized<float>& cmp, Vectorized<float>& a, Vectorized<float>& b) {
              if (is_result) {
                // Fast path: when every lane is positive (zero_mask() == 0),
                // skip the blend and compute only a * poscoef.
                return !cmp.zero_mask() ? a * poscoef_vec :
                  Vectorized<float>::blendv(a * negiptcoef_vec * (b + negcoef_vec), a * poscoef_vec, cmp);
              } else {
                return Vectorized<float>::blendv(a * negiptcoef_vec * negcoef_vec * (b * negiptcoef_vec).exp(), a * poscoef_vec, cmp);
              }
            };
            auto res0 = get_res_masked(cmp0, a0, b0);
            auto res1 = get_res_masked(cmp1, a1, b1);
            return convert_from_float<scalar_t>(res0, res1);
          });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(it.dtype(), "elu_backward_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
      auto poscoef = scale.to<scalar_t>();
      auto negiptcoef = input_scale.to<scalar_t>();
      const Vec negcoef_vec(negcoef);
      const Vec negiptcoef_vec(negiptcoef);
      const Vec poscoef_vec(poscoef);
      const Vec zero_vec(static_cast<scalar_t>(0));
      cpu_kernel_vec(
          it,
          [negcoef, negiptcoef, poscoef, is_result](scalar_t a, scalar_t b) -> scalar_t {
            if (is_result) {
              return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
            } else {
              return b <= scalar_t(0) ? a * negiptcoef * negcoef * std::exp(b * negiptcoef): a * poscoef;
            }
          },
          [&negcoef_vec, &negiptcoef_vec, &poscoef_vec, &zero_vec, is_result](Vec a, Vec b) -> Vec {
            auto cmp = (b > zero_vec);
            if (is_result) {
              if (!cmp.zero_mask()) { // only a * poscoef (which is very quick) needs to be computed
                return a * poscoef_vec;
              } else {
                return Vec::blendv(a * negiptcoef_vec * (b + negcoef_vec), a * poscoef_vec, cmp);
              }
            } else {
              return Vec::blendv(a * negiptcoef_vec * negcoef_vec * (b * negiptcoef_vec).exp(), a * poscoef_vec, cmp);
            }
          }
      );
    });
  }
}
// TODO(yangxm): Add another fast kernel using formula
// y = 0.5x * (1 + tanh(sqrt(2/Pi) * (x + 0.044715x^3)))
// and the fast tanh impl from Eigen.
// GELU forward kernel. Two variants selected by `approximate`:
//   GeluType::Tanh -> tanh approximation (scalar_gelu_approximated_with_tanh)
//   otherwise      -> exact erf form (scalar_gelu)
// The elementwise math lives in helpers defined elsewhere in this file; this
// function only handles dtype dispatch and grain-size tuning for threading.
void GeluKernelImpl(TensorIteratorBase& it, GeluType approximate) {
  auto grain_size = at::internal::GRAIN_SIZE;
  // Numbers based on benchmarking.
  // Benchmark: benchmarks/operator_benchmarks/pt/gelu_test.py
#ifdef C10_MOBILE
  // Benchmarked on S8 US phone.
  // Internal benchmarking that converts operator benchmark into
  // a torchscript module and run that on mobile.
  // Same benchmark as server side.
  constexpr int64_t GELU_MIN_ELEMENTS_FOR_MULTI_THREADING{6144};
#else
  // Benchmarked on i9 8 core 16 thread machine.
  // 1 thread: cd benchmark/operator_benchmarks;
  // python -m pt.gelu_test --tag_filter long --omp_num_threads 1
  // 2 threads: cd benchmark/operator_benchmarks;
  // python -m pt.gelu_test --tag_filter long --omp_num_threads 2
  constexpr int64_t GELU_MIN_ELEMENTS_FOR_MULTI_THREADING{16384};
#endif
  // Above the threshold, spread the work evenly over all threads.
  if (it.numel() > GELU_MIN_ELEMENTS_FOR_MULTI_THREADING) {
    grain_size = it.numel() / at::get_num_threads();
  }
  if (approximate == GeluType::Tanh) {
    if (at::isReducedFloatingType(it.common_dtype())) {
      AT_DISPATCH_REDUCED_FLOATING_TYPES(it.common_dtype(), "GeluKernelImpl", [&]() {
        cpu_kernel_vec(
            it,
            scalar_gelu_approximated_with_tanh<scalar_t>,
            vectorized_gelu_approximated_with_tanh<scalar_t>,
            grain_size);
      });
    } else {
      AT_DISPATCH_FLOATING_TYPES(
          it.dtype(), "GeluKernelImpl", [&]() {
            cpu_kernel_vec(
                it,
                scalar_gelu<scalar_t>,
                vectorized_gelu<scalar_t>,
                grain_size);
          });
    }
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        it.dtype(),
        "GeluKernelImpl",
        [&]() {
          cpu_kernel_vec(
              it,
              scalar_gelu<scalar_t>,
              vectorized_gelu<scalar_t>,
              grain_size);
        });
  }
}
// GELU backward kernel. Operands: dy = incoming gradient, x = forward input.
// Tanh-approximation branch differentiates
//   y = 0.5x * (1 + tanh(kBeta * (x + kKappa * x^3)))
// via the product rule: dy * (left_derivative + right_derivative).
// Exact branch uses d/dx [x * Phi(x)] = cdf(x) + x * pdf(x) with
//   cdf = 0.5 * (1 + erf(x / sqrt(2))), pdf = exp(-x^2/2) / sqrt(2*pi).
// Reduced-precision types compute in float (two float vectors per input
// vector); full-precision types compute natively in scalar_t.
void GeluBackwardKernelImpl(TensorIteratorBase& it, GeluType approximate) {
  if (approximate == GeluType::Tanh) {
    if (at::isReducedFloatingType(it.common_dtype())) {
      AT_DISPATCH_REDUCED_FLOATING_TYPES(it.common_dtype(), "GeluBackwardKernelImpl", [&]() {
        // kBeta = sqrt(2/pi); kKappa = cubic-term coefficient of the tanh approx.
        auto kBetaVec = Vectorized<float>((float)(M_SQRT2 * M_2_SQRTPI * 0.5));
        auto kKappaVec = Vectorized<float>((float)(0.044715));
        auto kOneVec = Vectorized<float>((float)(1));
        auto kThreeVec = Vectorized<float>((float)(3));
        auto kPointFiveVec = Vectorized<float>((float)(0.5));
        cpu_kernel_vec(
            it,
            [](scalar_t dy, scalar_t x) -> scalar_t {
              const float kBeta = float(M_SQRT2 * M_2_SQRTPI * 0.5);
              const float kKappa = float(0.044715);
              float x_sq = float(x) * float(x);
              float x_cube = x_sq * float(x);
              float inner = kBeta * (float(x) + kKappa * x_cube);
              float tanh_inner = float(std::tanh(inner));
              float left = float(0.5) * float(x);
              float right = float(1) + tanh_inner;
              float left_derivative = float(0.5) * right;
              float tanh_derivative = float(1) - tanh_inner * tanh_inner;
              float inner_derivative =
                  kBeta * (float(1) + float(3) * kKappa * x_sq);
              float right_derivative = left * tanh_derivative * inner_derivative;
              return float(dy) * (left_derivative + right_derivative);
            },
            [&](Vectorized<scalar_t> dy_vec, Vectorized<scalar_t> x_vec) -> Vectorized<scalar_t> {
              // Same computation as the scalar lambda, duplicated over the
              // two float halves of the reduced-precision vector.
              auto [x0_vec, x1_vec] = convert_to_float<scalar_t>(x_vec);
              auto [dy0_vec, dy1_vec] = convert_to_float<scalar_t>(dy_vec);
              auto x0_sq = x0_vec * x0_vec;
              auto x1_sq = x1_vec * x1_vec;
              auto x0_cube = x0_vec * x0_vec * x0_vec;
              auto x1_cube = x1_vec * x1_vec * x1_vec;
              auto inner_vec0 = kBetaVec * (x0_vec + kKappaVec * x0_cube);
              auto inner_vec1 = kBetaVec * (x1_vec + kKappaVec * x1_cube);
              auto tanh_inner_vec0 = inner_vec0.tanh();
              auto tanh_inner_vec1 = inner_vec1.tanh();
              auto left_vec0 = kPointFiveVec * x0_vec;
              auto left_vec1 = kPointFiveVec * x1_vec;
              auto right_vec0 = kOneVec + tanh_inner_vec0;
              auto right_vec1 = kOneVec + tanh_inner_vec1;
              auto left_derivative_vec0 = kPointFiveVec * right_vec0;
              auto left_derivative_vec1 = kPointFiveVec * right_vec1;
              auto tanh_derivative_vec0 = kOneVec - tanh_inner_vec0 * tanh_inner_vec0;
              auto tanh_derivative_vec1 = kOneVec - tanh_inner_vec1 * tanh_inner_vec1;
              auto inner_derivative_vec0 = kBetaVec * (kOneVec + kThreeVec * kKappaVec * x0_sq);
              auto inner_derivative_vec1 = kBetaVec * (kOneVec + kThreeVec * kKappaVec * x1_sq);
              auto right_derivative_vec0 = left_vec0 * tanh_derivative_vec0 * inner_derivative_vec0;
              auto right_derivative_vec1 = left_vec1 * tanh_derivative_vec1 * inner_derivative_vec1;
              auto res0 = dy0_vec * (left_derivative_vec0 + right_derivative_vec0);
              auto res1 = dy1_vec * (left_derivative_vec1 + right_derivative_vec1);
              return convert_from_float<scalar_t>(res0, res1);
            });
      });
    } else {
      AT_DISPATCH_FLOATING_TYPES(
          it.dtype(), "GeluBackwardKernelImpl", [&]() {
            using Vec = vec::Vectorized<scalar_t>;
            const Vec kBetaVec(scalar_t(M_SQRT2 * M_2_SQRTPI * 0.5));
            const Vec kKappaVec(scalar_t(0.044715));
            const Vec kOneVec(scalar_t(1));
            const Vec kThreeVec(scalar_t(3));
            const Vec kPointFiveVec(scalar_t(0.5));
            cpu_kernel_vec(
                it,
                [](scalar_t dy, scalar_t x) {
                  const scalar_t kBeta = M_SQRT2 * M_2_SQRTPI * 0.5;
                  const scalar_t kKappa = 0.044715;
                  auto x_sq = x * x;
                  auto x_cube = x_sq * x;
                  auto inner = kBeta * (x + kKappa * x_cube);
                  auto tanh_inner = std::tanh(inner);
                  auto left = scalar_t(0.5) * x;
                  auto right = scalar_t(1) + tanh_inner;
                  auto left_derivative = scalar_t(0.5) * right;
                  auto tanh_derivative = scalar_t(1) - tanh_inner * tanh_inner;
                  auto inner_derivative =
                      kBeta * (scalar_t(1) + scalar_t(3) * kKappa * x_sq);
                  auto right_derivative = left * tanh_derivative * inner_derivative;
                  return dy * (left_derivative + right_derivative);
                },
                [&](Vec dy_vec, Vec x_vec) {
                  auto x_sq = x_vec * x_vec;
                  auto x_cube = x_vec * x_vec * x_vec;
                  auto inner_vec =
                      kBetaVec * (x_vec + kKappaVec * x_cube);
                  auto tanh_inner_vec = inner_vec.tanh();
                  auto left_vec = kPointFiveVec * x_vec;
                  auto right_vec = kOneVec + tanh_inner_vec;
                  auto left_derivative_vec = kPointFiveVec * right_vec;
                  auto tanh_derivative_vec =
                      kOneVec - tanh_inner_vec * tanh_inner_vec;
                  auto inner_derivative_vec =
                      kBetaVec * (kOneVec + kThreeVec * kKappaVec * x_sq);
                  auto right_derivative_vec =
                      left_vec * tanh_derivative_vec * inner_derivative_vec;
                  return dy_vec * (left_derivative_vec + right_derivative_vec);
                });
          });
    }
  } else {
    if (at::isReducedFloatingType(it.common_dtype())) {
      AT_DISPATCH_REDUCED_FLOATING_TYPES(it.common_dtype(), "GeluBackwardKernelImpl", [&]() {
        // kAlpha = 1/sqrt(2); kBeta = 1/sqrt(2*pi) (normal pdf constant).
        auto kAlphaVec = Vectorized<float>((float)(M_SQRT1_2));
        auto kBetaVec = Vectorized<float>((float)(M_2_SQRTPI * M_SQRT1_2 * 0.5));
        auto kOneVec = Vectorized<float>((float)(1));
        auto kPointFiveVec = Vectorized<float>((float)(0.5));
        auto kMinusPointFiveVec = Vectorized<float>((float)(-0.5));
        cpu_kernel_vec(
            it,
            [](scalar_t dy, scalar_t x) -> scalar_t {
              const float kAlpha = float(M_SQRT1_2);
              const float kBeta = float(M_2_SQRTPI) * float(M_SQRT1_2) * float(0.5);
              const float cdf =
                  float(0.5) * (float(1) + std::erf(float(x) * kAlpha));
              const float pdf = kBeta * std::exp(float(x) * float(x) * float(-0.5));
              return float(dy) * (cdf + float(x) * pdf);
            },
            [&](Vectorized<scalar_t> dy, Vectorized<scalar_t> x) -> Vectorized<scalar_t> {
              auto [x0, x1] = convert_to_float<scalar_t>(x);
              auto [dy0, dy1] = convert_to_float<scalar_t>(dy);
              auto cdf_vec0 = kPointFiveVec * (kOneVec + (x0 * kAlphaVec).erf());
              auto cdf_vec1 = kPointFiveVec * (kOneVec + (x1 * kAlphaVec).erf());
              auto pdf_vec0 = kBetaVec * (x0 * x0 * kMinusPointFiveVec).exp();
              auto pdf_vec1 = kBetaVec * (x1 * x1 * kMinusPointFiveVec).exp();
              auto res0 = dy0 * (cdf_vec0 + x0 * pdf_vec0);
              auto res1 = dy1 * (cdf_vec1 + x1 * pdf_vec1);
              return convert_from_float<scalar_t>(res0, res1);
            });
      });
    } else {
      AT_DISPATCH_FLOATING_TYPES(
          it.dtype(), "GeluBackwardKernelImpl", [&]() {
            using Vec = vec::Vectorized<scalar_t>;
            const Vec kAlphaVec(scalar_t(M_SQRT1_2));
            const Vec kBetaVec(scalar_t(M_2_SQRTPI * M_SQRT1_2 * 0.5));
            const Vec kOneVec(scalar_t(1));
            const Vec kPointFiveVec(scalar_t(0.5));
            const Vec kMinusPointFiveVec(scalar_t(-0.5));
            cpu_kernel_vec(
                it,
                [](scalar_t dy, scalar_t x) {
                  const scalar_t kAlpha = scalar_t(M_SQRT1_2);
                  const scalar_t kBeta = M_2_SQRTPI * M_SQRT1_2 * scalar_t(0.5);
                  const scalar_t cdf =
                      scalar_t(0.5) * (scalar_t(1) + std::erf(x * kAlpha));
                  const scalar_t pdf = kBeta * std::exp(x * x * scalar_t(-0.5));
                  return dy * (cdf + x * pdf);
                },
                [&](Vec dy_vec, Vec x_vec) {
                  const Vec cdf_vec =
                      kPointFiveVec * (kOneVec + (x_vec * kAlphaVec).erf());
                  const Vec pdf_vec =
                      kBetaVec * (x_vec * x_vec * kMinusPointFiveVec).exp();
                  return dy_vec * (cdf_vec + x_vec * pdf_vec);
                });
          });
    }
  }
}
// Hardsigmoid forward: out = clamp(x + 3, 0, 6) / 6.
// Reduced-precision types compute in float; full-precision natively.
void hardsigmoid_kernel(TensorIteratorBase& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "hardsigmoid_cpu", [&]() {
      const float zero(0.0f);
      const float three(3.0f);
      const float six(6.0f);
      using Vec = vec::Vectorized<float>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kSixVec(six);
      cpu_kernel_vec(
          iter,
          [&](scalar_t self_val) -> scalar_t {
            return std::min(std::max(float(self_val) + three, zero), six) / six;
          },
          [&](vec::Vectorized<scalar_t> self_val) -> vec::Vectorized<scalar_t> {
            // min(max(x + 3, 0), 6) / 6, per float half.
            auto [self_val0, self_val1] = convert_to_float<scalar_t>(self_val);
            self_val0 = minimum(
                maximum(self_val0 + kThreeVec, kZeroVec),
                kSixVec
              ) / kSixVec;
            self_val1 = minimum(
                maximum(self_val1 + kThreeVec, kZeroVec),
                kSixVec
              ) / kSixVec;
            return convert_from_float<scalar_t>(self_val0, self_val1);
          });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "hardsigmoid_cpu", [&] {
      const scalar_t zero(0.0f);
      const scalar_t three(3.0f);
      const scalar_t six(6.0f);
      using Vec = vec::Vectorized<scalar_t>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kSixVec(six);
      cpu_kernel_vec(
          iter,
          [&](scalar_t self_val) {
            return std::min(std::max(self_val + three, zero), six) / six;
          },
          [&](Vec self_val) {
            return vec::minimum(
              vec::maximum(self_val + kThreeVec, kZeroVec),
              kSixVec
            ) / kSixVec;
          });
    });
  }
}
// Hardsigmoid backward: grad_in = grad_out / 6 where -3 < x < 3, else 0
// (the forward is linear with slope 1/6 on that interval, flat elsewhere).
// NOTE(review): the branch condition checks iter.dtype() but dispatches on
// iter.common_dtype() in the reduced path — confirm this mismatch is benign.
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.common_dtype(), "hardsigmoid_backward", [&]() {
      const float zero(0.0f);
      const float three(3.0f);
      const float neg_three(-3.0f);
      const float one_sixth(1.0f / 6.0f);
      using Vec = Vectorized<float>;
      Vec kZeroVec(0.0f);
      Vec kOneSixthVec(1.0f / 6.0f);
      cpu_kernel_vec(
          iter,
          [=](scalar_t grad_val, scalar_t self_val) -> scalar_t {
            return (float(self_val) > neg_three && float(self_val) < three)
              ? float(grad_val) * one_sixth
              : zero;
          },
          [=](Vectorized<scalar_t> grad_val, Vectorized<scalar_t> self_val) -> Vectorized<scalar_t> {
            auto [self_val0, self_val1] = convert_to_float<scalar_t>(self_val);
            auto [grad_val0, grad_val1] = convert_to_float<scalar_t>(grad_val);
            // Mask of lanes strictly inside (-3, 3); pass scaled grad there.
            Vec gradNonZeroMask = (self_val0 > neg_three) & (self_val0 < three);
            self_val0 = Vec::blendv(kZeroVec, grad_val0 * kOneSixthVec, gradNonZeroMask);
            gradNonZeroMask = (self_val1 > neg_three) & (self_val1 < three);
            self_val1 = Vec::blendv(kZeroVec, grad_val1 * kOneSixthVec, gradNonZeroMask);
            return convert_from_float<scalar_t>(self_val0, self_val1);
          });
      });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "hardsigmoid_backward", [&] {
      const scalar_t zero(0.0f);
      const scalar_t three(3.0f);
      const scalar_t neg_three(-3.0f);
      const scalar_t one_sixth(1.0f / 6.0f);
      using Vec = Vectorized<scalar_t>;
      Vec kZeroVec(0.0f);
      Vec kOneSixthVec(1.0f / 6.0f);
      cpu_kernel_vec(
          iter,
          [=](scalar_t grad_val, scalar_t self_val) {
            return (self_val > neg_three && self_val < three)
              ? grad_val * one_sixth
              : zero;
          },
          [=](Vec grad_val, Vec self_val) {
            Vec gradNonZeroMask = (self_val > neg_three) & (self_val < three);
            return Vec::blendv(kZeroVec, grad_val * kOneSixthVec, gradNonZeroMask);
          });
    });
  }
}
// Hardshrink forward: out = x if |x| > lambd, else 0 (values in
// [-lambd, lambd] are zeroed, boundaries inclusive).
// Dispatches over floating types plus Half/BFloat16 in a single path.
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& lambd) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "hardshrink_cpu", [&] {
    auto lambd_val = lambd.to<scalar_t>();
    using Vec = Vectorized<scalar_t>;
    cpu_kernel_vec(
        iter,
        [=](scalar_t self_val) {
          return (self_val >= -lambd_val && self_val <= lambd_val) ? scalar_t(0)
                                                                   : self_val;
        },
        [=](Vec self_val) {
          // Replace in-band lanes with 0, keep out-of-band lanes unchanged.
          return Vec::blendv(self_val, Vec(0), (self_val >= -lambd_val) & (self_val <= lambd_val));
        });
  });
}
// Softshrink forward: out = x - lambd if x > lambd; x + lambd if x < -lambd;
// else 0. The vector paths OR a NaN mask into both comparisons so NaN inputs
// propagate instead of being zeroed.
// NOTE(review): the `< -lambd_val` comparisons mix a scalar with a vector
// (implicitly broadcast by Vectorized's converting constructor) while the
// `>` comparisons use lambdVec — behavior is identical, just inconsistent.
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& lambd) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.common_dtype(), "softshrink_cpu", [&]() {
      auto lambd_val = lambd.to<float>();
      auto lambdVec = Vectorized<float>(lambd_val);
      cpu_kernel_vec(
          iter,
          [=](scalar_t a) -> scalar_t {
            return float(a) > lambd_val ? a - lambd_val
                  : (float(a) < -lambd_val ? a + lambd_val : float(a) * float(0));
          },
          [=](Vectorized<scalar_t> self_val) -> Vectorized<scalar_t> {
            auto [self_val0, self_val1] = convert_to_float<scalar_t>(self_val);
            // Each branch is computed masked; the two disjoint results are
            // OR-combined at the end (lanes in neither branch come out 0).
            auto self_val_t0 = convert_from_float<scalar_t>(
                ((self_val0 > lambdVec) | (self_val0.isnan())) & (self_val0 - lambdVec),
                ((self_val1 > lambdVec) | (self_val1.isnan())) & (self_val1 - lambdVec));
            auto self_val_t1 = convert_from_float<scalar_t>(
                ((self_val0 < -lambd_val) | (self_val0.isnan())) & (self_val0 + lambdVec),
                ((self_val1 < -lambd_val) | (self_val1.isnan())) & (self_val1 + lambdVec));
            return (self_val_t0 | self_val_t1);
          });
      });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "softshrink_cpu", [&]() {
      auto lambd_val = lambd.to<scalar_t>();
      auto lambdVec = Vectorized<scalar_t>(lambd_val);
      cpu_kernel_vec(
          iter,
          [=](scalar_t a) -> scalar_t {
            return a > lambd_val ? a - lambd_val : (a < -lambd_val ? a + lambd_val : a * scalar_t(0));
          },
          [=](Vectorized<scalar_t> self_val) -> Vectorized<scalar_t> {
            Vectorized<scalar_t> self_val_t0, self_val_t1;
            self_val_t0 = ((self_val > lambdVec) | (self_val.isnan())) & (self_val - lambdVec);
            self_val_t1 = ((self_val < -lambd_val) | (self_val.isnan())) & (self_val + lambdVec);
            return (self_val_t0 | self_val_t1);
          });
    });
  }
}
// Shared backward for hardshrink/softshrink: the gradient passes through
// where |x| > lambd and is zero where -lambd <= x <= lambd.
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& lambd) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "shrink_backward_cpu", [&] {
    auto lambd_val = lambd.to<scalar_t>();
    cpu_kernel_vec(
        iter,
        [=](scalar_t grad_val, scalar_t self_val) {
          return (self_val >= -lambd_val && self_val <= lambd_val) ? scalar_t(0)
                                                                   : grad_val;
        },
        [=](Vectorized<scalar_t> grad_val, Vectorized<scalar_t> self_val) {
          // Mask-AND keeps grad on out-of-band lanes, zeros in-band lanes.
          return ((self_val < -lambd_val) | (self_val > lambd_val)) & grad_val;
        });
  });
}
// Hardtanh backward: grad flows through only where min_val < x < max_val;
// at or outside the clamp boundaries the gradient is zero.
// Operands: grad_val = incoming gradient, self_val = forward input.
// Reduced-precision types compare in float; others compute natively.
// Fix: the AT_DISPATCH name strings previously said "hardshrink_backward_cpu",
// which produced misleading dispatch error messages for this hardtanh kernel;
// corrected to "hardtanh_backward_cpu". No behavioral change otherwise.
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "hardtanh_backward_cpu", [&]() {
      auto min_val = min.to<float>();
      auto max_val = max.to<float>();
      cpu_kernel_vec(
          iter,
          [=](scalar_t grad_val, scalar_t self_val) -> scalar_t {
            return (float(self_val) <= min_val || float(self_val) >= max_val) ? scalar_t(0) : grad_val;
          },
          [=](Vectorized<scalar_t> grad_val, Vectorized<scalar_t> self_val) -> Vectorized<scalar_t> {
            auto [grad_val0, grad_val1] = convert_to_float<scalar_t>(grad_val);
            auto [self_val0, self_val1] = convert_to_float<scalar_t>(self_val);
            // Mask-AND: keep grad where strictly inside (min, max), zero elsewhere.
            return convert_from_float<scalar_t>(
              ((self_val0 > min_val) & (self_val0 < max_val)) & grad_val0,
              ((self_val1 > min_val) & (self_val1 < max_val)) & grad_val1
            );
          });
      });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "hardtanh_backward_cpu", [&] {
      auto min_val = min.to<scalar_t>();
      auto max_val = max.to<scalar_t>();
      cpu_kernel_vec(
          iter,
          [=](scalar_t grad_val, scalar_t self_val) {
            return (self_val <= min_val || self_val >= max_val) ? scalar_t(0) : grad_val;
          },
          [=](Vectorized<scalar_t> grad_val, Vectorized<scalar_t> self_val) {
            return ((self_val > min_val) & (self_val < max_val)) & grad_val;
          });
    });
  }
}
// Hardswish forward: out = x * clamp(x + 3, 0, 6) / 6.
// Reduced-precision types compute in float; full-precision natively.
void hardswish_kernel(TensorIterator& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "hardswish_cpu", [&]() {
      const float zero(0.0f);
      const float three(3.0f);
      const float six(6.0f);
      using Vec = vec::Vectorized<float>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kSixVec(six);
      cpu_kernel_vec(
        iter,
        [&](scalar_t x) -> scalar_t {
          return float(x) * std::min(std::max(float(x) + three, zero), six) / six;
        },
        [&](vec::Vectorized<scalar_t> x_vec) {
          // x * min(max(x + 3, 0), 6) / 6, per float half.
          auto [x_vec0, x_vec1] = convert_to_float<scalar_t>(x_vec);
          x_vec0 = x_vec0 * minimum(
            maximum(x_vec0 + kThreeVec, kZeroVec),
            kSixVec
          ) / kSixVec;
          x_vec1 = x_vec1 * minimum(
            maximum(x_vec1 + kThreeVec, kZeroVec),
            kSixVec
          ) / kSixVec;
          return convert_from_float<scalar_t>(x_vec0, x_vec1);
        });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "hardswish_cpu", [&]() {
      const scalar_t zero(0.0f);
      const scalar_t three(3.0f);
      const scalar_t six(6.0f);
      using Vec = vec::Vectorized<scalar_t>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kSixVec(six);
      cpu_kernel_vec(
        iter,
        [&](scalar_t x) {
          return x * std::min(std::max(x + three, zero), six) / six;
        },
        [&](Vec x_vec) {
          return x_vec * vec::minimum(
            vec::maximum(x_vec + kThreeVec, kZeroVec),
            kSixVec
          ) / kSixVec;
        }
      );
    });
  }
}
// Hardswish backward, piecewise by the forward input x:
//   x <= -3:       0
//   -3 < x < 3:    grad * (x / 3 + 0.5)
//   x >= 3:        grad
// The vector paths express this as two nested blendv selections.
void hardswish_backward_kernel(TensorIterator& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "hardswish_backward_cpu", [&]() {
      const float zero(0.0f);
      const float three(3.0f);
      const float neg_three(-3.0f);
      const float one_half(0.5f);
      using Vec = vec::Vectorized<float>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kNegThreeVec(neg_three);
      const Vec kOneHalfVec(one_half);
      cpu_kernel_vec(
        iter,
        [&](scalar_t grad_val, scalar_t self_val) -> scalar_t {
          if (float(self_val) <= neg_three) {
            return zero;
          } else if (float(self_val) < three) {
            return float(grad_val) * ((float(self_val) / three) + one_half);
          } else {
            return grad_val;
          }
        },
        [&](vec::Vectorized<scalar_t> grad_val, vec::Vectorized<scalar_t> self_val) {
          auto [self_val0, self_val1] = convert_to_float<scalar_t>(self_val);
          auto [grad_val0, grad_val1] = convert_to_float<scalar_t>(grad_val);
          // Inner blendv picks grad vs. grad*(x/3+0.5) for x >= 3;
          // outer blendv forces 0 where x <= -3.
          self_val0 = Vec::blendv(
            Vec::blendv(
              grad_val0 * ((self_val0 / kThreeVec) + kOneHalfVec),
              grad_val0,
              self_val0 >= kThreeVec
            ),
            kZeroVec,
            self_val0 <= kNegThreeVec
          );
          self_val1 = Vec::blendv(
            Vec::blendv(
              grad_val1 * ((self_val1 / kThreeVec) + kOneHalfVec),
              grad_val1,
              self_val1 >= kThreeVec
            ),
            kZeroVec,
            self_val1 <= kNegThreeVec
          );
          return convert_from_float<scalar_t>(self_val0, self_val1);
        });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "hardswish_backward_cpu", [&]() {
      const scalar_t zero(0.0f);
      const scalar_t three(3.0f);
      const scalar_t neg_three(-3.0f);
      const scalar_t one_half(0.5f);
      using Vec = vec::Vectorized<scalar_t>;
      const Vec kZeroVec(zero);
      const Vec kThreeVec(three);
      const Vec kNegThreeVec(neg_three);
      const Vec kOneHalfVec(one_half);
      cpu_kernel_vec(
        iter,
        [&](scalar_t grad_val, scalar_t self_val) {
          if (self_val <= neg_three) {
            return zero;
          } else if (self_val < three) {
            return grad_val * ((self_val / three) + one_half);
          } else {
            return grad_val;
          }
        },
        [&](Vec grad_val, Vec self_val) {
          return Vec::blendv(
            Vec::blendv(
              grad_val * ((self_val / kThreeVec) + kOneHalfVec),
              grad_val,
              self_val >= kThreeVec
            ),
            kZeroVec,
            self_val <= kNegThreeVec
          );
        }
      );
    });
  }
}
// LeakyReLU forward: out = x if x > 0, else x * negval.
// Vector paths implement the select as x * blendv(negval, 1, x > 0).
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_cpu", [&]() {
      auto zero_vec = Vectorized<float>((float)(0));
      auto one_vec = Vectorized<float>((float)(1));
      float negval = negval_.to<float>();
      Vectorized<float> negval_v = Vectorized<float>(negval);
      cpu_kernel_vec(
        iter,
        [&](scalar_t a) -> scalar_t {
          return float(a) > float(0) ? float(a) : float(a) * negval;
        },
        [&](Vectorized<scalar_t> a) -> Vectorized<scalar_t> {
          auto [a0, a1] = convert_to_float<scalar_t>(a);
          auto res0 = a0 * (Vectorized<float>::blendv(negval_v, one_vec, a0 > zero_vec));
          auto res1 = a1 * (Vectorized<float>::blendv(negval_v, one_vec, a1 > zero_vec));
          return convert_from_float<scalar_t>(res0, res1);
        });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "leaky_relu_cpu", [&] {
      using Vec = Vectorized<scalar_t>;
      auto zero_vec = Vec((scalar_t)(0));
      auto one_vec = Vec((scalar_t)(1));
      scalar_t negval = negval_.to<scalar_t>();
      Vec negval_v = Vec(negval);
      cpu_kernel_vec(
        iter,
        [&](scalar_t a) -> scalar_t {
          return a > scalar_t(0) ? a : a * negval;
        },
        [&](Vec a) -> Vec {
          // Per-lane multiplier: 1 where positive, negval where not.
          auto r = Vec::blendv(negval_v, one_vec, a > zero_vec);
          return a * r;
        });
    });
  }
}
// LeakyReLU backward. Operands: a = forward input (or result — the slope
// choice is identical either way since negval keeps signs for negval > 0),
// b = incoming gradient. grad_in = b where a > 0, else b * negval.
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_backward_cpu", [&]() {
      auto zero_vec = Vectorized<float>((float)(0));
      auto one_vec = Vectorized<float>((float)(1));
      float negval = negval_.to<float>();
      Vectorized<float> negval_v = Vectorized<float>(negval);
      cpu_kernel_vec(
        iter,
        [&](scalar_t a, scalar_t b) -> scalar_t {
          return float(a) > float(0) ? float(b) : float(b) * negval;
        },
        [&](Vectorized<scalar_t> a, Vectorized<scalar_t> b) -> Vectorized<scalar_t> {
          auto [a0, a1] = convert_to_float<scalar_t>(a);
          auto [b0, b1] = convert_to_float<scalar_t>(b);
          auto res0 = b0 * (Vectorized<float>::blendv(negval_v, one_vec, a0 > zero_vec));
          auto res1 = b1 * (Vectorized<float>::blendv(negval_v, one_vec, a1 > zero_vec));
          return convert_from_float<scalar_t>(res0, res1);
        });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "leaky_relu_backward_cpu", [&] {
      using Vec = Vectorized<scalar_t>;
      auto zero_vec = Vec((scalar_t)(0));
      auto one_vec = Vec((scalar_t)(1));
      scalar_t negval = negval_.to<scalar_t>();
      Vec negval_v = Vec(negval);
      cpu_kernel_vec(
        iter,
        [&](scalar_t a, scalar_t b) -> scalar_t {
          return a > scalar_t(0) ? b : b * negval;
        },
        [&](Vec a, Vec b) -> Vec {
          auto r = Vec::blendv(negval_v, one_vec, a > zero_vec);
          return b * r;
        });
    });
  }
}
// Softplus forward: out = log1p(exp(x * beta)) / beta, with a linear
// passthrough (out = x) once x * beta exceeds `threshold` to avoid
// exp overflow — standard softplus numerical-stability cutoff.
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "softplus_cpu", [&]() {
      using Vec = Vectorized<float>;
      auto beta = beta_.to<float>();
      auto threshold = threshold_.to<float>();
      const Vec beta_vec(beta);
      const Vec threshold_vec(threshold);
      cpu_kernel_vec(
          iter,
          [beta, threshold](scalar_t a) -> scalar_t {
            return (float(a) * beta) > threshold ? a
              : static_cast<scalar_t>((std::log1p(std::exp(float(a) * beta))) / beta);
          },
          [beta_vec, threshold_vec](Vectorized<scalar_t> a) -> Vectorized<scalar_t> {
            auto [a0, a1] = convert_to_float<scalar_t>(a);
            // blendv picks the raw input on lanes past the threshold.
            a0 = Vec::blendv((a0 * beta_vec).exp().log1p() / beta_vec, a0, (a0 * beta_vec) > threshold_vec);
            a1 = Vec::blendv((a1 * beta_vec).exp().log1p() / beta_vec, a1, (a1 * beta_vec) > threshold_vec);
            return convert_from_float<scalar_t>(a0, a1);
          }
      );
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "softplus_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      auto beta = beta_.to<scalar_t>();
      auto threshold = threshold_.to<scalar_t>();
      const Vec beta_vec(beta);
      const Vec threshold_vec(threshold);
      cpu_kernel_vec(
          iter,
          [beta, threshold](scalar_t a) -> scalar_t {
            return (a * beta) > threshold ? a
              : static_cast<scalar_t>(std::log1p(std::exp(a * beta))) / beta;
          },
          [beta_vec, threshold_vec](Vec a) -> Vec {
            return Vec::blendv((a * beta_vec).exp().log1p() / beta_vec, a, (a * beta_vec) > threshold_vec);
          }
      );
    });
  }
}
// Softplus backward. Operands: a = incoming gradient, b = forward input.
// grad_in = a * z / (z + 1) with z = exp(b * beta) (i.e. a * sigmoid(b*beta)),
// and grad passes straight through once b * beta exceeds the linear-region
// threshold, matching the forward cutoff.
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "softplus_backward_cpu", [&]() {
      using Vec = Vectorized<float>;
      auto beta = beta_.to<float>();
      auto threshold = threshold_.to<float>();
      const Vec beta_vec(beta);
      const Vec threshold_vec(threshold);
      const Vec one_vec(1.0f);
      cpu_kernel_vec(
          iter,
          [beta, threshold](scalar_t a, scalar_t b) -> scalar_t {
            float z = std::exp(float(b) * beta);
            return (float(b) * beta) > threshold ? a : static_cast<scalar_t>(float(a) * z / (z + float(1.)));
          },
          [beta_vec, one_vec, threshold_vec](Vectorized<scalar_t> a, Vectorized<scalar_t> b) -> Vectorized<scalar_t> {
            auto [a0, a1] = convert_to_float<scalar_t>(a);
            auto [b0, b1] = convert_to_float<scalar_t>(b);
            // z is reused for both halves to keep register pressure low.
            Vec z = (b0 * beta_vec).exp();
            a0 = Vec::blendv(a0 * z / (z + one_vec), a0, (b0 * beta_vec) > threshold_vec);
            z = (b1 * beta_vec).exp();
            a1 = Vec::blendv(a1 * z / (z + one_vec), a1, (b1 * beta_vec) > threshold_vec);
            return convert_from_float<scalar_t>(a0, a1);
          });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "softplus_backward_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
      auto beta = beta_.to<scalar_t>();
      auto threshold = threshold_.to<scalar_t>();
      const Vec beta_vec(beta);
      const Vec threshold_vec(threshold);
      const Vec one_vec(static_cast<scalar_t>(1.0));
      cpu_kernel_vec(
          iter,
          [beta, threshold](scalar_t a, scalar_t b) -> scalar_t {
            scalar_t z = std::exp(b * beta);
            return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
          },
          [beta_vec, one_vec, threshold_vec](Vec a, Vec b) -> Vec {
            const Vec z = (b * beta_vec).exp();
            return Vec::blendv(a * z / (z + one_vec), a, (b * beta_vec) > threshold_vec);
          }
      );
    });
  }
}
// GLU forward: out = a * sigmoid(b), where a and b are the two halves of
// the gated input (split is done by the caller; this kernel just sees two
// elementwise operands). sigmoid is computed as 1 / (1 + exp(-b)).
void glu_kernel(TensorIteratorBase& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "glu_cpu", [&]() {
      const float float_one_val(1);
      const Vectorized<float> float_one_vec(float_one_val);
      cpu_kernel_vec(
        iter,
        [float_one_val](scalar_t a, scalar_t b) -> scalar_t {
          return float(a) * (float_one_val / (float_one_val + std::exp(- float(b))));
        },
        [float_one_vec](Vectorized<scalar_t> a, Vectorized<scalar_t> b) -> Vectorized<scalar_t> {
          auto [a0, a1] = convert_to_float<scalar_t>(a);
          auto [b0, b1] = convert_to_float<scalar_t>(b);
          return convert_from_float<scalar_t>(a0 * (float_one_vec / (float_one_vec + b0.neg().exp())),
                                        a1 * (float_one_vec / (float_one_vec + b1.neg().exp())));
        });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "glu_cpu", [&] {
      using Vec = Vectorized<scalar_t>;
      const scalar_t one_val(1);
      const Vec one_vec(one_val);
      cpu_kernel_vec(
        iter,
        [one_val](scalar_t a, scalar_t b) -> scalar_t {
          return a * (one_val / (one_val + std::exp(-b)));
        },
        [one_vec](Vec a, Vec b) -> Vec {
          return a * (one_vec / (one_vec + b.neg().exp()));
        }
      );
    });
  }
}
// GLU JVP: given res = a * sigmoid(b) and tangents (da, db), computes
// da * sigmoid(b) + res * (db - sigmoid(b) * db).
void glu_jvp_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "glu_jvp_cpu", [&] {
using Vec = Vectorized<scalar_t>;
const scalar_t one(1);
const Vec ones(one);
cpu_kernel_vec(
iter,
// Scalar path.
[one](scalar_t res, scalar_t b, scalar_t da, scalar_t db) -> scalar_t {
const auto sig_b = one / (one + std::exp(-b));
return da * sig_b + res * (db - sig_b * db);
},
// Vector path: same expression on whole lanes.
[ones](Vec res, Vec b, Vec da, Vec db) -> Vec {
const auto sig_b = ones / (ones + b.neg().exp());
return da * sig_b + res * (db - sig_b * db);
}
);
});
}
// GLU backward helper: computes (1 - a) * a * b * c elementwise.
void glu_backward_kernel(TensorIterator& iter) {
// Reduced-precision inputs (half/bfloat16) are upcast to float for the math.
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "glu_backward_cpu", [&]() {
const float float_one_val(1);
const Vectorized<float> float_one_vec(float_one_val);
cpu_kernel_vec(
iter,
// Scalar path.
[float_one_val](scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return (float_one_val - float(a)) * float(a) * float(b) * float(c);
},
// Vector path: operate on the two float halves of each reduced vector.
[float_one_vec](Vectorized<scalar_t> a, Vectorized<scalar_t> b, Vectorized<scalar_t> c) -> Vectorized<scalar_t> {
auto [a0, a1] = convert_to_float<scalar_t>(a);
auto [b0, b1] = convert_to_float<scalar_t>(b);
auto [c0, c1] = convert_to_float<scalar_t>(c);
a0 = (float_one_vec - a0) * a0 * b0 * c0;
a1 = (float_one_vec - a1) * a1 * b1 * c1;
return convert_from_float<scalar_t>(a0, a1);
});
});
} else {
// float/double path.
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "glu_backward_cpu", [&] {
using Vec = Vectorized<scalar_t>;
const scalar_t one_val(1);
const Vec one_vec(one_val);
cpu_kernel_vec(
iter,
[one_val](scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return (one_val - a) * a * b * c;
},
[one_vec](Vec a, Vec b, Vec c) -> Vec {
return (one_vec - a) * a * b * c;
}
);
});
}
}
// SiLU (swish) forward: out = x * sigmoid(x) = x / (1 + exp(-x)).
void silu_kernel(TensorIteratorBase& iter) {
// Reduced-precision inputs (half/bfloat16) are upcast to float for the math.
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "silu_cpu", [&]() {
const Vectorized<float> kOneVec(1.0f);
cpu_kernel_vec(
iter,
// Scalar path.
[](scalar_t x) -> scalar_t {
return float(x) / (1.0f + std::exp(-float(x)));
},
// Vector path: split each reduced vector into two float vectors.
[kOneVec](Vectorized<scalar_t> x_vec) -> Vectorized<scalar_t> {
auto [x_vec0, x_vec1] = convert_to_float<scalar_t>(x_vec);
return convert_from_float<scalar_t>(
x_vec0 / (kOneVec + x_vec0.neg().exp()),
x_vec1 / (kOneVec + x_vec1.neg().exp()));
});
});
} else {
// float/double/complex path.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
iter.dtype(), "silu_cpu", [&]() {
const Vectorized<scalar_t> kOneVec(scalar_t(1));
cpu_kernel_vec(
iter,
[](scalar_t x) {
return x / (scalar_t(1) + std::exp(-x));
},
[kOneVec](Vectorized<scalar_t> x_vec) {
return x_vec / (kOneVec + x_vec.neg().exp());
});
});
}
}
// SiLU backward: grad_in = dy * sigmoid(x) * (1 + x * (1 - sigmoid(x))).
void silu_backward_kernel(TensorIteratorBase& iter) {
// Reduced-precision inputs (half/bfloat16) are upcast to float for the math.
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "silu_backward_cpu", [&]() {
const Vectorized<float> kOneVec(1.0f);
cpu_kernel_vec(
iter,
// Scalar path.
[](scalar_t dy, scalar_t x) -> scalar_t {
const float sigmoid =
1.0f / (1.0f + std::exp(-float(x)));
return dy * sigmoid * (1.0f + x * (1.0f - sigmoid));
},
// Vector path: both operands are split into two float halves.
[kOneVec](Vectorized<scalar_t> dy_vec, Vectorized<scalar_t> x_vec) -> Vectorized<scalar_t> {
auto [x_vec0, x_vec1] = convert_to_float<scalar_t>(x_vec);
auto [dy_vec0, dy_vec1] = convert_to_float<scalar_t>(dy_vec);
const Vectorized<float> sigmoid0 =
kOneVec / (kOneVec + x_vec0.neg().exp());
const Vectorized<float> sigmoid1 =
kOneVec / (kOneVec + x_vec1.neg().exp());
return convert_from_float<scalar_t>(
dy_vec0 * sigmoid0 * (kOneVec + x_vec0 * (kOneVec - sigmoid0)),
dy_vec1 * sigmoid1 * (kOneVec + x_vec1 * (kOneVec - sigmoid1)));
});
});
} else {
// float/double/complex path.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
iter.dtype(), "silu_backward_cpu", [&]() {
const Vectorized<scalar_t> kOneVec(scalar_t(1));
cpu_kernel_vec(
iter,
[](scalar_t dy, scalar_t x) {
const scalar_t sigmoid =
scalar_t(1) / (scalar_t(1) + std::exp(-x));
return dy * sigmoid * (scalar_t(1) + x * (scalar_t(1) - sigmoid));
},
[kOneVec](Vectorized<scalar_t> dy_vec, Vectorized<scalar_t> x_vec) {
const Vectorized<scalar_t> sigmoid =
kOneVec / (kOneVec + x_vec.neg().exp());
return dy_vec * sigmoid * (kOneVec + x_vec * (kOneVec - sigmoid));
});
});
}
}
// Mish forward: out = x * tanh(softplus(x)) = x * tanh(log1p(exp(x))).
void mish_kernel(TensorIteratorBase& iter) {
// Reduced-precision inputs (half/bfloat16) are upcast to float for the math.
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "mish_cpu", [&]() {
cpu_kernel_vec(
iter,
// Scalar path.
[](scalar_t x) -> scalar_t{
return static_cast<scalar_t>(float(x) * std::tanh(std::log1p(std::exp(float(x)))));
},
// Vector path: each reduced vector converts to two float vectors.
[](Vectorized<scalar_t> x_vec) -> Vectorized<scalar_t> {
auto [x_vec0, x_vec1] = convert_to_float<scalar_t>(x_vec);
return convert_from_float<scalar_t>(
x_vec0 * x_vec0.exp().log1p().tanh(),
x_vec1 * x_vec1.exp().log1p().tanh()
);
});
});
} else {
// float/double path.
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "mish_cpu", [&]() {
using Vec = Vectorized<scalar_t>;
cpu_kernel_vec(
iter,
[](scalar_t x) -> scalar_t{
return static_cast<scalar_t>(x * std::tanh(std::log1p(std::exp(x))));
},
[](Vec x_vec) -> Vec {
return x_vec * x_vec.exp().log1p().tanh();
});
});
}
}
// Mish backward:
//   grad_in = dy * (tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x))^2))
void mish_backward_kernel(TensorIterator& iter) {
// Reduced-precision inputs (half/bfloat16) are upcast to float for the math.
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "mish_backward_cpu", [&]() {
using Vec = Vectorized<float>;
const Vec kOneVec(1.0f);
cpu_kernel_vec(
iter,
// Scalar path.
[](scalar_t dy, scalar_t x) -> scalar_t {
const float sigmoid =
1.0f / (1.0f + std::exp(-float(x)));
const float tanh_softplus = std::tanh(std::log1p(std::exp(float(x))));
return dy * (tanh_softplus + x * sigmoid * (1.0f - tanh_softplus * tanh_softplus));
},
// Vector path: both operands are split into two float halves.
[kOneVec](Vectorized<scalar_t> dy_vec, Vectorized<scalar_t> x_vec) -> Vectorized<scalar_t> {
auto [x_vec0, x_vec1] = convert_to_float<scalar_t>(x_vec);
auto [dy_vec0, dy_vec1] = convert_to_float<scalar_t>(dy_vec);
const Vec sigmoid0 = kOneVec / (kOneVec + x_vec0.neg().exp());
const Vec sigmoid1 = kOneVec / (kOneVec + x_vec1.neg().exp());
const Vec tanh_softplus0 = x_vec0.exp().log1p().tanh();
const Vec tanh_softplus1 = x_vec1.exp().log1p().tanh();
return convert_from_float<scalar_t>(
dy_vec0 * (tanh_softplus0 + x_vec0 * sigmoid0 * (kOneVec - tanh_softplus0 * tanh_softplus0)),
dy_vec1 * (tanh_softplus1 + x_vec1 * sigmoid1 * (kOneVec - tanh_softplus1 * tanh_softplus1))
);
});
});
} else {
// float/double path.
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "mish_backward_cpu", [&]() {
using Vec = Vectorized<scalar_t>;
const Vec kOneVec(scalar_t(1));
cpu_kernel_vec(
iter,
[](scalar_t dy, scalar_t x) -> scalar_t {
const scalar_t sigmoid =
scalar_t(1) / (scalar_t(1) + std::exp(-x));
const scalar_t tanh_softplus = std::tanh(std::log1p(std::exp(x)));
return dy * (tanh_softplus + x * sigmoid * (scalar_t(1) - tanh_softplus * tanh_softplus));
},
[kOneVec](Vec dy_vec, Vec x_vec) -> Vec {
const Vec sigmoid = kOneVec / (kOneVec + x_vec.neg().exp());
const Vec tanh_softplus = x_vec.exp().log1p().tanh();
return dy_vec * (tanh_softplus + x_vec * sigmoid * (kOneVec - tanh_softplus * tanh_softplus));
});
});
}
}
// PReLU forward: out = input if input > 0, else weight * input.
void prelu_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_cpu", [&]() {
using Vec = Vectorized<scalar_t>;
cpu_kernel_vec(
iter,
// Scalar path.
[](scalar_t input, scalar_t weight) {
return (input > scalar_t(0)) ? input : weight * input;
},
// Vector path: blendv selects `input` where input > 0, else weight*input.
[](Vec input, Vec weight) {
return Vec::blendv(weight * input, input, input > Vec(0));
});
});
}
// PReLU backward: produces (grad_input, grad_weight) per element.
//   input > 0:  grad_input = grad,          grad_weight = 0
//   otherwise:  grad_input = weight * grad, grad_weight = input * grad
void prelu_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_backward_cpu", [&]() {
cpu_kernel_multiple_outputs(iter,
[](scalar_t input, scalar_t weight, scalar_t grad) -> std::tuple<scalar_t, scalar_t> {
auto mask = input > scalar_t{0};
auto grad_input = mask ? grad : weight * grad;
auto grad_weight = mask ? scalar_t{0} : input * grad;
return {grad_input, grad_weight};
});
});
}
} // namespace
// Stub registrations: bind each activation stub to its CPU kernel above.
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel)
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel)
REGISTER_DISPATCH(threshold_stub, &threshold_kernel)
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel)
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel)
REGISTER_DISPATCH(prelu_stub, &prelu_kernel)
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel)
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel)
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel)
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel)
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel)
// Per the macro name, these also register an AVX-512 build of the kernel.
ALSO_REGISTER_AVX512_DISPATCH(log_sigmoid_cpu_stub, &log_sigmoid_cpu_kernel)
ALSO_REGISTER_AVX512_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_cpu_kernel)
ALSO_REGISTER_AVX512_DISPATCH(glu_stub, &glu_kernel)
ALSO_REGISTER_AVX512_DISPATCH(glu_backward_stub, &glu_backward_kernel)
ALSO_REGISTER_AVX512_DISPATCH(glu_jvp_stub, &glu_jvp_kernel)
ALSO_REGISTER_AVX512_DISPATCH(elu_stub, &elu_kernel)
ALSO_REGISTER_AVX512_DISPATCH(elu_backward_stub, &elu_backward_kernel)
ALSO_REGISTER_AVX512_DISPATCH(GeluKernel, &GeluKernelImpl)
ALSO_REGISTER_AVX512_DISPATCH(GeluBackwardKernel, &GeluBackwardKernelImpl)
ALSO_REGISTER_AVX512_DISPATCH(hardswish_stub, &hardswish_kernel)
ALSO_REGISTER_AVX512_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel)
ALSO_REGISTER_AVX512_DISPATCH(softplus_stub, &softplus_kernel)
ALSO_REGISTER_AVX512_DISPATCH(softplus_backward_stub, &softplus_backward_kernel)
ALSO_REGISTER_AVX512_DISPATCH(silu_stub, &silu_kernel)
ALSO_REGISTER_AVX512_DISPATCH(silu_backward_stub, &silu_backward_kernel)
ALSO_REGISTER_AVX512_DISPATCH(mish_stub, &mish_kernel)
ALSO_REGISTER_AVX512_DISPATCH(mish_backward_stub, &mish_backward_kernel)
} // namespace at::native | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/cpu/Activation.cpp |
# sqlalchemy/naming.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \
UniqueConstraint, CheckConstraint, Index, Table, Column
from .. import event, events
from .. import exc
from .elements import _truncated_label, _defer_name, _defer_none_name, conv
import re
class ConventionDict(object):
    """Mapping passed to a naming-convention template; each ``%(token)s``
    in the template is resolved through :meth:`__getitem__`."""

    def __init__(self, const, table, convention):
        self.const = const
        self._is_fk = isinstance(const, ForeignKeyConstraint)
        self.table = table
        self.convention = convention
        self._const_name = const.name

    def _key_table_name(self):
        # %(table_name)s
        return self.table.name

    def _column_X(self, idx):
        # Resolve the idx-th column of the constraint; FK constraints
        # track their columns through .elements.
        if self._is_fk:
            return self.const.elements[idx].parent
        return list(self.const.columns)[idx]

    def _key_constraint_name(self):
        # %(constraint_name)s - only valid when the constraint was given
        # an explicit, non-deferred name.
        if isinstance(self._const_name, (type(None), _defer_none_name)):
            raise exc.InvalidRequestError(
                "Naming convention including "
                "%(constraint_name)s token requires that "
                "constraint is explicitly named."
            )
        if not isinstance(self._const_name, conv):
            # Clear the raw name so the generated one takes its place.
            self.const.name = None
        return self._const_name

    def _key_column_X_name(self, idx):
        return self._column_X(idx).name

    def _key_column_X_label(self, idx):
        return self._column_X(idx)._label

    def _split_target(self, fk):
        # target_fullname is "schema.table.column" or "table.column";
        # return the (table, column) pair in either case.
        refs = fk.target_fullname.split(".")
        if len(refs) == 3:
            return refs[1], refs[2]
        return refs[0], refs[1]

    def _key_referred_table_name(self):
        reftable, _refcol = self._split_target(self.const.elements[0])
        return reftable

    def _key_referred_column_X_name(self, idx):
        _reftable, refcol = self._split_target(self.const.elements[idx])
        return refcol

    def __getitem__(self, key):
        # 1) user-supplied callables in the convention dict win;
        # 2) then fixed _key_* accessors;
        # 3) then indexed tokens such as column_0_name -> _key_column_X_name.
        if key in self.convention:
            return self.convention[key](self.const, self.table)
        if hasattr(self, '_key_%s' % key):
            return getattr(self, '_key_%s' % key)()
        col_template = re.match(r".*_?column_(\d+)_.+", key)
        if col_template:
            idx = col_template.group(1)
            attr = "_key_" + key.replace(idx, "X")
            idx = int(idx)
            if hasattr(self, attr):
                return getattr(self, attr)(idx)
        raise KeyError(key)
# Short prefix used as a convention key for each constraint/index type.
_prefix_dict = {
    Index: "ix",
    PrimaryKeyConstraint: "pk",
    CheckConstraint: "ck",
    UniqueConstraint: "uq",
    ForeignKeyConstraint: "fk"
}


def _get_convention(dict_, key):
    """Look up the naming-convention template for constraint class *key*.

    Walks the full MRO of *key*, matching either the short prefix
    ("ix", "pk", ...) or the class itself as a key of *dict_*.  The
    ``else`` belongs to the ``for`` loop (for/else), so ``None`` is
    returned only after *every* base class has been checked - binding it
    to the ``if`` chain would wrongly bail out on the first non-match.
    """
    for super_ in key.__mro__:
        if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
            return dict_[_prefix_dict[super_]]
        elif super_ in dict_:
            return dict_[super_]
    else:
        return None
def _constraint_name_for_table(const, table):
    """Produce the conventional name for *const* on *table*.

    Returns the existing name when it is already a :class:`.conv`,
    a freshly generated :class:`.conv` name when a convention applies,
    ``None`` for a deferred-none convention, and falls through (implicit
    ``None``) otherwise.
    """
    metadata = table.metadata
    convention = _get_convention(metadata.naming_convention, type(const))

    if isinstance(const.name, conv):
        # Name was supplied explicitly in "converted" form; keep it.
        return const.name
    if convention is not None and (
        const.name is None or
        (not isinstance(const.name, conv) and
         "constraint_name" in convention)
    ):
        return conv(
            convention % ConventionDict(
                const, table, metadata.naming_convention)
        )
    if isinstance(convention, _defer_none_name):
        return None
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
    """Event hook: apply the naming convention when a constraint or index
    is attached to a parent."""
    if isinstance(table, Column):
        # Constraint was attached to a column, not a table yet; defer by
        # re-running this hook once the column itself joins a Table.
        event.listen(
            table, "after_parent_attach",
            lambda col, table: _constraint_name(const, table)
        )
    elif isinstance(table, Table):
        if isinstance(const.name, (conv, _defer_name)):
            # Explicit or deliberately deferred names are left alone.
            return
        newname = _constraint_name_for_table(const, table)
        if newname is not None:
            const.name = newname
#!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_ipv6security_raguard_vlan
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.9"
short_description: CLI command to add/remove ipv6security-raguard-vlan
description:
- This module can be used to Add vlans to RA Guard Policy and Remove vlans to RA Guard Policy.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- ipv6security-raguard-vlan configuration command.
required: false
type: str
choices: ['present', 'absent']
default: 'present'
pn_vlans:
description:
- Vlans attached to RA Guard Policy.
required: true
type: str
pn_name:
description:
- RA Guard Policy Name.
required: true
type: str
"""
EXAMPLES = """
- name: ipv6 security raguard vlan add
pn_ipv6security_raguard_vlan:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_vlans: "100-105"
- name: ipv6 security raguard vlan add
pn_ipv6security_raguard_vlan:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_vlans: "100"
- name: ipv6 security raguard vlan remove
pn_ipv6security_raguard_vlan:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_vlans: "100-105"
state: 'absent'
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the ipv6security-raguard-vlan command.
returned: always
type: list
stderr:
description: set of error responses from the ipv6security-raguard-vlan command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
    """
    Check for idempotency using the ipv6security-raguard-show command and
    validate that every requested vlan already exists on the switch.

    :param module: The Ansible module to fetch input parameters.
    :param cli: The CLI string.
    :return: True if the RA Guard policy name exists, else False.
    """
    name = module.params['pn_name']
    vlans = module.params['pn_vlans']
    show = cli
    cli += ' ipv6security-raguard-show format name no-show-headers'
    out = run_commands(module, cli)[1]
    # run_commands may return None/empty output; normalize to a list so the
    # membership tests below cannot raise TypeError.
    out = out.split() if out else []
    NAME_EXISTS = name in out

    show += ' vlan-show format id no-show-headers'
    out = run_commands(module, show)[1]
    out = out.split() if out else []

    if vlans and '-' in vlans:
        # Ranges like "100-105" are expanded and each id validated.
        vlan_list = list()
        vlans = vlans.strip().split('-')
        for vlan in range(int(vlans[0]), int(vlans[1]) + 1):
            vlan_list.append(str(vlan))
        for vlan in vlan_list:
            if vlan not in out:
                module.fail_json(
                    failed=True,
                    msg='vlan id %s does not exist. Make sure you create vlan before adding it' % vlan
                )
    else:
        if vlans not in out:
            module.fail_json(
                failed=True,
                msg='vlan id %s does not exist. Make sure you create vlan before adding it' % vlans
            )
    return NAME_EXISTS
def main():
    """Entry point: parse module arguments and run the add/remove command."""
    state_map = {
        'present': 'ipv6security-raguard-vlan-add',
        'absent': 'ipv6security-raguard-vlan-remove',
    }

    module = AnsibleModule(
        argument_spec={
            'pn_cliswitch': {'required': False, 'type': 'str'},
            'state': {'required': False, 'type': 'str',
                      'choices': state_map.keys(), 'default': 'present'},
            'pn_vlans': {'required': True, 'type': 'str'},
            'pn_name': {'required': True, 'type': 'str'},
        }
    )

    # Pull the validated parameters.
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    vlans = module.params['pn_vlans']
    name = module.params['pn_name']
    command = state_map[state]

    # Build the CLI command string, checking idempotency first.
    cli = pn_cli(module, cliswitch)
    NAME_EXISTS = check_cli(module, cli)
    cli += ' %s name %s ' % (command, name)

    if command:
        if NAME_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='ipv6security raguard with name %s does not exist' % name
            )

    if vlans:
        cli += ' vlans ' + vlans

    run_cli(module, cli, state_map)


if __name__ == '__main__':
    main()
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_RAGGED_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_RAGGED_UTILS_H_
#include <cstdint>
#include "absl/status/status.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
// Utility functions for RaggedTensor
// Verifies that the splits are valid for ragged tensor
// Returns OkStatus when `ragged_splits` is a valid splits vector:
//   - rank 1 with at least one element,
//   - first element equal to 0,
//   - monotonically non-decreasing,
//   - (when `check_last_element`) last element == `num_ragged_values`.
template <typename SPLIT_TYPE>
absl::Status RaggedTensorVerifySplits(const Tensor& ragged_splits,
                                      bool check_last_element,
                                      int64_t num_ragged_values) {
  auto flat_ragged_splits = ragged_splits.flat<SPLIT_TYPE>();

  if (ragged_splits.dims() != 1) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Invalid ragged splits: ragged splits must be rank 1 but is rank ",
        ragged_splits.dims()));
  }

  if (ragged_splits.NumElements() < 1) {
    return absl::InvalidArgumentError(
        "Invalid ragged splits: ragged splits must have at least one splits, "
        "but is empty");
  }

  if (flat_ragged_splits(0) != static_cast<SPLIT_TYPE>(0)) {
    return absl::InvalidArgumentError(
        absl::StrCat("Invalid ragged splits: first element of ragged splits "
                     " must be 0 but is ",
                     flat_ragged_splits(0)));
  }

  // Each split must be >= its predecessor.
  SPLIT_TYPE last_split = 0;
  for (int j = 1; j < ragged_splits.dim_size(0); j++) {
    auto split = flat_ragged_splits(j);
    if (split < last_split) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid ragged splits: ragged splits must be "
                       "monotonically increasing, but ragged_splits[",
                       j, "]=", split, " is smaller than row_splits[", j - 1,
                       "]=", last_split));
    }
    last_split = split;
  }

  // Fixed: logical && instead of bitwise & between the two conditions
  // (behavior was coincidentally equivalent for bools, but `&` does not
  // short-circuit and obscures the intent).
  if (check_last_element && last_split != num_ragged_values) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Invalid ragged splits: last element of ragged splits must be ",
        "the number of ragged values(", num_ragged_values, ") but is ",
        last_split));
  }
  return absl::OkStatus();
}
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_RAGGED_UTILS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/ragged_utils.h |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 && !arm64 && !loong64 && !riscv64 && !s390x
package math
// haveArchMax reports that no assembly implementation of Max exists on
// this architecture, so the pure-Go path is used instead.
const haveArchMax = false

// archMax must never be called when haveArchMax is false.
func archMax(x, y float64) float64 {
panic("not implemented")
}
// haveArchMin reports that no assembly implementation of Min exists on
// this architecture, so the pure-Go path is used instead.
const haveArchMin = false

// archMin must never be called when haveArchMin is false.
func archMin(x, y float64) float64 {
panic("not implemented")
}
import numpy
class MRSData(numpy.ndarray):
    """
    An ndarray subclass that carries MRS acquisition metadata (dwell time,
    scanner frequency, echo time, reference ppm, voxel geometry and an
    optional spatial transform) alongside the raw samples.
    """

    def __new__(cls, input_array, dt, f0, te=30, ppm0=4.7,
                voxel_dimensions=(10, 10, 10), transform=None, metadata=None):
        # View the already-formed data as our subclass, then attach metadata.
        obj = numpy.asarray(input_array).view(cls)
        obj._dt = dt
        obj._f0 = f0
        obj._te = te
        obj.ppm0 = ppm0
        obj.voxel_dimensions = voxel_dimensions
        obj.transform = transform
        obj.metadata = metadata
        return obj

    def __array_finalize__(self, obj):
        # Invoked for slices/views: propagate the parameters from the
        # source object, falling back to defaults when it has none.
        self._dt = getattr(obj, 'dt', None)
        self._f0 = getattr(obj, 'f0', None)
        self._te = getattr(obj, 'te', 30)
        self.ppm0 = getattr(obj, 'ppm0', None)
        self.transform = getattr(obj, 'transform', None)
        self.metadata = getattr(obj, 'metadata', None)
        self.voxel_dimensions = getattr(obj, 'voxel_dimensions', (10, 10, 10))

    def __array_wrap__(self, obj):
        # Zero-dimensional results collapse to a plain scalar.
        if len(obj.shape) == 0:
            return obj[()]
        return numpy.ndarray.__array_wrap__(self, obj)

    def __str__(self):
        return "<MRSData instance f0={0}MHz TE={1}ms dt={2}ms>".format(
            self.f0, self.te, self.dt * 1e3)

    def inherit(self, new_array):
        """
        Cast *new_array* to MRSData, copying this instance's MRS parameters
        onto it.  Useful when processing an MRSData object yields a bare
        ndarray result.

        :param new_array: the ndarray to be converted to MRSData
        :return: a new MRSData with data from new_array and parameters from self
        """
        cast = new_array.view(MRSData)
        cast._dt = self.dt
        cast._f0 = self.f0
        cast._te = self.te
        cast.ppm0 = self.ppm0
        cast.voxel_dimensions = self.voxel_dimensions
        cast.transform = self.transform
        cast.metadata = self.metadata
        return cast

    @property
    def dt(self):
        """Dwell time of the acquisition, in seconds."""
        return self._dt

    @property
    def np(self):
        """Number of points in the FID (length of the last axis)."""
        return self.shape[-1]

    @property
    def sw(self):
        """Spectral width in Hz, computed as 1 / dt."""
        return 1.0 / self.dt

    @property
    def df(self):
        """Frequency step in Hz between neighbouring spectral points."""
        return self.sw / self.np

    @property
    def te(self):
        """Echo time of the sequence, in ms."""
        return self._te

    @property
    def f0(self):
        """Scanner frequency in MHz (a.k.a. Hz per ppm in LCModel)."""
        return self._f0

    def spectrum(self):
        """Return the Fourier-transformed and fftshifted data."""
        return numpy.fft.fftshift(numpy.fft.fft(self, axis=-1), axes=-1)

    def hertz_to_ppm(self, frequency):
        """Convert *frequency* in Hz to the corresponding ppm value."""
        return self.ppm0 - frequency / self.f0

    def ppm_to_hertz(self, frequency):
        """Convert *frequency* in ppm to the corresponding Hz value."""
        return (self.ppm0 - frequency) * self.f0

    def time_axis(self):
        """Return the sample time in seconds for each point in the FID."""
        return numpy.arange(0.0, self.dt * self.np, self.dt)

    def frequency_axis(self):
        """Return frequencies in Hz from -sw/2 (inclusive) up to sw/2."""
        return numpy.linspace(-self.sw / 2, self.sw / 2, self.np,
                              endpoint=False)

    def frequency_axis_ppm(self):
        """Return the frequency axis expressed in ppm."""
        return numpy.linspace(self.hertz_to_ppm(-self.sw / 2.0),
                              self.hertz_to_ppm(self.sw / 2.0),
                              self.np, endpoint=False)

    def voxel_size(self):
        """Return the voxel volume in mm^3 (product of its dimensions)."""
        return numpy.prod(self.voxel_dimensions)

    def to_scanner(self, x, y, z):
        """
        Map a 3d point from MRSData space to the scanner reference frame.

        :raises ValueError: when no transform is set on this instance.
        """
        if self.transform is None:
            raise ValueError("No transform set for MRSData object {}".format(self))
        mapped = self.transform * numpy.matrix([x, y, z, 1]).T
        return numpy.squeeze(numpy.asarray(mapped))[0:3]

    def from_scanner(self, x, y, z):
        """
        Map a 3d point from the scanner reference frame to MRSData space.

        :raises ValueError: when no transform is set on this instance.
        """
        if self.transform is None:
            raise ValueError("No transform set for MRSData object {}".format(self))
        mapped = numpy.linalg.inv(self.transform) * numpy.matrix([x, y, z, 1]).T
        return numpy.squeeze(numpy.asarray(mapped))[0:3]
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\WebProfilerBundle\Tests\Profiler;
use Symfony\Bundle\WebProfilerBundle\Profiler\TemplateManager;
use Symfony\Bundle\WebProfilerBundle\Tests\TestCase;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\DataCollector\DataCollector;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
use Symfony\Component\HttpKernel\Profiler\Profile;
use Symfony\Component\HttpKernel\Profiler\Profiler;
use Twig\Environment;
use Twig\Loader\LoaderInterface;
/**
* @author Artur Wielogórski <wodor@wodor.net>
*/
class TemplateManagerTest extends TestCase
{
protected Environment $twigEnvironment;
protected Profiler $profiler;
protected TemplateManager $templateManager;
// Build a TemplateManager wired to a stubbed Profiler and a Twig
// environment whose loader reports every template as existing.
protected function setUp(): void
{
$this->profiler = $this->createStub(Profiler::class);
$twigEnvironment = $this->mockTwigEnvironment();
$templates = [
'data_collector.foo' => ['foo', '@Foo/Collector/foo.html.twig'],
'data_collector.bar' => ['bar', '@Foo/Collector/bar.html.twig'],
'data_collector.baz' => ['baz', '@Foo/Collector/baz.html.twig'],
];
$this->templateManager = new TemplateManager($this->profiler, $twigEnvironment, $templates);
}
// Unknown panels must produce a 404-style exception.
public function testGetNameOfInvalidTemplate()
{
$this->expectException(NotFoundHttpException::class);
$this->templateManager->getName(new Profile('token'), 'notexistingpanel');
}
/**
* if template exists in both profile and profiler then its name should be returned.
*/
public function testGetNameValidTemplate()
{
$this->profiler
->method('has')
->willReturnCallback($this->profilerHasCallback(...));
$profile = new Profile('token');
$profile->addCollector(new DummyCollector('foo'));
$profile->addCollector(new DummyCollector('bar'));
$this->assertEquals('@Foo/Collector/foo.html.twig', $this->templateManager->getName($profile, 'foo'));
}
// Stub behavior for Profiler::has(): only 'foo' and 'bar' are known.
public function profilerHasCallback($panel)
{
return match ($panel) {
'foo',
'bar' => true,
default => false,
};
}
// Stub behavior for Profile::hasCollector(): only 'foo' and 'baz' collected.
public function profileHasCollectorCallback($panel)
{
return match ($panel) {
'foo',
'baz' => true,
default => false,
};
}
// Twig environment whose loader claims every template exists.
protected function mockTwigEnvironment()
{
$loader = $this->createStub(LoaderInterface::class);
$loader
->method('exists')
->willReturn(true);
$this->twigEnvironment = new Environment($loader);
return $this->twigEnvironment;
}
}
/**
 * Minimal data-collector fixture: it only carries a name and collects nothing.
 */
class DummyCollector extends DataCollector
{
    private string $name;

    public function __construct(string $name)
    {
        $this->name = $name;
    }

    public function getName(): string
    {
        return $this->name;
    }

    public function collect(Request $request, Response $response, ?\Throwable $exception = null): void
    {
        // Intentionally empty: this fixture gathers no data.
    }
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from common_report_header import common_report_header
from openerp.report import report_sxw
class journal_print(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(journal_print, self).__init__(cr, uid, name, context=context)
self.period_ids = []
self.last_move_id = False
self.journal_ids = []
self.sort_selection = 'am.name'
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_account': self._get_account,
'get_filter': self._get_filter,
'get_start_date': self._get_start_date,
'get_end_date': self._get_end_date,
'get_fiscalyear': self._get_fiscalyear,
'display_currency':self._display_currency,
'get_sortby': self._get_sortby,
'get_target_move': self._get_target_move,
'check_last_move_id': self.check_last_move_id,
'set_last_move_id': self.set_last_move_id,
'tax_codes': self.tax_codes,
'sum_vat': self._sum_vat,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
new_ids = ids
self.query_get_clause = ''
self.target_move = data['form'].get('target_move', 'all')
if (data['model'] == 'ir.ui.menu'):
self.period_ids = tuple(data['form']['periods'])
self.journal_ids = tuple(data['form']['journal_ids'])
new_ids = data['form'].get('active_ids', [])
self.query_get_clause = 'AND '
self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
self.sort_selection = data['form'].get('sort_selection', 'date')
objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
elif new_ids:
#in case of direct access from account.journal.period object, we need to set the journal_ids and periods_ids
self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
res = self.cr.fetchall()
self.period_ids, self.journal_ids = zip(*res)
return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
def set_last_move_id(self, move_id):
self.last_move_id = move_id
def check_last_move_id(self, move_id):
'''
return True if we need to draw a gray line above this line, used to separate moves
'''
if self.last_move_id:
return not(self.last_move_id == move_id)
return False
    def tax_codes(self, period_id, journal_id):
        """Return browse records of the distinct tax codes used by non-draft
        move lines of the given period/journal, ordered by tax code."""
        # NOTE(review): ids_journal_period is computed but never used below.
        ids_journal_period = self.pool.get('account.journal.period').search(self.cr, self.uid,
            [('journal_id', '=', journal_id), ('period_id', '=', period_id)])
        self.cr.execute(
            'select distinct tax_code_id from account_move_line ' \
            'where period_id=%s and journal_id=%s and tax_code_id is not null and state<>\'draft\'',
            (period_id, journal_id)
        )
        ids = map(lambda x: x[0], self.cr.fetchall())
        tax_code_ids = []
        if ids:
            # Second query exists only to get a stable ordering by code.
            self.cr.execute('select id from account_tax_code where id in %s order by code', (tuple(ids),))
            tax_code_ids = map(lambda x: x[0], self.cr.fetchall())
        tax_codes = self.pool.get('account.tax.code').browse(self.cr, self.uid, tax_code_ids)
        return tax_codes
    def _sum_vat(self, period_id, journal_id, tax_code_id):
        """Sum of tax_amount booked on one tax code for the given
        period/journal, draft lines excluded; 0.0 when there are none."""
        self.cr.execute('select sum(tax_amount) from account_move_line where ' \
                        'period_id=%s and journal_id=%s and tax_code_id=%s and state<>\'draft\'',
                        (period_id, journal_id, tax_code_id))
        return self.cr.fetchone()[0] or 0.0
    def _sum_debit(self, period_id=False, journal_id=False):
        """Total debit of the selected move lines.

        ``period_id``/``journal_id`` may be a single id or a list; both
        default to the report-wide selection. Returns 0.0 when either
        selection is empty.
        """
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        # Draft moves are excluded when the wizard asked for posted entries only.
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(debit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s ' + self.query_get_clause + ' ',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0
    def _sum_credit(self, period_id=False, journal_id=False):
        """Total credit of the selected move lines.

        Mirror of _sum_debit: single ids are wrapped into lists, empty
        selections fall back to the report-wide selection, and 0.0 is
        returned when nothing is selected.
        """
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        # Draft moves are excluded when the wizard asked for posted entries only.
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s '+ self.query_get_clause+'',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0
    def lines(self, period_id, journal_id=False):
        """Browse records of the move lines to print for one period.

        Side effect: flags the matching draft account.journal.period rows
        as 'printed'.
        """
        if not journal_id:
            journal_id = self.journal_ids
        else:
            journal_id = [journal_id]
        obj_mline = self.pool.get('account.move.line')
        self.cr.execute('update account_journal_period set state=%s where journal_id IN %s and period_id=%s and state=%s', ('printed', self.journal_ids, period_id, 'draft'))
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        # NOTE(review): self.sort_selection is concatenated into the ORDER BY
        # clause; it comes from a constrained wizard selection, but confirm it
        # cannot carry arbitrary user input.
        self.cr.execute('SELECT l.id FROM account_move_line l, account_move am WHERE l.move_id=am.id AND am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ORDER BY '+ self.sort_selection + ', l.move_id',(tuple(move_state), period_id, tuple(journal_id) ))
        ids = map(lambda x: x[0], self.cr.fetchall())
        return obj_mline.browse(self.cr, self.uid, ids)
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol AS code "\
"FROM res_currency c,account_account AS ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
    def _get_fiscalyear(self, data):
        """Fiscal year label: taken from the journal period itself when the
        report is launched directly on account.journal.period, otherwise
        delegated to the parent report."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
        return super(journal_print, self)._get_fiscalyear(data)
    def _get_account(self, data):
        """Account/company label: the journal period's company name when
        launched directly on account.journal.period, otherwise the parent
        report's value."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
        return super(journal_print, self)._get_account(data)
def _display_currency(self, data):
if data['model'] == 'account.journal.period':
return True
return data['form']['amount_currency']
def _get_sortby(self, data):
if self.sort_selection == 'date':
return 'Date'
elif self.sort_selection == 'ref':
return 'Reference Number'
return 'Date'
# Register both flavours of the journal report on account.journal.period:
# the generic one and the sale/purchase variant (tax columns).
report_sxw.report_sxw('report.account.journal.period.print', 'account.journal.period', 'addons/account/report/account_journal.rml', parser=journal_print, header='external')
report_sxw.report_sxw('report.account.journal.period.print.sale.purchase', 'account.journal.period', 'addons/account/report/account_journal_sale_purchase.rml', parser=journal_print, header='external')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- coding: utf-8 -*-
"""
test all other .agg behavior
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import pandas as pd
from pandas import date_range, DataFrame, Index, MultiIndex, Series
from pandas.core.groupby import SpecificationError
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
def test_agg_api():
    """A bare custom function passed to agg on a mixed frame must match the
    list form ``agg([func])`` with flattened column names (GH 6337)."""
    # GH 6337
    # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
    # different api for agg when passed custom function with mixed frame

    df = DataFrame({'data1': np.random.randn(5),
                    'data2': np.random.randn(5),
                    'key1': ['a', 'a', 'b', 'b', 'a'],
                    'key2': ['one', 'two', 'one', 'two', 'one']})
    grouped = df.groupby('key1')

    def peak_to_peak(arr):
        return arr.max() - arr.min()

    expected = grouped.agg([peak_to_peak])
    expected.columns = ['data1', 'data2']
    result = grouped.agg(peak_to_peak)
    tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
    """Grouping must produce the same number of groups whether the date
    column holds strings or datetime.date objects (with a missing value)."""
    data = [[1, '2012-01-01', 1.0],
            [2, '2012-01-02', 2.0],
            [3, None, 3.0]]

    df1 = DataFrame({'key': [x[0] for x in data],
                     'date': [x[1] for x in data],
                     'value': [x[2] for x in data]})

    data = [[row[0],
             datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] else None,
             row[2]]
            for row in data]

    df2 = DataFrame({'key': [x[0] for x in data],
                     'date': [x[1] for x in data],
                     'value': [x[2] for x in data]})

    df1['weights'] = df1['value'] / df1['value'].sum()
    gb1 = df1.groupby('date').aggregate(np.sum)

    # NOTE(review): weights for df2 are computed from df1's values -- the
    # value columns are identical so the result matches, but confirm this
    # is intentional.
    df2['weights'] = df1['value'] / df1['value'].sum()
    gb2 = df2.groupby('date').aggregate(np.sum)

    assert (len(gb1) == len(gb2))
def test_agg_period_index():
    """groupby-sum over a PeriodIndex must keep a PeriodIndex, and grouping
    by index month must not raise (GH 3579)."""
    from pandas import period_range, PeriodIndex
    prng = period_range('2012-1-1', freq='M', periods=3)
    df = DataFrame(np.random.randn(3, 2), index=prng)
    rs = df.groupby(level=0).sum()
    assert isinstance(rs.index, PeriodIndex)

    # GH 3579
    index = period_range(start='1999-01', periods=5, freq='M')
    s1 = Series(np.random.rand(len(index)), index=index)
    s2 = Series(np.random.rand(len(index)), index=index)
    series = [('s1', s1), ('s2', s2)]
    # NOTE(review): DataFrame.from_items is deprecated in later pandas.
    df = DataFrame.from_items(series)
    grouped = df.groupby(df.index.month)
    # Materializing the groups is the assertion: it must not raise.
    list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
    """first/last/size/count through agg must preserve the datetime dtype of
    the 'time' column even with NaT entries (GH 12821)."""
    # GH 12821

    df = DataFrame({'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
                    'time': date_range('1/1/2011', periods=8, freq='H')})
    df.loc[[0, 1, 2, 5], 'time'] = None

    # test for `first` function
    exp = df.loc[[0, 3, 4, 6]].set_index('class')
    grouped = df.groupby('class')
    tm.assert_frame_equal(grouped.first(), exp)
    tm.assert_frame_equal(grouped.agg('first'), exp)
    tm.assert_frame_equal(grouped.agg({'time': 'first'}), exp)
    tm.assert_series_equal(grouped.time.first(), exp['time'])
    tm.assert_series_equal(grouped.time.agg('first'), exp['time'])

    # test for `last` function
    exp = df.loc[[0, 3, 4, 7]].set_index('class')
    grouped = df.groupby('class')
    tm.assert_frame_equal(grouped.last(), exp)
    tm.assert_frame_equal(grouped.agg('last'), exp)
    tm.assert_frame_equal(grouped.agg({'time': 'last'}), exp)
    tm.assert_series_equal(grouped.time.last(), exp['time'])
    tm.assert_series_equal(grouped.time.agg('last'), exp['time'])

    # count
    exp = pd.Series([2, 2, 2, 2],
                    index=Index(list('ABCD'), name='class'),
                    name='time')
    tm.assert_series_equal(grouped.time.agg(len), exp)
    tm.assert_series_equal(grouped.time.size(), exp)

    exp = pd.Series([0, 1, 1, 2],
                    index=Index(list('ABCD'), name='class'),
                    name='time')
    tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
    """agg(len) on a datetime column must match count() exactly, dtype
    included (similar to GH12821, xref #11444)."""
    dates = [datetime(2015, month, 1) for month in range(1, 13)]
    keys = list('aaabbbbbbccd')
    frame = pd.DataFrame({'X': keys, 'Y': dates})

    grouped = frame.groupby('X')['Y']
    tm.assert_series_equal(grouped.agg(len), grouped.count())
def test_aggregate_float64_no_int64():
    """Mean over integer columns must come back as float64, never int64
    (gh-11199)."""
    # see gh-11199
    df = DataFrame({"a": [1, 2, 3, 4, 5],
                    "b": [1, 2, 2, 4, 5],
                    "c": [1, 2, 3, 4, 5]})

    expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
    expected.index.name = "b"

    result = df.groupby("b")[["a"]].mean()
    tm.assert_frame_equal(result, expected)

    expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]},
                         index=[1, 2, 4, 5])
    expected.index.name = "b"

    result = df.groupby("b")[["a", "c"]].mean()
    tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
    """The list/dict/callable spellings of agg must agree with the explicit
    per-column aggregates (GH 9052); dict-of-renamed-columns warns."""
    # GH 9052
    # make sure that the aggregates via dict
    # are consistent

    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': np.random.randn(8) + 1.0,
                    'D': np.arange(8)})

    grouped = df.groupby(['A', 'B'])
    c_mean = grouped['C'].mean()
    c_sum = grouped['C'].sum()
    d_mean = grouped['D'].mean()
    d_sum = grouped['D'].sum()

    result = grouped['D'].agg(['sum', 'mean'])
    expected = pd.concat([d_sum, d_mean], axis=1)
    expected.columns = ['sum', 'mean']
    tm.assert_frame_equal(result, expected, check_like=True)

    result = grouped.agg([np.sum, np.mean])
    expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
    expected.columns = MultiIndex.from_product([['C', 'D'],
                                                ['sum', 'mean']])
    tm.assert_frame_equal(result, expected, check_like=True)

    result = grouped[['D', 'C']].agg([np.sum, np.mean])
    expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
    expected.columns = MultiIndex.from_product([['D', 'C'],
                                                ['sum', 'mean']])
    tm.assert_frame_equal(result, expected, check_like=True)

    result = grouped.agg({'C': 'mean', 'D': 'sum'})
    expected = pd.concat([d_sum, c_mean], axis=1)
    tm.assert_frame_equal(result, expected, check_like=True)

    result = grouped.agg({'C': ['mean', 'sum'],
                          'D': ['mean', 'sum']})
    expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
    expected.columns = MultiIndex.from_product([['C', 'D'],
                                                ['mean', 'sum']])

    # Renaming through a dict of output labels is deprecated and must warn.
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = grouped[['D', 'C']].agg({'r': np.sum,
                                          'r2': np.mean})
    expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
    expected.columns = MultiIndex.from_product([['r', 'r2'],
                                                ['D', 'C']])
    tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
    """All dict-based renaming forms of agg must emit a FutureWarning with
    the expected message (GH 15931)."""
    # 15931
    df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
                       'B': range(5),
                       'C': range(5)})

    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False) as w:
        df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
                             'C': {'bar': ['count', 'min']}})
        assert "using a dict with renaming" in str(w[0].message)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        df.groupby('A')[['B', 'C']].agg({'ma': 'max'})

    with tm.assert_produces_warning(FutureWarning) as w:
        df.groupby('A').B.agg({'foo': 'count'})
        assert "using a dict on a Series for aggregation" in str(w[0].message)
def test_agg_compat():
    """Deprecated dict renaming on a SeriesGroupBy must still compute the
    right values while warning (GH 12334)."""
    # GH 12334

    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': np.random.randn(8) + 1.0,
                    'D': np.arange(8)})

    g = df.groupby(['A', 'B'])

    expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
    expected.columns = MultiIndex.from_tuples([('C', 'sum'),
                                               ('C', 'std')])
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = g['D'].agg({'C': ['sum', 'std']})
    tm.assert_frame_equal(result, expected, check_like=True)

    expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
    expected.columns = ['C', 'D']

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = g['D'].agg({'C': 'sum', 'D': 'std'})
    tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
    """Nested renaming dicts must raise SpecificationError on the frame and
    warn (FutureWarning) on the per-column forms; values stay correct."""
    # API change for disallowing these types of nested dicts

    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': np.random.randn(8) + 1.0,
                    'D': np.arange(8)})

    g = df.groupby(['A', 'B'])

    msg = r'cannot perform renaming for r[1-2] with a nested dictionary'
    with tm.assert_raises_regex(SpecificationError, msg):
        g.aggregate({'r1': {'C': ['mean', 'sum']},
                     'r2': {'D': ['mean', 'sum']}})

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = g.agg({'C': {'ra': ['mean', 'std']},
                        'D': {'rb': ['mean', 'std']}})
    expected = pd.concat([g['C'].mean(), g['C'].std(),
                          g['D'].mean(), g['D'].std()],
                         axis=1)
    expected.columns = pd.MultiIndex.from_tuples(
        [('ra', 'mean'), ('ra', 'std'),
         ('rb', 'mean'), ('rb', 'std')])
    tm.assert_frame_equal(result, expected, check_like=True)

    # same name as the original column
    # GH9052
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
    expected = expected.rename(columns={'result1': 'D'})

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = g['D'].agg({'D': np.sum, 'result2': np.mean})
    tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
    """A TypeError raised inside the aggregation function must propagate out
    of the item-by-item agg path."""
    from numpy.random import randint

    df = DataFrame(randint(10, size=(20, 10)))

    def raiseException(df):
        # Print the offending frame for easier debugging before failing.
        pprint_thing('----------------------------------------')
        pprint_thing(df.to_string())
        raise TypeError('test')

    with tm.assert_raises_regex(TypeError, 'test'):
        df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
    """agg(np.sum) on a multi-key series groupby must equal .sum()."""
    ts = tm.makeTimeSeries()
    grouped = ts.groupby([lambda x: x.year, lambda x: x.month])

    result = grouped.agg(np.sum)
    expected = grouped.sum()
    tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
    """A pure-Python aggregator returning a constant must match a lambda
    doing the same; the assert on x.base checks groups are real views."""
    data = DataFrame(
        {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
               'foo', 'foo', 'foo'],
         'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
               'two', 'two', 'one'],
         'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
               'dull', 'shiny', 'shiny', 'shiny'],
         'D': np.random.randn(11),
         'E': np.random.randn(11),
         'F': np.random.randn(11)})

    def bad(x):
        assert (len(x.base) > 0)
        return 'foo'

    result = data.groupby(['A', 'B']).agg(bad)
    expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
    tm.assert_frame_equal(result, expected)
def test_agg_consistency():
    """agg(func) must equal agg([func]) with the column level dropped
    (GH 6715)."""
    # agg with ([]) and () not consistent
    # GH 6715

    def P1(a):
        try:
            return np.percentile(a.dropna(), q=1)
        except Exception:
            # Empty/ill-typed input: report NaN rather than raising.
            return np.nan

    import datetime as dt
    df = DataFrame({'col1': [1, 2, 3, 4],
                    'col2': [10, 25, 26, 31],
                    'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
                             dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})

    g = df.groupby('date')

    expected = g.agg([P1])
    expected.columns = expected.columns.levels[0]

    result = g.agg(P1)
    tm.assert_frame_equal(result, expected)
def test_agg_callables():
    """Every flavour of callable (builtin, ufunc, lambda, partial, callable
    object) must aggregate identically (GH 7929)."""
    # GH 7929
    df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)

    class fn_class(object):

        def __call__(self, x):
            return sum(x)

    equiv_callables = [sum,
                       np.sum,
                       lambda x: sum(x),
                       lambda x: x.sum(),
                       partial(sum),
                       fn_class(), ]

    expected = df.groupby("foo").agg(sum)
    for ecall in equiv_callables:
        result = df.groupby('foo').agg(ecall)
        tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
    """Summing cells that contain numpy arrays must add them element-wise
    (GH 3788)."""
    # GH 3788
    df = pd.DataFrame([[1, np.array([10, 20, 30])],
                       [1, np.array([40, 50, 60])],
                       [2, np.array([20, 30, 40])]],
                      columns=['category', 'arraydata'])
    result = df.groupby('category').agg(sum)

    expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
    expected_index = pd.Index([1, 2], name='category')
    expected_column = ['arraydata']
    expected = pd.DataFrame(expected_data,
                            index=expected_index,
                            columns=expected_column)

    tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
    """Aggregations (min, nth, head, first, last, apply) must preserve the
    timezone of tz-aware timestamps (GH 15426)."""
    # GH 15426
    ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
    df = pd.DataFrame({'a': 1,
                       'b': [ts + timedelta(minutes=nn) for nn in range(10)]})

    result1 = df.groupby('a')['b'].agg(np.min).iloc[0]
    result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
    result3 = df.groupby('a')['b'].min().iloc[0]

    assert result1 == ts
    assert result2 == ts
    assert result3 == ts

    dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific')
             for i in range(1, 5)]
    df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates})
    grouped = df.groupby('A')

    ts = df['B'].iloc[0]
    assert ts == grouped.nth(0)['B'].iloc[0]
    assert ts == grouped.head(1)['B'].iloc[0]
    assert ts == grouped.first()['B'].iloc[0]
    assert ts == grouped.apply(lambda x: x.iloc[0])[0]

    ts = df['B'].iloc[2]
    assert ts == grouped.last()['B'].iloc[0]
    assert ts == grouped.apply(lambda x: x.iloc[-1])[0]
def test_sum_uint64_overflow():
    """Sums exceeding int64 must be converted to uint64 instead of
    overflowing (gh-14758)."""
    # see gh-14758

    # Convert to uint64 and don't overflow
    df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
    df = df + 9223372036854775807

    index = pd.Index([9223372036854775808,
                      9223372036854775810,
                      9223372036854775812],
                     dtype=np.uint64)
    expected = pd.DataFrame({1: [9223372036854775809,
                                 9223372036854775811,
                                 9223372036854775813]},
                            index=index)

    expected.index.name = 0
    result = df.groupby(0).sum()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
    (tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
    (list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
    (lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1),
                                             (3, 4): (3, 4, 4)}})),
    (lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1],
                                            (3, 4): [3, 4, 4]}}))
])
def test_agg_structs_dataframe(structure, expected):
    """Aggregating a frame with tuple/list constructors must collect the
    group values into one container per cell."""
    df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
                       'B': [1, 1, 1, 4, 4, 4],
                       'C': [1, 1, 1, 3, 4, 4]})

    result = df.groupby(['A', 'B']).aggregate(structure)
    expected.index.names = ['A', 'B']
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
    (tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')),
    (list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')),
    (lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)],
                                   index=[1, 3], name='C')),
    (lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]],
                                  index=[1, 3], name='C'))
])
def test_agg_structs_series(structure, expected):
    """Series groupby aggregation with tuple/list constructors must collect
    the group values into one container per group (GH 18079)."""
    # Issue #18079
    df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
                       'B': [1, 1, 1, 4, 4, 4],
                       'C': [1, 1, 1, 3, 4, 4]})

    result = df.groupby('A')['C'].aggregate(structure)
    expected.index.name = 'A'
    tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.")
def test_agg_category_nansum():
    """np.nansum over a categorical grouper should yield 0 (not NaN) for
    unused categories; currently xfails (GH 18869)."""
    categories = ['a', 'b', 'c']
    df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
                                           categories=categories),
                       'B': [1, 2, 3]})
    result = df.groupby("A").B.agg(np.nansum)
    expected = pd.Series([3, 3, 0],
                         index=pd.CategoricalIndex(['a', 'b', 'c'],
                                                   categories=categories,
                                                   name='A'),
                         name='B')
    tm.assert_series_equal(result, expected)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Logging module for the Multivio application."""
#==============================================================================
# This file is part of the Multivio software.
# Project : Multivio - https://www.multivio.org/
# Copyright: (c) 2009-2011 RERO (http://www.rero.ch/)
# License : See file COPYING
#==============================================================================
__copyright__ = "Copyright (c) 2009-2011 RERO"
__license__ = "GPL V.2"
#---------------------------- Modules -----------------------------------------
# import of standard modules
from optparse import OptionParser
import re
import os
import sys
import time
import cStringIO
import hashlib
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
try:
import Image
except:
from PIL import Image
WKHTMLTOX_SUPPORT = True
try:
import wkhtmltox
except ImportError:
WKHTMLTOX_SUPPORT = False
# local modules
from web_app import WebApplication, ApplicationError, WebException
class WebProcessorError:
    """Namespace class grouping the exceptions raised by this module."""

    class UnableToRenderWebPage(WebException):
        """Raised when the remote page cannot be rendered to an image.

        Mapped to HTTP 502 (Bad Gateway).
        """
        def __init__(self, value=None):
            WebException.__init__(self, value)
            # Status line consumed by the WSGI error handler.
            self.http_code = "502 Bad Gateway"
#---------------------------- Classes -----------------------------------------
class WebProcessorApp(WebApplication):
    """WSGI application that renders a web page into a JPEG thumbnail.

    Rendered pages are cached on disk under a SHA-224 digest of the url;
    concurrent requests for the same url coordinate through atomic file
    creation.
    """
    def __init__(self):
        """Basic constructor: set up the HTML usage/help string."""
        WebApplication.__init__(self)
        self.usage = """Create a image as the reder of a website.
<b>Arguments:</b>
<ul>
<li><em>max_width --integer--</em> max width of the output image in pixel. Default(None)
<li><em>max_height --integer--</em> max height of the output image in pixel. Default(None)
<li><em>url --string--</em> url of an image file.
</ul>
<a
href="/server/website/render?max_width=400&max_height=400&url=http://doc.rero.ch"><b>Example
of redering RERO DOC web site.</b></a><br>
<br>"""

    def get(self, environ, start_response):
        """Handle a GET request: render the requested url and return a JPEG.

        Raises WebProcessorError.UnableToRenderWebPage (HTTP 502) when
        rendering fails or a concurrent render times out, and
        ApplicationError.InvalidArgument for malformed requests.
        """
        if not WKHTMLTOX_SUPPORT:
            raise WebProcessorError.UnableToRenderWebPage("WKTHMLTOX not installed")
        #get parameters from the URI
        (path, opts) = self.get_params(environ)
        #check if is valid
        max_width = max_height = 1024
        try:
            max_width = int(opts['max_width'])
        except KeyError:
            pass
        try:
            max_height = int(opts['max_height'])
        except KeyError:
            pass
        if re.search(r'render', path) is not None and opts.has_key('url'):
            url_to_fetch = opts['url']
            # Disk-cache key: digest of the requested url.
            url_md5 = hashlib.sha224(url_to_fetch).hexdigest()
            local_file = os.path.join(self._tmp_dir, url_md5+".jpg")
            to_download = False
            try:
                #file exists: ATOMIC?
                # O_CREAT|O_EXCL is atomic: only one process wins the right
                # to render this url; the others fall through to the wait loop.
                tmp_file = os.open(local_file, os.O_CREAT|os.O_EXCL|os.O_RDWR)
                to_download = True
                os.close(tmp_file)
            except Exception:
                pass
            if to_download:
                self.logger.debug("Try to retrieve %s file" % url_to_fetch)
                # Render into a side file, then rename into place so readers
                # never observe a half-written cache entry.
                filename = os.path.join(self._tmp_dir, url_md5+"_tmp.jpg")
                start = time.time()
                try:
                    self._render_url(url_to_fetch, filename)
                except Exception, e:
                    os.remove(filename)
                    raise WebProcessorError.UnableToRenderWebPage(str(e))
                #file in cache
                os.rename(filename, local_file)
            else:
                #downloading by an other process?
                # Poll until the winning process produced a non-empty file,
                # or give up after self._timeout seconds.
                start_time_wait = time.time()
                time_out_counter = 0
                while os.path.getsize(local_file) == 0L \
                        and time_out_counter < self._timeout:
                    self.logger.info("Wait for file: %s" % local_file )
                    time.sleep(.5)
                    time_out_counter = time.time() - start_time_wait
                if time_out_counter >= self._timeout:
                    self.logger.warn("Rendering process timeout")
                    raise WebProcessorError.UnableToRenderWebPage(
                        "Rendering process timeout: %s" % url_to_fetch)
            data = self._resize(local_file, max_width, max_height)
            start_response('200 OK', [('content-type',
                            'image/jpeg'),('content-length',
                            str(len(data)))])
            return [data]
        raise ApplicationError.InvalidArgument("Invalid Argument")

    def _resize(self, file_name, max_width, max_height):
        """Return JPEG bytes of the image fitted inside the given bounding box."""
        img = Image.open(file_name)
        # thumbnail() resizes in place, preserving the aspect ratio.
        img.thumbnail((max_width, max_height), Image.ANTIALIAS)
        temp_file = cStringIO.StringIO()
        img.save(temp_file, "JPEG", quality=90)
        temp_file.seek(0)
        data = temp_file.read()
        return data

    def _render_url(self, url, output_file_name):
        """Render ``url`` with wkhtmltox into ``output_file_name`` (blocking)."""
        img = wkhtmltox.Image()
        img.set_global_setting('screenHeight', '1024')
        img.set_global_setting('out', output_file_name)
        img.set_global_setting('in', url)
        img.convert()

    def get_params(self, environ):
        """ Overload the default method to allow cgi url.

        Everything after the first ``url=`` is kept verbatim as the 'url'
        option, so the target url may itself contain query strings.
        The url parameter should be at the end of the url.
        i.e.
        /server/structure/get_logical?format=raw&url=http:www.toto.ch/test?url=http://www.test.ch
        is ok, but:
        /server/structure/get_logical?url=http:www.toto.ch/test?url=http://www.test.ch&format=raw
        is incorrect.
        """
        path = environ['PATH_INFO']
        opts = {}
        to_parse = environ['QUERY_STRING']
        self.logger.debug("To parse: %s" % to_parse)
        if len(to_parse) > 0:
            res = list(re.match(r'(.*?)&{0,1}url=(.*)', to_parse).groups())
            #replace all until the first occurence of url=
            opts['url'] = res.pop()
            # Remaining prefix holds the ordinary key=value options.
            if len(res) > 0 and len(res[0]) > 0:
                for val in res:
                    args = val.split('&')
                    for arg in args:
                        res_args = list(re.match(r'(.*?)=(.*)', arg).groups())
                        opts[res_args[0]] = res_args[1]
        return (path, opts)
#---------------------------- Main Part ---------------------------------------
def main():
    """Command line entry point: parse options and serve the app forever."""
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    # NOTE(review): description looks copy-pasted from another module.
    parser.set_description ("To test the Logger class.")
    parser.add_option ("-v", "--verbose", dest="verbose",
                       help="Verbose mode",
                       action="store_true", default=False)
    parser.add_option ("-p", "--port", dest="port",
                       help="Http Port (Default: 4041)",
                       type="int", default=4041)
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error("Error: incorrect number of arguments, try --help")
    from wsgiref.simple_server import make_server
    application = WebProcessorApp()
    server = make_server('', options.port, application)
    # Blocks forever serving requests.
    server.serve_forever()
# Standard script entry point.
if __name__ == '__main__':
    main()
import re
from time import time
from livestreamer.plugin import Plugin, PluginError
from livestreamer.plugin.api import http, validate
from livestreamer.stream import RTMPStream, HLSStream
# Player SWF used for RTMP swf verification, and the provider's JSON/XML API.
SWF_URL = "http://play.streamingvideoprovider.com/player2.swf"
API_URL = "http://player.webvideocore.net/index.php"

# NOTE(review): the dots in the hostname are unescaped, so this also matches
# slightly malformed hosts -- presumably harmless, confirm before tightening.
_url_re = re.compile(
    "http(s)?://(\w+\.)?streamingvideoprovider.co.uk/(?P<channel>[^/&?]+)"
)
# First single-quoted m3u8 url embedded in the ajax_video_info answer.
_hls_re = re.compile("'(http://.+\.m3u8)'")

# Extracts the RTMP url from the xmlClipPath XML answer.
_rtmp_schema = validate.Schema(
    validate.xml_findtext("./info/url"),
    validate.url(scheme="rtmp")
)
# Scrapes the HLS playlist url (if any) out of the ajax_video_info answer.
_hls_schema = validate.Schema(
    validate.transform(_hls_re.search),
    validate.any(
        None,
        validate.all(
            validate.get(1),
            validate.url(
                scheme="http",
                path=validate.endswith("m3u8")
            )
        )
    )
)
class Streamingvideoprovider(Plugin):
    """Plugin for streamingvideoprovider.co.uk live channels.

    Tries the RTMP endpoint first and falls back to an HLS playlist when
    the RTMP lookup fails.
    """

    @classmethod
    def can_handle_url(cls, url):
        """Return a match object when ``url`` belongs to this provider."""
        # First parameter of a classmethod receives the class; the original
        # code misleadingly named it ``self``.
        return _url_re.match(url)

    def _get_hls_stream(self, channel_name):
        """Return a HLSStream for ``channel_name``, or None when the API
        answer contains no playlist url."""
        params = {
            "l": "info",
            "a": "ajax_video_info",
            "file": channel_name,
            "rid": time()  # timestamp acts as a cache buster
        }
        playlist_url = http.get(API_URL, params=params, schema=_hls_schema)
        if not playlist_url:
            return

        return HLSStream(self.session, playlist_url)

    def _get_rtmp_stream(self, channel_name):
        """Return a RTMPStream built from the xmlClipPath API answer."""
        params = {
            "l": "info",
            "a": "xmlClipPath",
            "clip_id": channel_name,
            "rid": time()
        }
        res = http.get(API_URL, params=params)
        rtmp_url = http.xml(res, schema=_rtmp_schema)

        return RTMPStream(self.session, {
            "rtmp": rtmp_url,
            "swfVfy": SWF_URL,
            "live": True
        })

    def _get_streams(self):
        """Yield ('live', stream) pairs: RTMP preferred, HLS as fallback."""
        match = _url_re.match(self.url)
        channel_name = match.group("channel")

        try:
            stream = self._get_rtmp_stream(channel_name)
            yield "live", stream
        except PluginError as err:
            self.logger.error("Unable to extract RTMP stream: {0}", err)

        try:
            stream = self._get_hls_stream(channel_name)
            if stream:
                yield "live", stream
        except PluginError as err:
            self.logger.error("Unable to extract HLS stream: {0}", err)
__plugin__ = Streamingvideoprovider | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is largely copied from the Nagios module included in the
# Func project. Original copyright follows:
#
# func-nagios - Schedule downtime and enables/disable notifications
# Copyright 2011, Red Hat, Inc.
# Tim Bielawa <tbielawa@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
  - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
- When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
- When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
action:
description:
- Action to take.
- servicegroup options were added in 2.0.
- delete_downtime options were added in 2.2.
required: true
choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
description:
- Host to operate on in Nagios.
required: false
default: null
cmdfile:
description:
- Path to the nagios I(command file) (FIFO pipe).
Only required if auto-detection fails.
required: false
default: auto-detected
author:
description:
- Author to leave downtime comments as.
Only usable with the C(downtime) action.
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false
default: Scheduling downtime
minutes:
description:
- Minutes to schedule downtime for.
- Only usable with the C(downtime) action.
required: false
default: 30
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
C(service) is an alias for C(services).
B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
aliases: [ "service" ]
required: true
servicegroup:
version_added: "2.0"
description:
- The Servicegroup we want to set downtimes/alerts for.
        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
command:
description:
- The raw command to send to nagios, which
should not include the submitted time header or the line-feed
B(Required) option when using the C(command) action.
required: true
author: "Tim Bielawa (@tbielawa)"
'''
EXAMPLES = '''
# set 30 minutes of apache downtime
- nagios:
action: downtime
minutes: 30
service: httpd
host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime
- nagios:
action: downtime
minutes: 60
service: host
host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime, with a comment describing the reason
- nagios:
action: downtime
minutes: 60
service: host
host: '{{ inventory_hostname }}'
comment: Rebuilding machine
# schedule downtime for ALL services on HOST
- nagios:
action: downtime
minutes: 45
service: all
host: '{{ inventory_hostname }}'
# schedule downtime for a few services
- nagios:
action: downtime
services: frob,foobar,qeuz
host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all services in servicegroup foo
- nagios:
action: servicegroup_service_downtime
minutes: 30
servicegroup: foo
host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all host in servicegroup foo
- nagios:
action: servicegroup_host_downtime
minutes: 30
servicegroup: foo
host: '{{ inventory_hostname }}'
# delete all downtime for a given host
- nagios:
action: delete_downtime
host: '{{ inventory_hostname }}'
service: all
# delete all downtime for HOST with a particular comment
- nagios:
action: delete_downtime
host: '{{ inventory_hostname }}'
service: host
comment: Planned maintenance
# enable SMART disk alerts
- nagios:
action: enable_alerts
service: smart
host: '{{ inventory_hostname }}'
# "two services at once: disable httpd and nfs alerts"
- nagios:
action: disable_alerts
service: httpd,nfs
host: '{{ inventory_hostname }}'
# disable HOST alerts
- nagios:
action: disable_alerts
service: host
host: '{{ inventory_hostname }}'
# silence ALL alerts
- nagios:
action: silence
host: '{{ inventory_hostname }}'
# unsilence all alerts
- nagios:
action: unsilence
host: '{{ inventory_hostname }}'
# SHUT UP NAGIOS
- nagios:
action: silence_nagios
# ANNOY ME NAGIOS
- nagios:
action: unsilence_nagios
# command something
- nagios:
action: command
command: DISABLE_FAILURE_PREDICTION
'''
import ConfigParser
import types
import time
import os.path
######################################################################
def which_cmdfile(locations=None):
    """Locate the Nagios/Icinga external command file.

    Scans well-known nagios.cfg/icinga.cfg locations and returns the
    value of the first ``command_file`` directive found, or None when
    no configuration file (or directive) exists.

    locations -- optional iterable of config-file paths to scan instead
                 of the built-in list.  Backward compatible: calling
                 with no argument keeps the historical search order.
    """
    if locations is None:
        locations = [
            # rhel
            '/etc/nagios/nagios.cfg',
            # debian
            '/etc/nagios3/nagios.cfg',
            # older debian
            '/etc/nagios2/nagios.cfg',
            # bsd, solaris
            '/usr/local/etc/nagios/nagios.cfg',
            # groundwork it monitoring
            '/usr/local/groundwork/nagios/etc/nagios.cfg',
            # open monitoring distribution
            '/omd/sites/oppy/tmp/nagios/nagios.cfg',
            # ???
            '/usr/local/nagios/etc/nagios.cfg',
            '/usr/local/nagios/nagios.cfg',
            '/opt/nagios/etc/nagios.cfg',
            '/opt/nagios/nagios.cfg',
            # icinga on debian/ubuntu
            '/etc/icinga/icinga.cfg',
            # icinga installed from source (default location)
            '/usr/local/icinga/etc/icinga.cfg',
        ]

    for path in locations:
        if not os.path.exists(path):
            continue
        # use a context manager so the handle is closed; the original
        # code leaked an open file per scanned config
        with open(path) as cfg:
            for line in cfg:
                if line.startswith('command_file'):
                    return line.split('=')[1].strip()

    return None
######################################################################
def main():
    """Ansible entry point: parse/validate arguments and run the action.

    Builds the AnsibleModule argument spec, performs the per-action
    validation that AnsibleModule cannot express on its own (minutes
    must be an integer, some actions require a service / servicegroup /
    command), then delegates the real work to Nagios.act().
    """
    ACTION_CHOICES = [
        'downtime',
        'delete_downtime',
        'silence',
        'unsilence',
        'enable_alerts',
        'disable_alerts',
        'silence_nagios',
        'unsilence_nagios',
        'command',
        'servicegroup_host_downtime',
        'servicegroup_service_downtime',
    ]

    module = AnsibleModule(
        argument_spec=dict(
            # required=True already rejects a missing value, so the old
            # contradictory 'default=None' has been dropped.
            action=dict(required=True, choices=ACTION_CHOICES),
            author=dict(default='Ansible'),
            comment=dict(default='Scheduling downtime'),
            host=dict(required=False, default=None),
            servicegroup=dict(required=False, default=None),
            minutes=dict(default=30),
            cmdfile=dict(default=which_cmdfile()),
            services=dict(default=None, aliases=['service']),
            command=dict(required=False, default=None),
        )
    )

    action = module.params['action']
    host = module.params['host']
    servicegroup = module.params['servicegroup']
    minutes = module.params['minutes']
    services = module.params['services']
    cmdfile = module.params['cmdfile']
    command = module.params['command']

    def validate_minutes():
        # 'minutes' usually arrives as a YAML string; int() both
        # validates and normalizes it.  The old
        # isinstance(int(x), types.IntType) check was dead code (int()
        # always returns an int) and broke on Python 3, where
        # types.IntType no longer exists.
        try:
            int(minutes)
        except (TypeError, ValueError):
            module.fail_json(msg='invalid entry for minutes')

    ##################################################################
    # Required args per action:
    #  downtime = (minutes, service, host)
    #  (un)silence = (host)
    #  (enable/disable)_alerts = (service, host)
    #  command = command
    #
    # AnsibleModule will verify most stuff, we need to verify
    # 'minutes' and 'service' manually.
    ##################################################################
    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
        if not host:
            module.fail_json(msg='no host specified for action requiring one')

    if action == 'downtime':
        # Make sure there's an actual service selected
        if not services:
            module.fail_json(msg='no service selected to set downtime for')
        validate_minutes()

    if action == 'delete_downtime':
        # Make sure there's an actual service selected
        if not services:
            module.fail_json(msg='no service selected to set downtime for')

    if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
        # Make sure there's an actual servicegroup selected
        if not servicegroup:
            module.fail_json(msg='no servicegroup selected to set downtime for')
        validate_minutes()

    if action in ['enable_alerts', 'disable_alerts']:
        if not services:
            module.fail_json(msg='a service is required when setting alerts')

    if action in ['command']:
        if not command:
            module.fail_json(msg='no command passed for command action')

    if not cmdfile:
        module.fail_json(msg='unable to locate nagios.cfg')

    ansible_nagios = Nagios(module, **module.params)
    if module.check_mode:
        # In check mode report "would change" without touching Nagios.
        module.exit_json(changed=True)
    else:
        ansible_nagios.act()
##################################################################
######################################################################
class Nagios(object):
    """
    Perform common tasks in Nagios related to downtime and
    notifications.

    The complete set of external commands Nagios handles is documented
    on their website:

    http://old.nagios.org/developerinfo/externalcommands/commandlist.php

    Note that in the case of `schedule_svc_downtime`,
    `enable_svc_notifications`, and `disable_svc_notifications`, the
    service argument should be passed as a list.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.action = kwargs['action']
        self.author = kwargs['author']
        self.comment = kwargs['comment']
        self.host = kwargs['host']
        self.servicegroup = kwargs['servicegroup']
        self.minutes = int(kwargs['minutes'])
        self.cmdfile = kwargs['cmdfile']
        self.command = kwargs['command']

        # 'host' and 'all' are magic service values handled specially
        # in act(); anything else is a comma-separated list of services.
        if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
            self.services = kwargs['services']
        else:
            self.services = kwargs['services'].split(',')

        # every command string successfully written is recorded here and
        # returned to Ansible via exit_json in act()
        self.command_results = []

    def _now(self):
        """
        The time in seconds since 12:00:00AM Jan 1, 1970
        """
        return int(time.time())

    def _write_command(self, cmd):
        """
        Write the given command to the Nagios command file.

        Returns True on success; on failure the module exits via
        fail_json.  (Bug fix: this method used to fall off the end and
        return None, so every ``if nagios_return:`` check in the
        enable_*/silence_*/unsilence_* helpers below always took the
        failure branch even when the write succeeded.)
        """
        try:
            fp = open(self.cmdfile, 'w')
            fp.write(cmd)
            fp.flush()
            fp.close()
            self.command_results.append(cmd.strip())
            return True
        except IOError:
            self.module.fail_json(msg='unable to write to nagios command file',
                                  cmdfile=self.cmdfile)

    def _fmt_dt_str(self, cmd, host, duration, author=None,
                    comment=None, start=None,
                    svc=None, fixed=1, trigger=0):
        """
        Format an external-command downtime string.

        cmd - Nagios command ID
        host - Host schedule downtime on
        duration - Minutes to schedule downtime for
        author - Name to file the downtime as
        comment - Reason for running this command (upgrade, reboot, etc)
        start - Start of downtime in seconds since 12:00AM Jan 1 1970
          Default is to use the entry time (now)
        svc - Service to schedule downtime for, omit when for host downtime
        fixed - Start now if 1, start when a problem is detected if 0
        trigger - Optional ID of event to start downtime from. Leave as 0 for
          fixed downtime.

        Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        entry_time = self._now()
        if start is None:
            start = entry_time

        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
        duration_s = (duration * 60)
        end = start + duration_s

        if not author:
            author = self.author

        if not comment:
            comment = self.comment

        if svc is not None:
            dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]
        else:
            # Downtime for a host if no svc specified
            dt_args = [str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]

        dt_arg_str = ";".join(dt_args)
        dt_str = hdr + dt_arg_str + "\n"

        return dt_str

    def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
        """
        Format an external-command downtime deletion string.

        cmd - Nagios command ID
        host - Host to remove scheduled downtime from
        comment - Reason downtime was added (upgrade, reboot, etc)
        start - Start of downtime in seconds since 12:00AM Jan 1 1970
        svc - Service to remove downtime for, omit to remove all downtime for the host

        Syntax: [submitted] COMMAND;<host_name>;
        [<service_desription>];[<start_time>];[<comment>]
        """
        entry_time = self._now()
        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)

        if comment is None:
            comment = self.comment

        dt_del_args = []
        if svc is not None:
            dt_del_args.append(svc)
        else:
            dt_del_args.append('')

        if start is not None:
            dt_del_args.append(str(start))
        else:
            dt_del_args.append('')

        if comment is not None:
            dt_del_args.append(comment)
        else:
            dt_del_args.append('')

        dt_del_arg_str = ";".join(dt_del_args)
        dt_del_str = hdr + dt_del_arg_str + "\n"

        return dt_del_str

    def _fmt_notif_str(self, cmd, host=None, svc=None):
        """
        Format an external-command notification string.

        cmd - Nagios command ID.
        host - Host to en/disable notifications on.. A value is not required
          for global downtime
        svc - Service to schedule downtime for. A value is not required
          for host downtime.

        Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
        """
        entry_time = self._now()
        notif_str = "[%s] %s" % (entry_time, cmd)
        if host is not None:
            notif_str += ";%s" % host

            if svc is not None:
                notif_str += ";%s" % svc

        notif_str += "\n"

        return notif_str

    def schedule_svc_downtime(self, host, services=None, minutes=30):
        """
        This command is used to schedule downtime for a particular
        service.

        During the specified downtime, Nagios will not send
        notifications out about the service.

        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SVC_DOWNTIME"

        if services is None:
            services = []

        for service in services:
            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
            self._write_command(dt_cmd_str)

    def schedule_host_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for a particular
        host.

        During the specified downtime, Nagios will not send
        notifications out about the host.

        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def schedule_host_svc_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for
        all services associated with a particular host.

        During the specified downtime, Nagios will not send
        notifications out about the host.

        SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def delete_host_downtime(self, host, services=None, comment=None):
        """
        This command is used to remove scheduled downtime for a particular
        host.

        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
        [<service_desription>];[<start_time>];[<comment>]
        """
        cmd = "DEL_DOWNTIME_BY_HOST_NAME"

        if services is None:
            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
            self._write_command(dt_del_cmd_str)
        else:
            for service in services:
                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
                self._write_command(dt_del_cmd_str)

    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular hostgroup.

        During the specified downtime, Nagios will not send
        notifications out about the hosts.

        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular hostgroup.

        During the specified downtime, Nagios will not send
        notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular servicegroup.

        During the specified downtime, Nagios will not send
        notifications out about the hosts.

        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular servicegroup.

        During the specified downtime, Nagios will not send
        notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def disable_host_svc_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for all services on the specified host.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_host_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the specified host.

        Note that this command does not disable notifications for
        services associated with this host.

        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_svc_notifications(self, host, services=None):
        """
        This command is used to prevent notifications from being sent
        out for the specified service.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "DISABLE_SVC_NOTIFICATIONS"

        if services is None:
            services = []

        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            self._write_command(notif_str)

    def disable_servicegroup_host_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all hosts in the specified servicegroup.

        Note that this command does not disable notifications for
        services associated with hosts in this service group.

        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_servicegroup_svc_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all services in the specified servicegroup.

        Note that this does not prevent notifications from being sent
        out about the hosts in this servicegroup.

        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_hostgroup_host_notifications(self, hostgroup):
        """
        Disables notifications for all hosts in a particular
        hostgroup.

        Note that this does not disable notifications for the services
        associated with the hosts in the hostgroup - see the
        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.

        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def disable_hostgroup_svc_notifications(self, hostgroup):
        """
        Disables notifications for all services associated with hosts
        in a particular hostgroup.

        Note that this does not disable notifications for the hosts in
        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
        command for that.

        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def enable_host_notifications(self, host):
        """
        Enables notifications for a particular host.

        Note that this command does not enable notifications for
        services associated with this host.

        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def enable_host_svc_notifications(self, host):
        """
        Enables notifications for all services on the specified host.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_svc_notifications(self, host, services=None):
        """
        Enables notifications for a particular service.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "ENABLE_SVC_NOTIFICATIONS"

        if services is None:
            services = []

        nagios_return = True
        return_str_list = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_host_notifications(self, hostgroup):
        """
        Enables notifications for all hosts in a particular hostgroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this hostgroup.

        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_svc_notifications(self, hostgroup):
        """
        Enables notifications for all services that are associated
        with hosts in a particular hostgroup.

        Note that this does not enable notifications for the hosts in
        this hostgroup.

        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_host_notifications(self, servicegroup):
        """
        Enables notifications for all hosts that have services that
        are members of a particular servicegroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_svc_notifications(self, servicegroup):
        """
        Enables notifications for all services that are members of a
        particular servicegroup.

        Note that this does not enable notifications for the hosts in
        this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def silence_host(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the host and all services on the specified host.

        This is equivalent to calling disable_host_svc_notifications
        and disable_host_notifications.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "DISABLE_HOST_SVC_NOTIFICATIONS",
            "DISABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def unsilence_host(self, host):
        """
        This command is used to enable notifications for the host and
        all services on the specified host.

        This is equivalent to calling enable_host_svc_notifications
        and enable_host_notifications.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "ENABLE_HOST_SVC_NOTIFICATIONS",
            "ENABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def silence_nagios(self):
        """
        This command is used to disable notifications for all hosts and services
        in nagios.

        This is a 'SHUT UP, NAGIOS' command
        """
        cmd = 'DISABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def unsilence_nagios(self):
        """
        This command is used to enable notifications for all hosts and services
        in nagios.

        This is a 'OK, NAGIOS, GO'' command
        """
        cmd = 'ENABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def nagios_cmd(self, cmd):
        """
        This sends an arbitrary command to nagios

        It prepends the submitted time and appends a \n

        You just have to provide the properly formatted command
        """
        pre = '[%s]' % int(time.time())

        post = '\n'
        cmdstr = '%s %s%s' % (pre, cmd, post)
        self._write_command(cmdstr)

    def act(self):
        """
        Figure out what you want to do from ansible, and then do the
        needful (at the earliest).
        """
        # host or service downtime?
        if self.action == 'downtime':
            if self.services == 'host':
                self.schedule_host_downtime(self.host, self.minutes)
            elif self.services == 'all':
                self.schedule_host_svc_downtime(self.host, self.minutes)
            else:
                self.schedule_svc_downtime(self.host,
                                           services=self.services,
                                           minutes=self.minutes)

        elif self.action == 'delete_downtime':
            if self.services == 'host':
                self.delete_host_downtime(self.host)
            elif self.services == 'all':
                self.delete_host_downtime(self.host, comment='')
            else:
                self.delete_host_downtime(self.host, services=self.services)

        elif self.action == "servicegroup_host_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
        elif self.action == "servicegroup_service_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes)

        # toggle the host AND service alerts
        elif self.action == 'silence':
            self.silence_host(self.host)

        elif self.action == 'unsilence':
            self.unsilence_host(self.host)

        # toggle host/svc alerts
        elif self.action == 'enable_alerts':
            if self.services == 'host':
                self.enable_host_notifications(self.host)
            elif self.services == 'all':
                self.enable_host_svc_notifications(self.host)
            else:
                self.enable_svc_notifications(self.host,
                                              services=self.services)

        elif self.action == 'disable_alerts':
            if self.services == 'host':
                self.disable_host_notifications(self.host)
            elif self.services == 'all':
                self.disable_host_svc_notifications(self.host)
            else:
                self.disable_svc_notifications(self.host,
                                               services=self.services)
        elif self.action == 'silence_nagios':
            self.silence_nagios()

        elif self.action == 'unsilence_nagios':
            self.unsilence_nagios()

        elif self.action == 'command':
            self.nagios_cmd(self.command)

        # wtf?
        else:
            self.module.fail_json(msg="unknown action specified: '%s'" %
                                      self.action)

        self.module.exit_json(nagios_commands=self.command_results,
                              changed=True)
######################################################################
# import module snippets
from ansible.module_utils.basic import *
# Run the entry point only when executed directly (how Ansible invokes modules).
if __name__ == '__main__':
    main()
---
title: "Preview Mode for Static Generation"
excerpt: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Praesent elementum facilisis leo vel fringilla est ullamcorper eget. At imperdiet dui accumsan sit amet nulla facilities morbi tempus."
coverImage: "/assets/blog/preview/cover.jpg"
date: "2020-03-16T05:35:07.322Z"
author:
name: Joe Haddad
picture: "/assets/blog/authors/joe.jpeg"
ogImage:
url: "/assets/blog/preview/cover.jpg"
---
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Praesent elementum facilisis leo vel fringilla est ullamcorper eget. At imperdiet dui accumsan sit amet nulla facilities morbi tempus. Praesent elementum facilisis leo vel fringilla. Congue mauris rhoncus aenean vel. Egestas sed tempus urna et pharetra pharetra massa massa ultricies.
Venenatis cras sed felis eget velit. Consectetur libero id faucibus nisl tincidunt. Gravida in fermentum et sollicitudin ac orci phasellus egestas tellus. Volutpat consequat mauris nunc congue nisi vitae. Id aliquet risus feugiat in ante metus dictum at tempor. Sed blandit libero volutpat sed cras. Sed odio morbi quis commodo odio aenean sed adipiscing. Velit euismod in pellentesque massa placerat. Mi bibendum neque egestas congue quisque egestas diam in arcu. Nisi lacus sed viverra tellus in. Nibh cras pulvinar mattis nunc sed. Luctus accumsan tortor posuere ac ut consequat semper viverra. Fringilla ut morbi tincidunt augue interdum velit euismod.
## Lorem Ipsum
Tristique senectus et netus et malesuada fames ac turpis. Ridiculous mus mauris vitae ultricies leo integer malesuada nunc vel. In mollis nunc sed id semper. Egestas tellus rutrum tellus pellentesque. Phasellus vestibulum lorem sed risus ultricies tristique nulla. Quis blandit turpis cursus in hac habitasse platea dictumst quisque. Eros donec ac odio tempor orci dapibus ultrices. Aliquam sem et tortor consequat id porta nibh. Adipiscing elit duis tristique sollicitudin nibh sit amet commodo nulla. Diam vulputate ut pharetra sit amet. Ut tellus elementum sagittis vitae et leo. Arcu non odio euismod lacinia at quis risus sed vulputate. | unknown | github | https://github.com/vercel/next.js | examples/blog-starter/_posts/preview.md |
#!/usr/bin/env python
##Copyright 2008-2017 Thomas Paviot (tpaviot@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
import itertools
import math
import os
import sys
import time
import OCC
from OCC.Core.Aspect import Aspect_GFM_VER
from OCC.Core.AIS import AIS_Shape, AIS_Shaded, AIS_TexturedShape, AIS_WireFrame, AIS_Shape_SelectionMode
from OCC.Core.gp import gp_Dir, gp_Pnt, gp_Pnt2d, gp_Vec
from OCC.Core.BRepBuilderAPI import (BRepBuilderAPI_MakeVertex,
BRepBuilderAPI_MakeEdge,
BRepBuilderAPI_MakeEdge2d,
BRepBuilderAPI_MakeFace)
from OCC.Core.TopAbs import (TopAbs_FACE, TopAbs_EDGE, TopAbs_VERTEX,
TopAbs_SHELL, TopAbs_SOLID)
from OCC.Core.Geom import Geom_Curve, Geom_Surface
from OCC.Core.Geom2d import Geom2d_Curve
from OCC.Core.Visualization import Display3d
from OCC.Core.V3d import (V3d_ZBUFFER, V3d_Zpos, V3d_Zneg, V3d_Xpos,
V3d_Xneg, V3d_Ypos, V3d_Yneg, V3d_XposYnegZpos)
from OCC.Core.TCollection import TCollection_ExtendedString, TCollection_AsciiString
from OCC.Core.Quantity import (Quantity_Color, Quantity_TOC_RGB, Quantity_NOC_WHITE,
Quantity_NOC_BLACK, Quantity_NOC_BLUE1,
Quantity_NOC_CYAN1, Quantity_NOC_RED,
Quantity_NOC_GREEN, Quantity_NOC_ORANGE, Quantity_NOC_YELLOW)
from OCC.Core.Prs3d import Prs3d_Arrow, Prs3d_Text, Prs3d_TextAspect
from OCC.Core.Graphic3d import (Graphic3d_NOM_NEON_GNC, Graphic3d_NOT_ENV_CLOUDS,
Handle_Graphic3d_TextureEnv_Create, Graphic3d_TextureEnv,
Graphic3d_Camera, Graphic3d_RM_RAYTRACING,
Graphic3d_RM_RASTERIZATION,
Graphic3d_StereoMode_QuadBuffer,
Graphic3d_RenderingParams,
Graphic3d_MaterialAspect,
Graphic3d_TOSM_FRAGMENT,
Graphic3d_Structure,
Graphic3d_GraduatedTrihedron,
Graphic3d_NameOfMaterial)
from OCC.Core.Aspect import (Aspect_TOTP_RIGHT_LOWER, Aspect_FM_STRETCH,
Aspect_FM_NONE)
# Shaders and Units definition must be found by occ
# the fastest way to get done is to set the CASROOT env variable
# it must point to the /share folder.
if sys.platform == "win32":
    # do the same for Units
    if "CASROOT" in os.environ:
        # The user already set CASROOT: trust it, but fail loudly if it
        # does not point to an existing directory rather than crashing
        # later inside OCC.
        casroot_path = os.environ["CASROOT"]
        # raise an error, force the user to correctly set the variable
        err_msg = "Please set the CASROOT env variable (%s is not ok)" % casroot_path
        if not os.path.isdir(casroot_path):
            raise AssertionError(err_msg)
    else:  # on miniconda or anaconda or whatever conda
        # Guess the conda layout: <env>/Library/share/oce relative to the
        # installed OCC package directory.
        occ_package_path = os.path.dirname(OCC.__file__)
        casroot_path = os.path.join(occ_package_path, '..', '..', '..',
                                    'Library', 'share', 'oce')
        # we check that all required files are at the right place
        shaders_dict_found = os.path.isdir(os.path.join(casroot_path,
                                                        'src', 'Shaders'))
        unitlexicon_found = os.path.isfile(os.path.join(casroot_path,
                                                        'src', 'UnitsAPI',
                                                        'Lexi_Expr.dat'))
        unitsdefinition_found = os.path.isfile(os.path.join(casroot_path,
                                                            'src', 'UnitsAPI',
                                                            'Units.dat'))
        # Only export CASROOT when every required resource is present;
        # otherwise leave the environment untouched.
        if shaders_dict_found and unitlexicon_found and unitsdefinition_found:
            os.environ["CASROOT"] = casroot_path
def rgb_color(r, g, b):
    """Build an OCC Quantity_Color from the given RGB components."""
    color = Quantity_Color(r, g, b, Quantity_TOC_RGB)
    return color
def get_color_from_name(color_name):
    """Map a color name (case insensitive, e.g. 'WHITE') to a Quantity_Color.

    Falls back to white (with a console warning) when the name matches no
    Quantity_NOC_* constant imported into this module.
    """
    table = globals()
    enum_name = 'Quantity_NOC_%s' % color_name.upper()
    if enum_name in table:
        return Quantity_Color(table[enum_name])
    if enum_name + '1' in table:
        # several shades exist (e.g. BLUE1, BLUE2, ...): take the first
        print('Many colors for color name %s, using first.' % color_name)
        return Quantity_Color(table[enum_name + '1'])
    print('Color name not defined. Use White by default')
    return Quantity_Color(Quantity_NOC_WHITE)
def to_string(_string):
    """Wrap a Python string into an OCC TCollection_ExtendedString."""
    extended = TCollection_ExtendedString(_string)
    return extended
# Endless cycle of topology selection modes, used to toggle selection
# granularity (face -> edge -> vertex -> shell -> solid -> face -> ...).
modes = itertools.cycle([TopAbs_FACE, TopAbs_EDGE,
                         TopAbs_VERTEX,
                         TopAbs_SHELL, TopAbs_SOLID])
class Viewer3d(Display3d):
    """High-level OpenCASCADE 3d viewer.

    Wraps a Display3d and exposes the AIS interactive context
    (self.Context), the V3d viewer (self.Viewer) and view (self.View),
    with helpers for displaying shapes, configuring rendering and
    managing interactive selection.
    """

    def __init__(self):
        Display3d.__init__(self)
        self._parent = None  # the parent opengl GUI container
        self._inited = False  # becomes True once Create() has completed
        self._local_context_opened = False
        self.Context = self.GetContext()
        self.Viewer = self.GetViewer()
        self.View = self.GetView()
        self.default_drawer = None
        self._struc_mgr = None  # Graphic3d structure manager, needed for text rendering
        self._is_offscreen = None
        self.selected_shapes = []  # TopoDS shapes picked by the last selection
        self._select_callbacks = []  # callables fired after each selection
        self._overlay_items = []

    def get_parent(self):
        """Return the parent GUI container this viewer is embedded in."""
        return self._parent

    def register_overlay_item(self, overlay_item):
        """Register an overlay item and force a view refresh."""
        self._overlay_items.append(overlay_item)
        self.View.MustBeResized()
        self.View.Redraw()

    def register_select_callback(self, callback):
        """ Adds a callback that will be called each time a shape is selected
        """
        if not callable(callback):
            raise AssertionError("You must provide a callable to register the callback")
        self._select_callbacks.append(callback)

    def unregister_callback(self, callback):
        """ Remove a callback from the callback list
        """
        # idiomatic membership test (was: `not callback in ...`)
        if callback not in self._select_callbacks:
            raise AssertionError("This callback is not registered")
        self._select_callbacks.remove(callback)

    def MoveTo(self, X, Y):
        """Relay the mouse position (pixels) to the context for dynamic highlight."""
        self.Context.MoveTo(X, Y, self.View, True)

    def FitAll(self):
        """Fit the whole displayed scene into the view."""
        self.View.ZFitAll()
        self.View.FitAll()

    def Create(self, window_handle=None, parent=None, create_default_lights=True,
               draw_face_boundaries=True, phong_shading=True, display_glinfo=True):
        """Initialize the viewer.

        window_handle: OS window handle to render into; when None an
            offscreen 640x480 renderer is created instead.
        parent: optional parent GUI container.
        create_default_lights: install the default V3d light rig.
        draw_face_boundaries: draw black face contour edges.
        phong_shading: per-fragment (phong) shading instead of gouraud.
        display_glinfo: print the OpenGL driver information.
        """
        self._window_handle = window_handle
        self._parent = parent
        if self._window_handle is None:
            self.InitOffscreen(640, 480)
            self._is_offscreen = True
        else:
            self.Init(self._window_handle)
            self._is_offscreen = False
        # display OpenGl Information
        if display_glinfo:
            self.GlInfo()
        if create_default_lights:
            self.Viewer.SetDefaultLights()
            self.Viewer.SetLightOn()
        self.camera = self.View.Camera()
        self.default_drawer = self.Context.DefaultDrawer()
        # draw black contour edges, like other famous CAD packages
        if draw_face_boundaries:
            self.default_drawer.SetFaceBoundaryDraw(True)
        # turn up tesselation defaults, which are too conservative...
        chord_dev = self.default_drawer.MaximalChordialDeviation() / 10.
        self.default_drawer.SetMaximalChordialDeviation(chord_dev)
        if phong_shading:
            # gouraud shading by default, prefer phong instead
            self.View.SetShadingModel(Graphic3d_TOSM_FRAGMENT)
        # necessary for text rendering
        self._struc_mgr = self.Context.MainPrsMgr().StructureManager()
        # turn self._inited flag to True
        self._inited = True

    def OnResize(self):
        self.View.MustBeResized()

    def ResetView(self):
        self.View.Reset()

    def Repaint(self):
        self.Viewer.Redraw()

    def SetModeWireFrame(self):
        """Display all objects in wireframe."""
        self.View.SetComputedMode(False)
        self.Context.SetDisplayMode(AIS_WireFrame, True)

    def SetModeShaded(self):
        """Display all objects shaded."""
        self.View.SetComputedMode(False)
        self.Context.SetDisplayMode(AIS_Shaded, True)

    def SetModeHLR(self):
        """Enable hidden-line-removal (computed) mode."""
        self.View.SetComputedMode(True)

    def SetOrthographicProjection(self):
        self.camera.SetProjectionType(Graphic3d_Camera.Projection_Orthographic)

    def SetPerspectiveProjection(self):
        self.camera.SetProjectionType(Graphic3d_Camera.Projection_Perspective)

    # predefined camera orientations
    def View_Top(self):
        self.View.SetProj(V3d_Zpos)

    def View_Bottom(self):
        self.View.SetProj(V3d_Zneg)

    def View_Left(self):
        self.View.SetProj(V3d_Xneg)

    def View_Right(self):
        self.View.SetProj(V3d_Xpos)

    def View_Front(self):
        self.View.SetProj(V3d_Yneg)

    def View_Rear(self):
        self.View.SetProj(V3d_Ypos)

    def View_Iso(self):
        self.View.SetProj(V3d_XposYnegZpos)

    def EnableTextureEnv(self, name_of_texture=Graphic3d_NOT_ENV_CLOUDS):
        """ enable environment mapping. Possible modes are
        Graphic3d_NOT_ENV_CLOUDS
        Graphic3d_NOT_ENV_CV
        Graphic3d_NOT_ENV_MEDIT
        Graphic3d_NOT_ENV_PEARL
        Graphic3d_NOT_ENV_SKY1
        Graphic3d_NOT_ENV_SKY2
        Graphic3d_NOT_ENV_LINES
        Graphic3d_NOT_ENV_ROAD
        Graphic3d_NOT_ENV_UNKNOWN
        """
        texture_env = Graphic3d_TextureEnv(name_of_texture)
        self.View.SetTextureEnv(texture_env)
        self.View.Redraw()

    def DisableTextureEnv(self):
        """Disable environment mapping."""
        a_null_texture = Handle_Graphic3d_TextureEnv_Create()
        self.View.SetTextureEnv(a_null_texture)  # Passing null handle to clear the texture data
        self.View.Redraw()

    def SetRenderingParams(self,
                           Method=Graphic3d_RM_RASTERIZATION,
                           RaytracingDepth=3,
                           IsShadowEnabled=True,
                           IsReflectionEnabled=False,
                           IsAntialiasingEnabled=False,
                           IsTransparentShadowEnabled=False,
                           StereoMode=Graphic3d_StereoMode_QuadBuffer,
                           AnaglyphFilter=Graphic3d_RenderingParams.Anaglyph_RedCyan_Optimized,
                           ToReverseStereo=False):
        """ Default values are :
        Method=Graphic3d_RM_RASTERIZATION,
        RaytracingDepth=3,
        IsShadowEnabled=True,
        IsReflectionEnabled=False,
        IsAntialiasingEnabled=False,
        IsTransparentShadowEnabled=False,
        StereoMode=Graphic3d_StereoMode_QuadBuffer,
        AnaglyphFilter=Graphic3d_RenderingParams.Anaglyph_RedCyan_Optimized,
        ToReverseStereo=False)
        """
        self.ChangeRenderingParams(Method,
                                   RaytracingDepth,
                                   IsShadowEnabled,
                                   IsReflectionEnabled,
                                   IsAntialiasingEnabled,
                                   IsTransparentShadowEnabled,
                                   StereoMode,
                                   AnaglyphFilter,
                                   ToReverseStereo)

    def SetRasterizationMode(self):
        """ to enable rasterization mode, just call the SetRenderingParams
        with default values
        """
        self.SetRenderingParams()

    def SetRaytracingMode(self, depth=3):
        """ enables the raytracing mode
        """
        self.SetRenderingParams(Method=Graphic3d_RM_RAYTRACING,
                                RaytracingDepth=depth,
                                IsAntialiasingEnabled=True,
                                IsShadowEnabled=True,
                                IsReflectionEnabled=True,
                                IsTransparentShadowEnabled=True)

    def ExportToImage(self, image_filename):
        """Dump the current view to an image file."""
        self.View.Dump(image_filename)

    def display_graduated_trihedron(self):
        """Display a graduated (with axis rulers) trihedron."""
        a_trihedron_data = Graphic3d_GraduatedTrihedron()
        self.View.GraduatedTrihedronDisplay(a_trihedron_data)

    def display_triedron(self):
        """ Show a black triedron in lower right corner
        """
        self.View.TriedronDisplay(Aspect_TOTP_RIGHT_LOWER, Quantity_Color(Quantity_NOC_BLACK), 0.1, V3d_ZBUFFER)

    def hide_triedron(self):
        """ Hide the triedron
        """
        self.View.TriedronErase()

    def set_bg_gradient_color(self, color1, color2, fill_method=Aspect_GFM_VER):
        """ set a bg vertical gradient color.
        color1 is [R1, G1, B1], each being bytes or an instance of Quantity_Color
        color2 is [R2, G2, B2], each being bytes or an instance of Quantity_Color
        fill_method is one of Aspect_GFM_VER value Aspect_GFM_NONE, Aspect_GFM_HOR,
        Aspect_GFM_VER, Aspect_GFM_DIAG1, Aspect_GFM_DIAG2, Aspect_GFM_CORNER1, Aspect_GFM_CORNER2,
        Aspect_GFM_CORNER3, Aspect_GFM_CORNER4
        """
        if isinstance(color1, list) and isinstance(color2, list):
            R1, G1, B1 = color1
            R2, G2, B2 = color2
            color1 = rgb_color(float(R1)/255., float(G1)/255., float(B1)/255.)
            color2 = rgb_color(float(R2)/255., float(G2)/255., float(B2)/255.)
        # BUGFIX: the original condition (`not A and B`) only raised when
        # color1 was invalid but color2 was a Quantity_Color, letting other
        # invalid combinations through to SetBgGradientColors. Reject unless
        # BOTH are Quantity_Color instances.
        elif not (isinstance(color1, Quantity_Color) and isinstance(color2, Quantity_Color)):
            raise AssertionError("color1 and color2 must be either [R, G, B] lists or a Quantity_Color")
        self.View.SetBgGradientColors(color1, color2, fill_method, True)

    def SetBackgroundImage(self, image_filename, stretch=True):
        """ displays a background image (jpg, png etc.)
        """
        if not os.path.isfile(image_filename):
            raise IOError("image file %s not found." % image_filename)
        if stretch:
            self.View.SetBackgroundImage(image_filename, Aspect_FM_STRETCH, True)
        else:
            self.View.SetBackgroundImage(image_filename, Aspect_FM_NONE, True)

    def DisplayVector(self, vec, pnt, update=False):
        """ displays a vector as an arrow
        """
        if self._inited:
            aStructure = Graphic3d_Structure(self._struc_mgr)
            pnt_as_vec = gp_Vec(pnt.X(), pnt.Y(), pnt.Z())
            start = pnt_as_vec + vec
            pnt_start = gp_Pnt(start.X(), start.Y(), start.Z())
            Prs3d_Arrow.Draw(
                aStructure,
                pnt_start,
                gp_Dir(vec),
                math.radians(20),
                vec.Magnitude()
            )
            aStructure.Display()
            # it would be more coherent if a AIS_InteractiveObject
            # would be returned
            if update:
                self.Repaint()
            return aStructure

    def DisplayMessage(self, point, text_to_write, height=None, message_color=None, update=False):
        """
        :point: a gp_Pnt or gp_Pnt2d instance
        :text_to_write: a string
        :height: optional text height
        :message_color: triple with the range 0-1
        """
        aStructure = Graphic3d_Structure(self._struc_mgr)
        text_aspect = Prs3d_TextAspect()
        if message_color is not None:
            text_aspect.SetColor(rgb_color(*message_color))
        if height is not None:
            text_aspect.SetHeight(height)
        if isinstance(point, gp_Pnt2d):
            point = gp_Pnt(point.X(), point.Y(), 0)
        Prs3d_Text.Draw(aStructure,
                        text_aspect,
                        to_string(text_to_write),
                        point)
        aStructure.Display()
        # @TODO: it would be more coherent if a AIS_InteractiveObject
        # is be returned
        if update:
            self.Repaint()
        return aStructure

    def DisplayShape(self, shapes, material=None, texture=None, color=None, transparency=None, update=False):
        """ display one or a set of displayable objects

        shapes: a TopoDS_Shape, gp_Pnt/gp_Pnt2d, Geom/Geom2d curve or
            Geom surface, or a list of those.
        material: optional Graphic3d_NameOfMaterial or material aspect.
        texture: optional texture object (see AIS_TexturedShape).
        color: optional color name (str), color int, or Quantity_Color.
        transparency: optional float in [0, 1].
        update: when True, fit and repaint the view after display.

        Returns the list of created AIS shapes.
        """
        ais_shapes = []  # the list of all displayed shapes
        # convert the various geometric inputs to TopoDS_Shapes first
        if issubclass(shapes.__class__, gp_Pnt):
            # if a gp_Pnt is passed, first convert to vertex
            vertex = BRepBuilderAPI_MakeVertex(shapes)
            shapes = [vertex.Shape()]
        elif isinstance(shapes, gp_Pnt2d):
            vertex = BRepBuilderAPI_MakeVertex(gp_Pnt(shapes.X(), shapes.Y(), 0))
            shapes = [vertex.Shape()]
        elif isinstance(shapes, Geom_Surface):
            bounds = True
            toldegen = 1e-6
            face = BRepBuilderAPI_MakeFace()
            face.Init(shapes, bounds, toldegen)
            face.Build()
            shapes = [face.Shape()]
        elif isinstance(shapes, Geom_Curve):
            edge = BRepBuilderAPI_MakeEdge(shapes)
            shapes = [edge.Shape()]
        elif isinstance(shapes, Geom2d_Curve):
            edge2d = BRepBuilderAPI_MakeEdge2d(shapes)
            shapes = [edge2d.Shape()]
        # if only one shapes, create a list with a single shape
        if not isinstance(shapes, list):
            shapes = [shapes]
        # build AIS_Shapes list
        for shape in shapes:
            if material or texture:
                if texture:
                    shape_to_display = AIS_TexturedShape(shape)
                    filename, toScaleU, toScaleV, toRepeatU, toRepeatV, originU, originV = texture.GetProperties()
                    shape_to_display.SetTextureFileName(TCollection_AsciiString(filename))
                    shape_to_display.SetTextureMapOn()
                    shape_to_display.SetTextureScale(True, toScaleU, toScaleV)
                    shape_to_display.SetTextureRepeat(True, toRepeatU, toRepeatV)
                    shape_to_display.SetTextureOrigin(True, originU, originV)
                    shape_to_display.SetDisplayMode(3)
                elif material:
                    shape_to_display = AIS_Shape(shape)
                    if isinstance(material, Graphic3d_NameOfMaterial):
                        shape_to_display.SetMaterial(Graphic3d_MaterialAspect(material))
                    else:
                        shape_to_display.SetMaterial(material)
            else:
                # TODO: can we use .Set to attach all TopoDS_Shapes
                # to this AIS_Shape instance?
                shape_to_display = AIS_Shape(shape)
            ais_shapes.append(shape_to_display)
        # set the graphic properties
        if material is None:
            # The default material is too shiny to show the object
            # color well, so I set it to something less reflective
            for shape_to_display in ais_shapes:
                shape_to_display.SetMaterial(Graphic3d_MaterialAspect(Graphic3d_NOM_NEON_GNC))
        if color:
            if isinstance(color, str):
                color = get_color_from_name(color)
            elif isinstance(color, int):
                color = Quantity_Color(color)
            for shp in ais_shapes:
                self.Context.SetColor(shp, color, False)
        if transparency:
            for shape_to_display in ais_shapes:
                shape_to_display.SetTransparency(transparency)
        # display the shapes
        for shape_to_display in ais_shapes:
            self.Context.Display(shape_to_display, False)
        if update:
            # especially this call takes up a lot of time...
            self.FitAll()
            self.Repaint()
        return ais_shapes

    def DisplayColoredShape(self, shapes, color='YELLOW', update=False):
        """Display shapes with one of a few named colors, or a Quantity_Color."""
        if isinstance(color, str):
            dict_color = {'WHITE': Quantity_NOC_WHITE,
                          'BLUE': Quantity_NOC_BLUE1,
                          'RED': Quantity_NOC_RED,
                          'GREEN': Quantity_NOC_GREEN,
                          'YELLOW': Quantity_NOC_YELLOW,
                          'CYAN': Quantity_NOC_CYAN1,
                          'BLACK': Quantity_NOC_BLACK,
                          'ORANGE': Quantity_NOC_ORANGE}
            clr = dict_color[color]
        elif isinstance(color, Quantity_Color):
            clr = color
        else:
            raise ValueError('color should either be a string ( "BLUE" ) or a Quantity_Color(0.1, 0.8, 0.1) got %s' % color)
        return self.DisplayShape(shapes, color=clr, update=update)

    def EnableAntiAliasing(self):
        self.SetNbMsaaSample(4)

    def DisableAntiAliasing(self):
        self.SetNbMsaaSample(0)

    def EraseAll(self):
        self.Context.EraseAll(True)

    def Tumble(self, num_images, animation=True):
        self.View.Tumble(num_images, animation)

    def Pan(self, dx, dy):
        self.View.Pan(dx, dy)

    def SetSelectionMode(self, mode=None):
        """Activate a topology selection mode; cycles through the
        module-level `modes` iterator when no explicit mode is given."""
        topo_level = next(modes)
        if mode is None:
            self.Context.Activate(AIS_Shape_SelectionMode(topo_level), True)
        else:
            self.Context.Activate(AIS_Shape_SelectionMode(mode), True)
        self.Context.UpdateSelected(True)

    def SetSelectionModeVertex(self):
        self.SetSelectionMode(TopAbs_VERTEX)

    def SetSelectionModeEdge(self):
        self.SetSelectionMode(TopAbs_EDGE)

    def SetSelectionModeFace(self):
        self.SetSelectionMode(TopAbs_FACE)

    def SetSelectionModeShape(self):
        self.Context.Deactivate()

    def SetSelectionModeNeutral(self):
        self.Context.Deactivate()

    def GetSelectedShapes(self):
        """Return the list of shapes picked by the last selection."""
        return self.selected_shapes

    def GetSelectedShape(self):
        """
        Returns the current selected shape
        """
        # BUGFIX: `self.selected_shape` was never assigned anywhere, so this
        # always raised AttributeError. Return the most recently selected
        # shape instead, or None when nothing is selected.
        if self.selected_shapes:
            return self.selected_shapes[-1]
        return None

    def SelectArea(self, Xmin, Ymin, Xmax, Ymax):
        """Select every shape inside the given screen-space rectangle."""
        self.Context.Select(Xmin, Ymin, Xmax, Ymax, self.View, True)
        self.Context.InitSelected()
        # reinit the selected_shapes list
        self.selected_shapes = []
        while self.Context.MoreSelected():
            if self.Context.HasSelectedShape():
                self.selected_shapes.append(self.Context.SelectedShape())
            self.Context.NextSelected()
        # callbacks
        for callback in self._select_callbacks:
            callback(self.selected_shapes, Xmin, Ymin, Xmax, Ymax)

    def Select(self, X, Y):
        """Select the shape under the (X, Y) screen position."""
        self.Context.Select(True)
        self.Context.InitSelected()
        self.selected_shapes = []
        if self.Context.MoreSelected():
            if self.Context.HasSelectedShape():
                self.selected_shapes.append(self.Context.SelectedShape())
        # callbacks
        for callback in self._select_callbacks:
            callback(self.selected_shapes, X, Y)

    def ShiftSelect(self, X, Y):
        """Toggle the selection of the shape under (X, Y), keeping others selected."""
        self.Context.ShiftSelect(True)
        self.Context.InitSelected()
        self.selected_shapes = []
        while self.Context.MoreSelected():
            if self.Context.HasSelectedShape():
                self.selected_shapes.append(self.Context.SelectedShape())
            self.Context.NextSelected()
        # highlight newly selected, unhighlight those no longer selected
        self.Context.UpdateSelected(True)
        # callbacks
        for callback in self._select_callbacks:
            callback(self.selected_shapes, X, Y)

    def Rotation(self, X, Y):
        self.View.Rotation(X, Y)

    def DynamicZoom(self, X1, Y1, X2, Y2):
        self.View.Zoom(X1, Y1, X2, Y2)

    def ZoomFactor(self, zoom_factor):
        self.View.SetZoom(zoom_factor)

    def ZoomArea(self, X1, Y1, X2, Y2):
        self.View.WindowFit(X1, Y1, X2, Y2)

    def Zoom(self, X, Y):
        self.View.Zoom(X, Y)

    def StartRotation(self, X, Y):
        self.View.StartRotation(X, Y)
class OffscreenRenderer(Viewer3d):
    """ The offscreen renderer is inherited from Viewer3d.
    The DisplayShape method is overridden to export to image
    each time it is called.

    If the environment variable PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE is
    set to "1", each DisplayShape call dumps a jpeg capture, either to
    PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE_PATH or to the current directory.
    """
    def __init__(self, screen_size=(640, 480)):
        # screen_size: (width, height) of the offscreen framebuffer, in pixels
        Viewer3d.__init__(self)
        # create the renderer
        self.Create()
        self.SetSize(screen_size[0], screen_size[1])
        self.SetModeShaded()
        self.set_bg_gradient_color([206, 215, 222], [128, 128, 128])
        self.display_triedron()
        # number of captures dumped so far; used to build unique file names
        self.capture_number = 0
    def DisplayShape(self, shapes, material=None, texture=None, color=None, transparency=None, update=True):
        """ Display shapes (see Viewer3d.DisplayShape) and, when the
        PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE env var is "1", dump the
        rendered frame to a jpeg file. Returns the list of AIS shapes.
        """
        # call the "original" DisplayShape method
        r = super(OffscreenRenderer, self).DisplayShape(shapes, material, texture,
                                                        color, transparency, update)  # always update
        if os.getenv("PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE") == "1":  # dump to jpeg file
            # seconds-since-epoch, integer part only ("%f" % time) has no
            # spaces, so the replace(" ", "-") below is presumably defensive
            timestamp = ("%f" % time.time()).split(".")[0]
            self.capture_number += 1
            image_filename = "capture-%i-%s.jpeg" % (self.capture_number,
                                                     timestamp.replace(" ", "-"))
            if os.getenv("PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE_PATH"):
                path = os.getenv("PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE_PATH")
                if not os.path.isdir(path):
                    raise IOError("%s is not a valid path" % path)
            else:
                path = os.getcwd()
            image_full_name = os.path.join(path, image_filename)
            # render the current frame to disk, and fail loudly if the
            # dump did not produce a file
            self.View.Dump(image_full_name)
            if not os.path.isfile(image_full_name):
                raise IOError("OffscreenRenderer failed to render image to file")
            print("OffscreenRenderer content dumped to %s" % image_full_name)
        return r
# coding=utf-8
# Author: Marvin Pinto <me@marvinp.ca>
# Author: Dennis Lutter <lad1337@gmail.com>
# Author: Aaron Bieber <deftly@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib2
import sickbeard
from sickbeard import logger
from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD, NOTIFY_SUBTITLE_DOWNLOAD, NOTIFY_GIT_UPDATE, NOTIFY_GIT_UPDATE_TEXT
class FreeMobileNotifier(object):
    """SickRage notifier that sends SMS through the Free Mobile SMS API
    (https://smsapi.free-mobile.fr/sendmsg).
    """

    def test_notify(self, cust_id=None, apiKey=None):
        """Send a test SMS, bypassing the USE_FREEMOBILE on/off toggle."""
        return self._notifyFreeMobile('Test', "This is a test notification from SickRage", cust_id, apiKey, force=True)

    def _sendFreeMobileSMS(self, title, msg, cust_id=None, apiKey=None):
        """
        Sends a SMS notification

        msg: The message to send (unicode)
        title: The title of the message
        cust_id: The Free Mobile customer ID (defaults to the configured one)
        apiKey: The Free Mobile API key (defaults to the configured one)

        returns: (True, message) if the SMS succeeded, (False, message) otherwise
        """
        if cust_id is None:
            cust_id = sickbeard.FREEMOBILE_ID
        if apiKey is None:
            apiKey = sickbeard.FREEMOBILE_APIKEY

        # NOTE(review): this logs the API key in clear text at DEBUG level;
        # consider masking it.
        logger.log(u"Free Mobile in use with API KEY: " + apiKey, logger.DEBUG)

        # build up the URL and parameters
        msg = msg.strip()
        msg_quoted = urllib2.quote(title.encode('utf-8') + ": " + msg.encode('utf-8'))
        URL = "https://smsapi.free-mobile.fr/sendmsg?user=" + cust_id + "&pass=" + apiKey + "&msg=" + msg_quoted

        req = urllib2.Request(URL)
        # send the request to Free Mobile
        try:
            urllib2.urlopen(req)
        except IOError as e:
            # The Free Mobile API maps failures to HTTP status codes
            if hasattr(e, 'code'):
                if e.code == 400:
                    message = "Missing parameter(s)."
                    logger.log(message, logger.ERROR)
                    return False, message
                if e.code == 402:
                    message = "Too much SMS sent in a short time."
                    logger.log(message, logger.ERROR)
                    return False, message
                if e.code == 403:
                    message = "API service isn't enabled in your account or ID / API key is incorrect."
                    logger.log(message, logger.ERROR)
                    return False, message
                if e.code == 500:
                    message = "Server error. Please retry in few moment."
                    logger.log(message, logger.ERROR)
                    return False, message
            # BUGFIX: an IOError with no code, or a code not handled above,
            # used to fall through and report success. Report it as a failure.
            message = u"Error while sending SMS: {0}".format(e)
            logger.log(message, logger.ERROR)
            return False, message
        except Exception as e:
            message = u"Error while sending SMS: {0}".format(e)
            logger.log(message, logger.ERROR)
            return False, message

        message = "Free Mobile SMS successful."
        logger.log(message, logger.INFO)
        return True, message

    def notify_snatch(self, ep_name, title=notifyStrings[NOTIFY_SNATCH]):
        if sickbeard.FREEMOBILE_NOTIFY_ONSNATCH:
            self._notifyFreeMobile(title, ep_name)

    def notify_download(self, ep_name, title=notifyStrings[NOTIFY_DOWNLOAD]):
        if sickbeard.FREEMOBILE_NOTIFY_ONDOWNLOAD:
            self._notifyFreeMobile(title, ep_name)

    def notify_subtitle_download(self, ep_name, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
        if sickbeard.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD:
            self._notifyFreeMobile(title, ep_name + ": " + lang)

    def notify_git_update(self, new_version="??"):
        if sickbeard.USE_FREEMOBILE:
            update_text = notifyStrings[NOTIFY_GIT_UPDATE_TEXT]
            title = notifyStrings[NOTIFY_GIT_UPDATE]
            self._notifyFreeMobile(title, update_text + new_version)

    def notify_login(self, ipaddress=""):
        if sickbeard.USE_FREEMOBILE:
            # BUGFIX: `common` was referenced without ever being imported,
            # which raised NameError on every login notification.
            from sickbeard import common
            update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
            title = common.notifyStrings[common.NOTIFY_LOGIN]
            self._notifyFreeMobile(title, update_text.format(ipaddress))

    def _notifyFreeMobile(self, title, message, cust_id=None, apiKey=None, force=False):
        """
        Sends a SMS notification

        title: The title of the notification to send
        message: The message string to send
        cust_id: Your Free Mobile customer ID
        apiKey: Your Free Mobile API key
        force: Enforce sending, for instance for testing
        """
        if not sickbeard.USE_FREEMOBILE and not force:
            logger.log(u"Notification for Free Mobile not enabled, skipping this notification", logger.DEBUG)
            return False, "Disabled"

        logger.log(u"Sending a SMS for " + message, logger.DEBUG)
        return self._sendFreeMobileSMS(title, message, cust_id, apiKey)
notifier = FreeMobileNotifier | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
from peacock.utils import ExeLauncher
import json
import mooseutils
from PyQt5.QtWidgets import QApplication
class JsonData(object):
    """
    Class that holds the json produced by an executable.
    """
    def __init__(self, app_path="", extra_args=None, **kwds):
        """
        Constructor.
        Input:
            app_path: Path to the executable. When non-empty, the json
                dump is loaded immediately via appChanged().
            extra_args: Optional list of extra command line arguments
                passed to the executable when dumping json.
        """
        super(JsonData, self).__init__(**kwds)
        self.json_data = None  # parsed json dict, or None until loaded
        self.app_path = None   # path of the executable the data came from
        # BUGFIX: `extra_args=[]` was a mutable default argument, silently
        # shared by every instance built without the keyword. Use None as
        # the sentinel and create a fresh list per instance.
        self.extra_args = [] if extra_args is None else extra_args
        if app_path:
            self.appChanged(app_path)

    def _processEvents(self):
        """
        If we are in a QApplication, process events so
        the GUI stays responsive.
        """
        qapp = QApplication.instance()
        if qapp:
            qapp.processEvents()

    def appChanged(self, app_path):
        """
        Called when the executable changed. Runs the executable, parses its
        json dump, and stores the result. On failure, a warning is emitted
        and json_data/app_path are left unchanged.
        Input:
            app_path: New executable path
        """
        try:
            self._processEvents()
            raw_data = self._getRawDump(app_path)
            self._processEvents()
            self.json_data = json.loads(raw_data)
            self._processEvents()
            self.app_path = app_path
        except Exception as e:
            mooseutils.mooseWarning("Failed to load json from '%s': %s" % (app_path, e))

    def _getRawDump(self, app_path):
        """
        Generate the raw json text from the executable.
        Return:
            the text between the START/END JSON DATA markers
        """
        # "-options_left 0" is used to stop the debug version of PETSc from printing
        # out WARNING messages that sometime confuse the json parser
        data = ExeLauncher.runExe(app_path, ["-options_left", "0", "--json"] + self.extra_args)
        data = data.split('**START JSON DATA**\n')[1]
        data = data.split('**END JSON DATA**')[0]
        return data

    def toPickle(self):
        """
        Return a dict that can be pickled
        """
        return {"app_path": self.app_path,
                "json_data": self.json_data,
                }

    def fromPickle(self, data):
        """
        Read in from a dict that was once pickled.
        Input:
            data[dict]: dict that was generated from toPickle()
        """
        self.app_path = data["app_path"]
        self.json_data = data["json_data"]
if __name__ == "__main__":
    # Small manual test: dump the top-level json keys produced by an executable.
    import sys
    if len(sys.argv) != 2:
        print("Usage: %s <exe path>" % sys.argv[0])
        sys.exit(1)
    j = JsonData(sys.argv[1])
    # BUGFIX: appChanged() swallows load failures with a warning and leaves
    # json_data as None, which made the print below crash with AttributeError.
    if j.json_data is None:
        print("Failed to load json data from '%s'" % sys.argv[1])
        sys.exit(1)
    print(j.json_data.keys())
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: lineinfile
short_description: Manage lines in text files
description:
- This module ensures a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
- This is primarily useful when you want to change a single line in a file only.
- See the M(ansible.builtin.replace) module if you want to change multiple, similar lines
or check M(ansible.builtin.blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(ansible.builtin.copy) or M(ansible.builtin.template) modules.
version_added: "0.7"
options:
path:
description:
- The file to modify.
- Before Ansible 2.3 this option was only usable as O(dest), O(destfile) and O(name).
type: path
required: true
aliases: [ dest, destfile, name ]
regexp:
description:
- The regular expression to look for in every line of the file.
- For O(state=present), the pattern to replace if found. Only the last line found will be replaced.
- For O(state=absent), the pattern of the line(s) to remove.
- If the regular expression is not matched, the line will be
added to the file in keeping with O(insertbefore) or O(insertafter)
settings.
- When modifying a line the regexp should typically match both the initial state of
the line as well as its state after replacement by O(line) to ensure idempotence.
- Uses Python regular expressions. See U(https://docs.python.org/3/library/re.html).
type: str
aliases: [ regex ]
version_added: '1.7'
search_string:
description:
- The literal string to look for in every line of the file. This does not have to match the entire line.
- For O(state=present), the line to replace if the string is found in the file. Only the last line found will be replaced.
- For O(state=absent), the line(s) to remove if the string is in the line.
- If the literal expression is not matched, the line will be
added to the file in keeping with O(insertbefore) or O(insertafter)
settings.
- Mutually exclusive with O(backrefs) and O(regexp).
type: str
version_added: '2.11'
state:
description:
- Whether the line should be there or not.
type: str
choices: [ absent, present ]
default: present
line:
description:
- The line to insert/replace into the file.
- Required for O(state=present).
- If O(backrefs) is set, may contain backreferences that will get
expanded with the O(regexp) capture groups if the regexp matches.
type: str
aliases: [ value ]
backrefs:
description:
- Used with O(state=present).
- If set, O(line) can contain backreferences (both positional and named)
that will get populated if the O(regexp) matches.
- This parameter changes the operation of the module slightly;
O(insertbefore) and O(insertafter) will be ignored, and if the O(regexp)
does not match anywhere in the file, the file will be left unchanged.
- If the O(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
- Mutually exclusive with O(search_string).
type: bool
default: no
version_added: "1.1"
insertafter:
description:
- Used with O(state=present).
- If specified, the line will be inserted after the last match of specified regular expression.
      - If the first match is required, use O(firstmatch=yes).
- A special value is available; V(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- If O(insertbefore) is set, default value V(EOF) will be ignored.
- If regular expressions are passed to both O(regexp) and O(insertafter), O(insertafter) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertbefore).
type: str
insertbefore:
description:
- Used with O(state=present).
- If specified, the line will be inserted before the last match of specified regular expression.
- If the first match is required, use O(firstmatch=yes).
      - A special value is available; V(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file.
- If regular expressions are passed to both O(regexp) and O(insertbefore), O(insertbefore) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertafter).
type: str
version_added: "1.1"
create:
description:
- Used with O(state=present).
- If specified, the file will be created if it does not already exist.
- By default it will fail if the file is missing.
type: bool
default: no
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
firstmatch:
description:
- Used with O(insertafter) or O(insertbefore).
- If set, O(insertafter) and O(insertbefore) will work with the first line that matches the given regular expression.
type: bool
default: no
version_added: "2.5"
encoding:
description:
- The character set in which the target file is encoded.
- For a list of available built-in encodings, see U(https://docs.python.org/3/library/codecs.html#standard-encodings)
type: str
default: utf-8
version_added: "2.20"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- files
- validate
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: none
notes:
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
seealso:
- module: ansible.builtin.blockinfile
- module: ansible.builtin.copy
- module: ansible.builtin.file
- module: ansible.builtin.replace
- module: ansible.builtin.template
- module: community.windows.win_lineinfile
author:
    - Daniel Hokka Zakrisson (@dhozac)
- Ahti Kitsik (@ahtik)
- Jose Angel Munoz (@imjoseangel)
"""
EXAMPLES = r"""
# NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- name: Ensure SELinux is set to enforcing mode
ansible.builtin.lineinfile:
path: /etc/selinux/config
regexp: '^SELINUX='
line: SELINUX=enforcing
- name: Make sure group wheel is not in the sudoers configuration
ansible.builtin.lineinfile:
path: /etc/sudoers
state: absent
regexp: '^%wheel'
- name: Replace a localhost entry with our own
ansible.builtin.lineinfile:
path: /etc/hosts
regexp: '^127\.0\.0\.1'
line: 127.0.0.1 localhost
owner: root
group: root
mode: '0644'
- name: Replace a localhost entry searching for a literal string to avoid escaping
ansible.builtin.lineinfile:
path: /etc/hosts
search_string: '127.0.0.1'
line: 127.0.0.1 localhost
owner: root
group: root
mode: '0644'
- name: Ensure the default Apache port is 8080
ansible.builtin.lineinfile:
path: /etc/httpd/conf/httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- name: Ensure php extension matches new pattern
ansible.builtin.lineinfile:
path: /etc/httpd/conf/httpd.conf
search_string: '<FilesMatch ".php[45]?$">'
insertafter: '^\t<Location \/>\n'
line: ' <FilesMatch ".php[34]?$">'
- name: Ensure we have our own comment added to /etc/services
ansible.builtin.lineinfile:
path: /etc/services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
- name: Add a line to a file if the file does not exist, without passing regexp
ansible.builtin.lineinfile:
path: /tmp/testfile
line: 192.168.1.99 foo.lab.net foo
create: yes
# NOTE: Yaml requires escaping backslashes in double quotes but not in single quotes
- name: Ensure the JBoss memory settings are exactly as needed
ansible.builtin.lineinfile:
path: /opt/jboss-as/bin/standalone.conf
regexp: '^(.*)Xms(\d+)m(.*)$'
line: '\1Xms${xms}m\3'
backrefs: yes
# NOTE: Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- name: Validate the sudoers file before saving
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
regexp: '^%ADMIN ALL='
line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
validate: /usr/sbin/visudo -cf %s
# See https://docs.python.org/3/library/re.html for further details on syntax
- name: Use backrefs with alternative group syntax to avoid conflicts with variable values
ansible.builtin.lineinfile:
path: /tmp/config
regexp: ^(host=).*
line: \g<1>{{ hostname }}
backrefs: yes
"""
RETURN = r"""#"""
import os
import re
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
def write_changes(module, lines, dest, encoding=None):
    """Write ``lines`` to a temporary file, run the user-supplied validation
    command on it (if any), then atomically move it over ``dest``.

    Calls ``module.fail_json`` (which exits) when validation fails.
    """
    tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
    with os.fdopen(tmpfd, 'w', encoding=encoding) as tmp:
        tmp.writelines(lines)

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        # the validate command must reference the candidate file via %s
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
        valid = rc == 0
        if rc != 0:
            module.fail_json(msg='failed to validate: rc:%s error:%s' % (rc, err))

    if valid:
        # resolve symlinks so we replace the real file, not the link
        real_dest = to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')),
                              errors='surrogate_or_strict')
        module.atomic_move(tmpfile, real_dest,
                           unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
    """Apply ownership/permission/SELinux settings from the task parameters
    and fold the result into the (message, changed) pair to report back.
    """
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_fs_attributes_if_different(file_args, False, diff=diff)
    if attrs_changed:
        # join with the content-change message when both happened
        if changed:
            message += " and "
        message += "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def present(module, dest, regexp, search_string, line, insertafter, insertbefore, create,
            backup, backrefs, firstmatch):
    """Ensure *line* is present in *dest*.

    Replaces the line matched by *regexp*/*search_string* when one matches,
    otherwise inserts relative to *insertafter*/*insertbefore* (or EOF).
    Never returns normally: always exits via ``module.exit_json`` or
    ``module.fail_json``.
    """
    # before/after content is only populated when diff mode is active.
    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % dest,
            'after_header': '%s (content)' % dest}
    encoding = module.params.get('encoding', None)
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if not os.path.exists(b_dest):
        if not create:
            module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
        b_destpath = os.path.dirname(b_dest)
        # Create missing parent directories, but never in check mode.
        if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
            try:
                os.makedirs(b_destpath)
            except Exception as e:
                module.fail_json(msg='Error creating %s (%s)' % (to_text(b_destpath), to_text(e)))
        lines = []
    else:
        with open(b_dest, 'r', encoding=encoding) as f:
            lines = f.readlines()
    if module._diff:
        diff['before'] = ''.join(lines)
    if regexp is not None:
        re_m = re.compile(regexp)
    # insertafter/insertbefore are regexes unless they are the special
    # BOF/EOF markers (or unset).
    if insertafter not in (None, 'BOF', 'EOF'):
        re_ins = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        re_ins = re.compile(insertbefore)
    else:
        re_ins = None
    # index[0] is the line num where regexp has been found
    # index[1] is the line num where insertafter/insertbefore has been found
    index = [-1, -1]
    match = None
    exact_line_match = False
    # The module's doc says
    # "If regular expressions are passed to both regexp and
    # insertafter, insertafter is only honored if no match for regexp is found."
    # Therefore:
    # 1. regexp or search_string was found -> ignore insertafter, replace the founded line
    # 2. regexp or search_string was not found -> insert the line after 'insertafter' or 'insertbefore' line
    # Given the above:
    # 1. First check that there is no match for regexp:
    if regexp is not None:
        for lineno, cur_line in enumerate(lines):
            match_found = re_m.search(cur_line)
            if match_found:
                index[0] = lineno
                match = match_found
                if firstmatch:
                    break
    # 2. Second check that there is no match for search_string:
    if search_string is not None:
        for lineno, cur_line in enumerate(lines):
            match_found = search_string in cur_line
            if match_found:
                index[0] = lineno
                match = match_found
                if firstmatch:
                    break
    # 3. When no match found on the previous step,
    # parse for searching insertafter/insertbefore:
    if not match:
        for lineno, cur_line in enumerate(lines):
            if line == cur_line.rstrip('\r\n'):
                index[0] = lineno
                exact_line_match = True
            elif re_ins is not None and re_ins.search(cur_line):
                if insertafter:
                    # + 1 for the next line
                    index[1] = lineno + 1
                    if firstmatch:
                        break
                if insertbefore:
                    # index[1] for the previous line
                    index[1] = lineno
                    if firstmatch:
                        break
    msg = ''
    changed = False
    linesep = os.linesep
    # Exact line or Regexp matched a line in the file
    if index[0] != -1:
        if backrefs and match:
            new_line = match.expand(line)
        else:
            # Don't do backref expansion if not asked.
            new_line = line
        if not new_line.endswith(linesep):
            new_line += linesep
        # If no regexp or search_string was given and no line match is found anywhere in the file,
        # insert the line appropriately if using insertbefore or insertafter
        if (regexp, search_string, match) == (None, None, None) and not exact_line_match:
            # Insert lines
            if insertafter and insertafter != 'EOF':
                # Ensure there is a line separator after the found string
                # at the end of the file.
                if lines and not lines[-1][-1:] in ('\n', '\r'):
                    lines[-1] = lines[-1] + linesep
                # If the line to insert after is at the end of the file
                # use the appropriate index value.
                if len(lines) == index[1]:
                    if lines[index[1] - 1].rstrip('\r\n') != line:
                        lines.append(line + linesep)
                        msg = 'line added'
                        changed = True
                elif lines[index[1]].rstrip('\r\n') != line:
                    lines.insert(index[1], line + linesep)
                    msg = 'line added'
                    changed = True
            elif insertbefore and insertbefore != 'BOF':
                # If the line to insert before is at the beginning of the file
                # use the appropriate index value.
                if index[1] <= 0:
                    if lines[index[1]].rstrip('\r\n') != line:
                        lines.insert(index[1], line + linesep)
                        msg = 'line added'
                        changed = True
                elif lines[index[1] - 1].rstrip('\r\n') != line:
                    lines.insert(index[1], line + linesep)
                    msg = 'line added'
                    changed = True
        elif lines[index[0]] != new_line:
            lines[index[0]] = new_line
            msg = 'line replaced'
            changed = True
    elif backrefs:
        # Do absolutely nothing, since it's not safe generating the line
        # without the regexp matching to populate the backrefs.
        pass
    # Add it to the beginning of the file
    elif insertbefore == 'BOF' or insertafter == 'BOF':
        lines.insert(0, line + linesep)
        msg = 'line added'
        changed = True
    # Add it to the end of the file if requested or
    # if insertafter/insertbefore didn't match anything
    # (so default behaviour is to add at the end)
    elif insertafter == 'EOF' or index[1] == -1:
        # If the file is not empty then ensure there's a newline before the added line
        if lines and not lines[-1][-1:] in ('\n', '\r'):
            lines.append(linesep)
        lines.append(line + linesep)
        msg = 'line added'
        changed = True
    elif insertafter and index[1] != -1:
        # Don't insert the line if it already matches at the index.
        # If the line to insert after is at the end of the file use the appropriate index value.
        if len(lines) == index[1]:
            if lines[index[1] - 1].rstrip('\r\n') != line:
                lines.append(line + linesep)
                msg = 'line added'
                changed = True
        elif line != lines[index[1]].rstrip('\n\r'):
            lines.insert(index[1], line + linesep)
            msg = 'line added'
            changed = True
    # insert matched, but not the regexp or search_string
    else:
        lines.insert(index[1], line + linesep)
        msg = 'line added'
        changed = True
    if module._diff:
        diff['after'] = ''.join(lines)
    backupdest = ""
    if changed and not module.check_mode:
        if backup and os.path.exists(b_dest):
            backupdest = module.backup_local(dest)
        write_changes(module, lines, dest, encoding)
    # In check mode on a not-yet-created file there is nothing on disk to
    # stat, so skip the attribute check and exit early.
    if module.check_mode and not os.path.exists(b_dest):
        module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
    attr_diff['before_header'] = '%s (file attributes)' % dest
    attr_diff['after_header'] = '%s (file attributes)' % dest
    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def absent(module, dest, regexp, search_string, line, backup):
    """Remove every line of *dest* that matches *regexp*, contains
    *search_string*, or equals *line* (in that order of precedence).

    Never returns normally: always exits via ``module.exit_json``.
    """
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if not os.path.exists(b_dest):
        module.exit_json(changed=False, msg="file not present")

    msg = ''
    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % dest,
            'after_header': '%s (content)' % dest}

    encoding = module.params['encoding']
    with open(b_dest, 'r', encoding=encoding) as f:
        content = f.readlines()

    if module._diff:
        diff['before'] = ''.join(content)

    compiled = re.compile(regexp) if regexp is not None else None

    # Partition the file into lines to keep and lines to drop.
    kept = []
    removed = []
    for cur_line in content:
        if compiled is not None:
            hit = compiled.search(cur_line)
        elif search_string is not None:
            hit = search_string in cur_line
        else:
            hit = line == cur_line.rstrip('\r\n')
        if hit:
            removed.append(cur_line)
        else:
            kept.append(cur_line)

    changed = len(removed) > 0

    if module._diff:
        diff['after'] = ''.join(kept)

    backupdest = ""
    if changed and not module.check_mode:
        if backup:
            backupdest = module.backup_local(dest)
        write_changes(module, kept, dest, encoding)

    if changed:
        msg = "%s line(s) removed" % len(removed)

    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
    attr_diff['before_header'] = '%s (file attributes)' % dest
    attr_diff['after_header'] = '%s (file attributes)' % dest

    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, found=len(removed), msg=msg, backup=backupdest, diff=difflist)
def main():
    """Module entry point: build the AnsibleModule, validate the parameter
    combination, and dispatch to present() or absent()."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            regexp=dict(type='str', aliases=['regex']),
            search_string=dict(type='str'),
            line=dict(type='str', aliases=['value']),
            encoding=dict(type='str', default='utf-8'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            backrefs=dict(type='bool', default=False),
            create=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            firstmatch=dict(type='bool', default=False),
            validate=dict(type='str'),
        ),
        mutually_exclusive=[
            ['insertbefore', 'insertafter'], ['regexp', 'search_string'], ['backrefs', 'search_string']],
        add_file_common_args=True,
        supports_check_mode=True,
    )
    params = module.params
    create = params['create']
    backup = params['backup']
    backrefs = params['backrefs']
    path = params['path']
    firstmatch = params['firstmatch']
    regexp = params['regexp']
    search_string = params['search_string']
    line = params['line']
    # An empty regexp or search_string matches every line, which is rarely
    # what the user wants -- warn about it.
    if '' in [regexp, search_string]:
        msg = ("The %s is an empty string, which will match every line in the file. "
               "This may have unintended consequences, such as replacing the last line in the file rather than appending.")
        param_name = 'search string'
        if regexp == '':
            param_name = 'regular expression'
            msg += " If this is desired, use '^' to match every line in the file and avoid this warning."
        module.warn(msg % param_name)
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if os.path.isdir(b_path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)
    if params['state'] == 'present':
        if backrefs and regexp is None:
            module.fail_json(msg='regexp is required with backrefs=true')
        if line is None:
            module.fail_json(msg='line is required with state=present')
        # Deal with the insertafter default value manually, to avoid errors
        # because of the mutually_exclusive mechanism.
        ins_bef, ins_aft = params['insertbefore'], params['insertafter']
        if ins_bef is None and ins_aft is None:
            ins_aft = 'EOF'
        present(module, path, regexp, search_string, line,
                ins_aft, ins_bef, create, backup, backrefs, firstmatch)
    else:
        if (regexp, search_string, line) == (None, None, None):
            module.fail_json(msg='one of line, search_string, or regexp is required with state=absent')
        absent(module, path, regexp, search_string, line, backup)
if __name__ == '__main__':
main() | python | github | https://github.com/ansible/ansible | lib/ansible/modules/lineinfile.py |
"""
Helper functions for loading environment settings.
"""
from __future__ import print_function
import os
import sys
import json
from lazy import lazy
from path import path
import memcache
class Env(object):
    """
    Load information about the execution environment.
    Class attributes are computed once at import time; env_tokens and
    feature_flags are lazily loaded from the service's env JSON file.
    """
    # Root of the git repository (edx-platform)
    REPO_ROOT = path(__file__).abspath().parent.parent.parent
    # Reports Directory
    REPORT_DIR = REPO_ROOT / 'reports'
    # Bok_choy dirs
    BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
    BOK_CHOY_LOG_DIR = REPO_ROOT / "test_root" / "log"
    BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
    BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
    # For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
    # For this reason, the stubs package is currently located in the Django app called "terrain"
    # where other lettuce configuration is stored.
    BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
    # Directory that videos are served from
    VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
    # Port and log file for each server started during bok-choy runs.
    BOK_CHOY_SERVERS = {
        'lms': {
            'port': 8003,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
        },
        'cms': {
            'port': 8031,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
        }
    }
    # Stub services started for bok-choy runs: port, log file and an
    # optional service-specific 'config' string.
    BOK_CHOY_STUBS = {
        'xqueue': {
            'port': 8040,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
            'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
        },
        'ora': {
            'port': 8041,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
            'config': '',
        },
        'comments': {
            'port': 4567,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
        },
        'video': {
            'port': 8777,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
            'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
        },
        'youtube': {
            'port': 9080,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
        }
    }
    # Mongo databases that will be dropped before/after the tests run
    BOK_CHOY_MONGO_DATABASE = "test"
    BOK_CHOY_CACHE = memcache.Client(['0.0.0.0:11211'], debug=0)
    # Test Ids Directory
    TEST_DIR = REPO_ROOT / ".testids"
    # Files used to run each of the js test suites
    # TODO: Store this as a dict. Order seems to matter for some
    # reason. See issue TE-415.
    JS_TEST_ID_FILES = [
        REPO_ROOT / 'lms/static/js_test.yml',
        REPO_ROOT / 'lms/static/js_test_coffee.yml',
        REPO_ROOT / 'cms/static/js_test.yml',
        REPO_ROOT / 'cms/static/js_test_squire.yml',
        REPO_ROOT / 'common/lib/xmodule/xmodule/js/js_test.yml',
        REPO_ROOT / 'common/static/js_test.yml',
    ]
    # Suite names, positionally parallel to JS_TEST_ID_FILES above.
    JS_TEST_ID_KEYS = [
        'lms',
        'lms-coffee',
        'cms',
        'cms-squire',
        'xmodule',
        'common',
    ]
    JS_REPORT_DIR = REPORT_DIR / 'javascript'
    # Directories used for common/lib/ tests
    LIB_TEST_DIRS = []
    for item in (REPO_ROOT / "common/lib").listdir():
        if (REPO_ROOT / 'common/lib' / item).isdir():
            LIB_TEST_DIRS.append(path("common/lib") / item.basename())
    LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
    # Directory for i18n test reports
    I18N_REPORT_DIR = REPORT_DIR / 'i18n'
    # Service variant (lms, cms, etc.) configured with an environment variable
    # We use this to determine which envs.json file to load.
    SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
    # If service variant not configured in env, then pass the correct
    # environment for lms / cms
    if not SERVICE_VARIANT:  # this will intentionally catch "";
        if any(i in sys.argv[1:] for i in ('cms', 'studio')):
            SERVICE_VARIANT = 'cms'
        else:
            SERVICE_VARIANT = 'lms'
    @lazy
    def env_tokens(self):
        """
        Return a dict of environment settings.
        If we couldn't find the JSON file, issue a warning and return an empty dict.
        """
        # Find the env JSON file
        if self.SERVICE_VARIANT:
            env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
        else:
            env_path = path("env.json").abspath()
        # If the file does not exist, here or one level up,
        # issue a warning and return an empty dict
        if not env_path.isfile():
            env_path = env_path.parent.parent / env_path.basename()
            if not env_path.isfile():
                print(
                    "Warning: could not find environment JSON file "
                    "at '{path}'".format(path=env_path),
                    file=sys.stderr,
                )
                return dict()
        # Otherwise, load the file as JSON and return the resulting dict
        try:
            with open(env_path) as env_file:
                return json.load(env_file)
        except ValueError:
            print(
                "Error: Could not parse JSON "
                "in {path}".format(path=env_path),
                file=sys.stderr,
            )
            sys.exit(1)
    @lazy
    def feature_flags(self):
        """
        Return a dictionary of feature flags configured by the environment.
        """
        return self.env_tokens.get('FEATURES', dict())
import os
import pathlib
import unittest
from maildaemon.config import load_config
from maildaemon.connection_group import ConnectionGroup
_HERE = pathlib.Path(__file__).parent
_TEST_CONFIG_PATH = _HERE.joinpath('maildaemon_test_config.json')
@unittest.skipUnless(os.environ.get('TEST_COMM') or os.environ.get('CI'),
                     'skipping tests that require server connection')
class Tests(unittest.TestCase):
    """Integration tests for ConnectionGroup against the test servers."""

    config = load_config(_TEST_CONFIG_PATH)

    def _make_group(self, names):
        """Build a ConnectionGroup from the named connection configs."""
        return ConnectionGroup.from_dict(
            {name: self.config['connections'][name] for name in names})

    def test_connection(self):
        group = self._make_group(('test-imap', 'test-imap-ssl', 'test-pop-ssl'))
        self.assertEqual(len(group), 3)
        group.connect_all()
        group.disconnect_all()

    def test_purge_dead(self):
        group = self._make_group(('test-imap-ssl', 'test-pop', 'test-pop-ssl'))
        self.assertEqual(len(group), 3)
        group.connect_all()
        group.purge_dead()
        group.disconnect_all()
//// [tests/cases/compiler/commentLeadingCloseBrace.ts] ////
//// [commentLeadingCloseBrace.ts]
declare function commentedParameters(...args): any;
function ifelse() {
if (commentedParameters(1, 2)) {
/*comment1*/
commentedParameters(3, 4);
/*comment2*/
} else {
commentedParameters(5, 6);
}
}
//// [commentLeadingCloseBrace.js]
"use strict";
function ifelse() {
if (commentedParameters(1, 2)) {
/*comment1*/
commentedParameters(3, 4);
/*comment2*/
}
else {
commentedParameters(5, 6);
}
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/commentLeadingCloseBrace.js |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package framework
import (
"context"
"sort"
"strings"
"github.com/hashicorp/vault/sdk/logical"
)
// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use
// with some modern Vault features, and imposes specific API designs.
//
// PolicyMap is a specialization of PathMap that expects the values to
// be lists of policies. This assists in querying and loading policies
// from the PathMap.
type PolicyMap struct {
	PathMap
	// DefaultKey, if non-empty, names an entry whose policies are always
	// included in the result, regardless of the names requested.
	DefaultKey string
	// PolicyKey, if non-empty, overrides the entry key that holds the
	// comma-separated policy list (defaults to "value").
	PolicyKey string
}
func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...string) ([]string, error) {
policyKey := "value"
if p.PolicyKey != "" {
policyKey = p.PolicyKey
}
if p.DefaultKey != "" {
newNames := make([]string, len(names)+1)
newNames[0] = p.DefaultKey
copy(newNames[1:], names)
names = newNames
}
set := make(map[string]struct{})
for _, name := range names {
v, err := p.Get(ctx, s, name)
if err != nil {
return nil, err
}
valuesRaw, ok := v[policyKey]
if !ok {
continue
}
values, ok := valuesRaw.(string)
if !ok {
continue
}
for _, p := range strings.Split(values, ",") {
if p = strings.TrimSpace(p); p != "" {
set[p] = struct{}{}
}
}
}
list := make([]string, 0, len(set))
for k := range set {
list = append(list, k)
}
sort.Strings(list)
return list, nil
} | go | github | https://github.com/hashicorp/vault | sdk/framework/policy_map.go |
#!/usr/bin/env python
# Tests for dtella/common/ipv4.py
import fix_path
import unittest
from dtella.common.ipv4 import CidrNumToMask
from dtella.common.ipv4 import CidrStringToIPMask
from dtella.common.ipv4 import IsSubsetOf
from dtella.common.ipv4 import MaskToCidrNum
from dtella.common.ipv4 import SubnetMatcher
class IPv4TestCase(unittest.TestCase):
    """Exercises the CIDR mask helpers and SubnetMatcher from dtella.common.ipv4."""
    def testCidrNumToMask(self):
        # Valid prefix lengths are 0..32; out-of-range raises ValueError.
        self.assertRaises(ValueError, CidrNumToMask, -1)
        self.assertEqual(CidrNumToMask(0), 0)
        self.assertEqual(CidrNumToMask(16), (~0) << 16)
        self.assertEqual(CidrNumToMask(24), (~0) << 8)
        self.assertEqual(CidrNumToMask(32), ~0)
        self.assertRaises(ValueError, CidrNumToMask, 33)
    def testMaskToCidrNum(self):
        self.assertEqual(MaskToCidrNum(0), 0)
        self.assertEqual(MaskToCidrNum(~0 << 8), 24)
        self.assertEqual(MaskToCidrNum(~0 << 1), 31)
        self.assertEqual(MaskToCidrNum(~0), 32)
        # 12345 is not a contiguous high-bit mask, so it is rejected.
        self.assertRaises(ValueError, MaskToCidrNum, 12345)
    def testCidrStringToIPMask(self):
        self.assertEqual(CidrStringToIPMask("1.2.3.4/5"),
                         (0x01020304, ~0<<(32-5)))
        # No /suffix means a host entry (full /32 mask).
        self.assertEqual(CidrStringToIPMask("1.2.3.4"), (0x01020304, ~0))
        self.assertRaises(ValueError, CidrStringToIPMask, "1.2.3.4//5")
    def testIsSubsetOf(self):
        C = CidrStringToIPMask
        self.assertTrue(IsSubsetOf(C("132.3.12.34"), C("132.3.0.0/0")))
        self.assertTrue(IsSubsetOf(C("132.3.12.34"), C("132.3.0.0/16")))
        self.assertTrue(IsSubsetOf(C("0.0.0.0/0"), C("0.0.0.0/0")))
        self.assertTrue(IsSubsetOf(C("0.0.0.0/1"), C("0.0.0.0/0")))
        self.assertFalse(IsSubsetOf(C("0.0.0.0/0"), C("0.0.0.0/1")))
        self.assertFalse(IsSubsetOf(C("192.168.0.255"), C("192.168.1.0/24")))
        self.assertTrue(IsSubsetOf(C("192.168.1.0"), C("192.168.1.0/24")))
        self.assertTrue(IsSubsetOf(C("192.168.1.255"), C("192.168.1.0/24")))
        self.assertFalse(IsSubsetOf(C("192.168.2.0"), C("192.168.1.0/24")))
        self.assertTrue(IsSubsetOf(C("192.168.1.0/24"), C("192.168.0.0/16")))
        self.assertFalse(IsSubsetOf(C("10.0.0.0/24"), C("192.168.0.0/16")))
    def testSubnetMatcher(self):
        C = CidrStringToIPMask
        matcher = SubnetMatcher()
        # A fresh matcher contains nothing.
        self.assertFalse(matcher.containsRange(C("1.2.3.4")))
        self.assertFalse(matcher.containsRange(C("132.3.0.0/0")))
        # A /0 range covers the entire address space.
        matcher.addRange(C("132.3.0.0/0"))
        self.assertTrue(matcher.containsRange(C("0.0.0.0")))
        self.assertTrue(matcher.containsRange(C("1.2.3.4")))
        self.assertTrue(matcher.containsRange(C("132.3.12.34")))
        self.assertTrue(matcher.containsRange(C("255.255.255.255")))
        matcher.clear()
        matcher.addRange(C("128.210.0.0/15"))
        matcher.addRange(C("128.10.0.0/16"))
        matcher.addRange(C("1.0.0.0/8"))
        self.assertFalse(matcher.containsRange(C("0.0.0.0")))
        self.assertTrue(matcher.containsRange(C("1.2.3.4")))
        # /15 boundary checks: 128.210.0.0-128.211.255.255 inclusive.
        self.assertFalse(matcher.containsRange(C("128.209.255.255")))
        self.assertTrue(matcher.containsRange(C("128.210.0.0")))
        self.assertTrue(matcher.containsRange(C("128.211.123.1")))
        self.assertTrue(matcher.containsRange(C("128.211.255.255")))
        self.assertFalse(matcher.containsRange(C("128.212.0.0")))
        self.assertFalse(matcher.containsRange(C("128.9.255.255")))
        self.assertTrue(matcher.containsRange(C("128.10.0.0")))
        self.assertTrue(matcher.containsRange(C("128.10.255.255")))
        self.assertFalse(matcher.containsRange(C("128.11.0.0")))
        self.assertFalse(matcher.containsRange(C("128.210.0.0/14")))
        self.assertTrue(matcher.containsRange(C("128.210.0.0/16")))
        self.assertTrue(matcher.containsRange(C("128.211.0.0/16")))
        self.assertEqual(len(matcher.nets), 3)
        # Adding broader/overlapping ranges collapses narrower entries.
        matcher.addRange(C("1.2.3.4/0"))
        matcher.addRange(C("1.2.3.4/5"))
        matcher.addRange(C("128.210.0.0/16"))
        self.assertEqual(len(matcher.nets), 1)
        self.assertTrue(matcher.containsRange(C("0.0.0.0/0")))
        self.assertTrue(matcher.containsRange(C("0.0.0.0/1")))
        self.assertTrue(matcher.containsRange(C("128.0.0.0/1")))
        self.assertTrue(matcher.containsRange(C("0.0.0.0")))
        self.assertTrue(matcher.containsRange(C("127.255.255.255")))
        self.assertTrue(matcher.containsRange(C("128.0.0.0")))
        self.assertTrue(matcher.containsRange(C("255.255.255.255")))
        matcher.clear()
        matcher.addRange(C("0.0.0.0/1"))
        matcher.addRange(C("128.0.0.0/1"))
        self.assertTrue(matcher.containsRange(C("0.0.0.0")))
        self.assertTrue(matcher.containsRange(C("127.255.255.255")))
        self.assertTrue(matcher.containsRange(C("128.0.0.0")))
        self.assertTrue(matcher.containsRange(C("255.255.255.255")))
        self.assertTrue(matcher.containsRange(C("0.0.0.0/1")))
        self.assertTrue(matcher.containsRange(C("128.0.0.0/1")))
        # Does not support aggregation.
        self.assertFalse(matcher.containsRange(C("0.0.0.0/0")))
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
DomCrawler Component
====================
The DomCrawler component eases DOM navigation for HTML and XML documents.
Resources
---------
* [Documentation](https://symfony.com/doc/current/components/dom_crawler.html)
* [Contributing](https://symfony.com/doc/current/contributing/index.html)
* [Report issues](https://github.com/symfony/symfony/issues) and
[send Pull Requests](https://github.com/symfony/symfony/pulls)
in the [main Symfony repository](https://github.com/symfony/symfony) | unknown | github | https://github.com/symfony/symfony | src/Symfony/Component/DomCrawler/README.md |
#! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
    """Write *msg* to stderr with an 'error: ' prefix; does not exit."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Report *msg* to stderr via error() and abort with exit status 1."""
    error(msg)
    raise SystemExit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
    """Abort unless the working directory looks like an MSM kernel tree."""
    required = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    if not all(os.path.isfile(marker) for marker in required):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed.

    Tolerates a concurrent creation of the directory (EEXIST); any other
    OSError (permissions, read-only fs, ...) is re-raised.
    """
    # Fix: the original referenced errno.EEXIST without ever importing
    # errno, so any OSError here raised a NameError instead of being
    # classified. Import it locally to keep the module-level imports intact.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
failed_targets = []
class LogRunner:
    """Runs a command, teeing its combined stdout+stderr into a log file.
    With --verbose the output is echoed to stdout verbatim; otherwise a
    progress dot is printed for every 64 lines of build output.
    """
    def __init__(self, logname, make_env):
        # Log file is opened immediately and kept for the runner's lifetime.
        self.logname = logname
        self.fd = open(logname, 'w')
        self.make_env = make_env
    def run(self, args):
        """Run *args* with the stored environment; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                                env=self.make_env,
                                bufsize=0,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        rawfd = proc.stdout.fileno()
        while True:
            # Read raw chunks (not lines) so partial output is logged promptly.
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # Count the newlines in this chunk; emit a dot every 64 lines.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                        sys.stdout.write('.')
                        sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.flush()
        return result
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
def build(self):
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
print 'Building %s in %s log %s' % (self.name, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
# shutil.copyfile(defconfig, dotconfig) # Not really right.
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
'SELINUX_DEFCONFIG=selinux_defconfig',
'SELINUX_LOG_DEFCONFIG=selinux_log_defconfig',
'TIMA_DEFCONFIG=tima_defconfig',
self.confname], env=self.make_env,
stdin=devnull)
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
build = LogRunner(log_name, self.make_env)
for t in build_targets:
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(t, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env, stdin=devnull)
shutil.copyfile(savedefconfig, defconfig)
def update_config(file, str):
    # Append a config option line (e.g. "CONFIG_FOO=y") to the given defconfig.
    # NOTE(review): the parameters shadow the 'file' and 'str' builtins; kept
    # unchanged for compatibility with existing callers.
    print 'Updating %s with \'%s\'\n' % (file, str)
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    builders = []

    # 32-bit ARM targets.
    arm_patterns = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'mdm*_defconfig',
        r'mpq*_defconfig',
    )
    for pattern in arm_patterns:
        for config_path in glob.glob('arch/arm/configs/' + pattern):
            # Strip the trailing '_defconfig' to get the target name.
            target = os.path.basename(config_path)[:-10]
            builders.append(Builder(target, config_path))

    # 64-bit targets are only offered when a 64-bit toolchain is configured.
    if 'CROSS_COMPILE64' in os.environ:
        for pattern in (r'msm_defconfig',):
            for config_path in glob.glob('arch/arm64/configs/' + pattern):
                target = os.path.basename(config_path)[:-10] + "-64"
                builders.append(Builder(target, config_path))

    return builders
def build_many(targets):
    """Build each target in *targets* in sequence; afterwards fail if any
    target recorded a failure (possible with --keep-going)."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        # With --updateconfigs, inject the requested option before building.
        if all_options.updateconfigs:
            update_config(target.defconfig, all_options.updateconfigs)
        target.build()
    if failed_targets:
        fail("\n ".join(["Failed targets:"] +
                        [target.name for target in failed_targets]))
def main():
    """Parse command-line options and build the requested kernel targets."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
                      dest='configs',
                      help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
                      dest='list',
                      help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose',
                      help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
                      dest='oldconfig',
                      help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
                      dest='updateconfigs',
                      help="Update defconfigs with provided option setting, "
                           "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
                      help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
                      dest='load_average',
                      help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
                      dest='keep_going', default=False,
                      help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
                      help='Build the indicated make target (default: %s)' %
                           ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs:
            print " %s" % target.name
        sys.exit(0)
    # --oldconfig replaces the build targets entirely; -m overrides them.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # Positional args select which targets to build.
    if args == ['all']:
        build_many(configs)
    elif args == ['perf']:
        targets = []
        for t in configs:
            if "perf" in t.name:
                targets.append(t)
        build_many(targets)
    elif args == ['noperf']:
        targets = []
        for t in configs:
            if "perf" not in t.name:
                targets.append(t)
        build_many(targets)
    elif len(args) > 0:
        all_configs = {}
        for t in configs:
            all_configs[t.name] = t
        targets = []
        for t in args:
            if t not in all_configs:
                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
            targets.append(all_configs[t])
        build_many(targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
from random import random
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.graphics import Color
from kivy.graphics import Ellipse
from kivy.graphics import Line
class PaintWidget(Widget):
    """Freehand drawing surface: each touch starts a dot and a line in a
    random HSV color; dragging the touch extends that line."""

    def on_touch_down(self, touch):
        """Begin a stroke at the touch point."""
        hsv = (random(), random(), random())
        diameter = 30.0
        radius = diameter / 2
        with self.canvas:
            Color(*hsv, mode="hsv")
            Ellipse(pos=(touch.x - radius, touch.y - radius),
                    size=(diameter, diameter))
            # Remember the Line on the touch so on_touch_move can extend it.
            touch.ud["line"] = Line(points=(touch.x, touch.y), width=diameter)

    def on_touch_move(self, touch):
        """Extend the current stroke with the new touch position."""
        touch.ud["line"].points += [touch.x, touch.y]
class PaintApp(App):
    """Kivy app: a paint canvas overlaid with a button that clears it."""

    def build(self):
        """Assemble and return the root widget tree."""
        root = Widget()
        canvas_widget = PaintWidget()
        clear_button = Button(text="clear")
        root.add_widget(canvas_widget)
        root.add_widget(clear_button)

        def clear_canvas(instance):
            # Wipe every drawing instruction from the painter's canvas.
            canvas_widget.canvas.clear()

        clear_button.bind(on_release=clear_canvas)
        return root
# Fix: stripped dataset-extraction junk that was appended to the final line.
if __name__ == "__main__":
    PaintApp().run()
from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
    """
    Decorator that forces the response's X-Frame-Options HTTP header to
    'DENY', unless the view already set that header itself.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'DENY'
        return response
    return wrapped_view
def xframe_options_sameorigin(view_func):
    """
    Decorator that forces the response's X-Frame-Options HTTP header to
    'SAMEORIGIN', unless the view already set that header itself.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'SAMEORIGIN'
        return response
    return wrapped_view
def xframe_options_exempt(view_func):
    """
    Modifies a view function by setting a response variable that instructs
    XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...
    """
    # Fix: stripped dataset-extraction junk that was appended to the
    # final line and made the module unparsable.
    def wrapped_view(*args, **kwargs):
        resp = view_func(*args, **kwargs)
        # The middleware checks this attribute before adding the header.
        resp.xframe_options_exempt = True
        return resp
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
"""Dispatcher
Please see policy.py for a discussion on dispatchers and policies
"""
import pythoncom, traceback, win32api
from sys import exc_info
#
from win32com.server.exception import IsCOMServerException
from win32com.util import IIDToInterfaceName
import win32com
class DispatcherBase:
    """ The base class for all Dispatchers.

    This dispatcher supports wrapping all operations in exception handlers,
    and all the necessary delegation to the policy.

    This base class supports the printing of "unexpected" exceptions. Note, however,
    that exactly where the output of print goes may not be useful! A derived class may
    provide additional semantics for this.
    """
    def __init__(self, policyClass, object):
        # Wrap the served object in the supplied policy; every COM entry
        # point below delegates to this policy instance.
        self.policy = policyClass(object)
        # The logger we should dump to. If None, we should send to the
        # default location (typically 'print')
        self.logger = getattr(win32com, "logger", None)

    # Note the "return self._HandleException_()" is purely to stop pychecker
    # complaining - _HandleException_ will itself raise an exception for the
    # pythoncom framework, so the result will never be seen.
    #
    # All methods below follow the same pattern: delegate to the policy,
    # and funnel any exception through _HandleException_.
    def _CreateInstance_(self, clsid, reqIID):
        try:
            self.policy._CreateInstance_(clsid, reqIID)
            return pythoncom.WrapObject(self, reqIID)
        except:
            return self._HandleException_()
    def _QueryInterface_(self, iid):
        try:
            return self.policy._QueryInterface_(iid)
        except:
            return self._HandleException_()
    def _Invoke_(self, dispid, lcid, wFlags, args):
        try:
            return self.policy._Invoke_(dispid, lcid, wFlags, args)
        except:
            return self._HandleException_()
    def _GetIDsOfNames_(self, names, lcid):
        try:
            return self.policy._GetIDsOfNames_(names, lcid)
        except:
            return self._HandleException_()
    def _GetTypeInfo_(self, index, lcid):
        try:
            return self.policy._GetTypeInfo_(index, lcid)
        except:
            return self._HandleException_()
    def _GetTypeInfoCount_(self):
        try:
            return self.policy._GetTypeInfoCount_()
        except:
            return self._HandleException_()
    def _GetDispID_(self, name, fdex):
        try:
            return self.policy._GetDispID_(name, fdex)
        except:
            return self._HandleException_()
    def _InvokeEx_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider):
        try:
            return self.policy._InvokeEx_(dispid, lcid, wFlags, args, kwargs, serviceProvider)
        except:
            return self._HandleException_()
    def _DeleteMemberByName_(self, name, fdex):
        try:
            return self.policy._DeleteMemberByName_(name, fdex)
        except:
            return self._HandleException_()
    def _DeleteMemberByDispID_(self, id):
        try:
            return self.policy._DeleteMemberByDispID_(id)
        except:
            return self._HandleException_()
    def _GetMemberProperties_(self, id, fdex):
        try:
            return self.policy._GetMemberProperties_(id, fdex)
        except:
            return self._HandleException_()
    def _GetMemberName_(self, dispid):
        try:
            return self.policy._GetMemberName_(dispid)
        except:
            return self._HandleException_()
    def _GetNextDispID_(self, fdex, flags):
        try:
            return self.policy._GetNextDispID_(fdex, flags)
        except:
            return self._HandleException_()
    def _GetNameSpaceParent_(self):
        try:
            return self.policy._GetNameSpaceParent_()
        except:
            return self._HandleException_()
    def _HandleException_(self):
        """Called whenever an exception is raised.

        Default behaviour is to print the exception.
        """
        # If not a COM exception, print it for the developer.
        if not IsCOMServerException():
            if self.logger is not None:
                self.logger.exception("pythoncom server error")
            else:
                traceback.print_exc()
        # But still raise it for the framework.
        reraise()
    def _trace_(self, *args):
        # Route trace output to the configured logger if any, otherwise
        # fall back to stdout (Python 2 'print' statement).
        if self.logger is not None:
            record = " ".join(map(str, args))
            self.logger.debug(record)
        else:
            for arg in args[:-1]:
                print arg,
            print args[-1]
class DispatcherTrace(DispatcherBase):
    """A dispatcher, which causes a 'print' line for each COM function called.
    """
    def _QueryInterface_(self, iid):
        rc = DispatcherBase._QueryInterface_(self, iid)
        # Only log QI *failures* - successful QIs would be far too noisy.
        if not rc:
            self._trace_("in %s._QueryInterface_ with unsupported IID %s (%s)" % (`self.policy._obj_`, IIDToInterfaceName(iid),iid))
        return rc
    def _GetIDsOfNames_(self, names, lcid):
        self._trace_("in _GetIDsOfNames_ with '%s' and '%d'\n" % (names, lcid))
        return DispatcherBase._GetIDsOfNames_(self, names, lcid)
    def _GetTypeInfo_(self, index, lcid):
        self._trace_("in _GetTypeInfo_ with index=%d, lcid=%d\n" % (index, lcid))
        return DispatcherBase._GetTypeInfo_(self, index, lcid)
    def _GetTypeInfoCount_(self):
        self._trace_("in _GetTypeInfoCount_\n")
        return DispatcherBase._GetTypeInfoCount_(self)
    def _Invoke_(self, dispid, lcid, wFlags, args):
        self._trace_("in _Invoke_ with", dispid, lcid, wFlags, args)
        return DispatcherBase._Invoke_(self, dispid, lcid, wFlags, args)
    def _GetDispID_(self, name, fdex):
        self._trace_("in _GetDispID_ with", name, fdex)
        return DispatcherBase._GetDispID_(self, name, fdex)
    def _InvokeEx_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider):
        self._trace_("in %r._InvokeEx_-%s%r [%x,%s,%r]" % (self.policy._obj_, dispid, args, wFlags, lcid, serviceProvider))
        return DispatcherBase._InvokeEx_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider)
    def _DeleteMemberByName_(self, name, fdex):
        self._trace_("in _DeleteMemberByName_ with", name, fdex)
        return DispatcherBase._DeleteMemberByName_(self, name, fdex)
    def _DeleteMemberByDispID_(self, id):
        self._trace_("in _DeleteMemberByDispID_ with", id)
        return DispatcherBase._DeleteMemberByDispID_(self, id)
    def _GetMemberProperties_(self, id, fdex):
        self._trace_("in _GetMemberProperties_ with", id, fdex)
        return DispatcherBase._GetMemberProperties_(self, id, fdex)
    def _GetMemberName_(self, dispid):
        self._trace_("in _GetMemberName_ with", dispid)
        return DispatcherBase._GetMemberName_(self, dispid)
    def _GetNextDispID_(self, fdex, flags):
        self._trace_("in _GetNextDispID_ with", fdex, flags)
        return DispatcherBase._GetNextDispID_(self, fdex, flags)
    def _GetNameSpaceParent_(self):
        self._trace_("in _GetNameSpaceParent_")
        return DispatcherBase._GetNameSpaceParent_(self)
class DispatcherWin32trace(DispatcherTrace):
    """A tracing dispatcher that sends its output to the win32trace remote collector.
    """
    def __init__(self, policyClass, object):
        DispatcherTrace.__init__(self, policyClass, object)
        if self.logger is None:
            # If we have no logger, setup our output.
            import win32traceutil # Sets up everything.
        # Python 2 backtick syntax == repr(object).
        self._trace_("Object with win32trace dispatcher created (object=%s)" % `object`)
class DispatcherOutputDebugString(DispatcherTrace):
    """A tracing dispatcher that sends its output to win32api.OutputDebugString
    """
    def _trace_(self, *args):
        # Emit one OutputDebugString call per arg; a trailing newline on the
        # last arg terminates the record for debug viewers.
        for arg in args[:-1]:
            win32api.OutputDebugString(str(arg)+" ")
        win32api.OutputDebugString(str(args[-1])+"\n")
class DispatcherWin32dbg(DispatcherBase):
    """A source-level debugger dispatcher

    A dispatcher which invokes the debugger as an object is instantiated, or
    when an unexpected exception occurs.

    Requires Pythonwin.
    """
    def __init__(self, policyClass, ob):
        # No one uses this, and it just causes py2exe to drag all of
        # pythonwin in.
        #import pywin.debugger
        # NOTE(review): with the import above commented out, 'pywin' is
        # unbound, so instantiating this class raises NameError. Appears
        # deliberate (class is deprecated) - confirm before re-enabling.
        pywin.debugger.brk()
        print "The DispatcherWin32dbg dispatcher is deprecated!"
        print "Please let me know if this is a problem."
        print "Uncomment the relevant lines in dispatcher.py to re-enable"
        # DEBUGGER Note - You can either:
        # * Hit Run and wait for a (non Exception class) exception to occur!
        # * Set a breakpoint and hit run.
        # * Step into the object creation (a few steps away!)
        DispatcherBase.__init__(self, policyClass, ob)
    def _HandleException_(self):
        """ Invoke the debugger post mortem capability """
        # Save details away.
        typ, val, tb = exc_info()
        #import pywin.debugger, pywin.debugger.dbgcon
        debug = 0
        try:
            # Python 2 two-expression raise: re-raise to classify the error.
            raise typ, val
        except Exception: # AARG - What is this Exception???
            # Use some inside knowledge to borrow a Debugger option which dictates if we
            # stop at "expected" exceptions.
            debug = pywin.debugger.GetDebugger().get_option(pywin.debugger.dbgcon.OPT_STOP_EXCEPTIONS)
        except:
            debug = 1
        if debug:
            try:
                pywin.debugger.post_mortem(tb, typ, val) # The original exception
            except:
                traceback.print_exc()
        # But still raise it.
        del tb
        reraise()
def reraise():
    """Handy function for re-raising errors.

    Note: storing a traceback in a local variable can introduce reference
    loops if you aren't careful.  Specifically, that local variable should
    not be within an execution context contained with the traceback.

    By using a utility function, we ensure that our local variable holding
    the traceback is not referenced by the traceback itself.
    """
    t, v, tb = exc_info()
    # Python 2 three-argument raise: re-raise with the original traceback.
    raise t, v, tb
# Pick the richest available debug dispatcher at import time.
# Fix: stripped dataset-extraction junk that was appended to the final line.
try:
    import win32trace
    DefaultDebugDispatcher = DispatcherWin32trace
except ImportError: # no win32trace module - just use a print based one.
    DefaultDebugDispatcher = DispatcherTrace
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Linting in the MongoDB codebase
## C++ Linters
### `clang-tidy`
The `buildscripts/clang_tidy.py` Python script runs the `clang-tidy` linter. In order to run
`clang-tidy` you must have a compilation database (`compile_commands.json` file).
Ex: `python3 buildscripts/clang_tidy.py`
| Linter | Configuration File(s) | Help Command | Documentation |
| ------------ | --------------------- | ------------------- | -------------------------------------------------------------------------------------------------------- |
| `clang-tidy` | `.clang-tidy` | `clang-tidy --help` | [https://clang.llvm.org/extra/clang-tidy/index.html](https://clang.llvm.org/extra/clang-tidy/index.html) |
### `errorcodes.py`
The `buildscripts/errorcodes.py` script runs a custom error code linter, which verifies that all
assertion codes are distinct. You can see the usage by running the following command:
`buildscripts/errorcodes.py --help`.
Ex: `buildscripts/errorcodes.py`
### `quickmongolint.py`
The `buildscripts/quickmongolint.py` script runs a simple MongoDB C++ linter. You can see the usage
by running the following command: `buildscripts/quickmongolint.py --help`. You can take a look at
`buildscripts/linter/mongolint.py` to better understand the rules for this linter.
Ex: `buildscripts/quickmongolint.py lint`
## Javascript Linters
The `bazel run lint` command runs the `eslint` javascript linter.
| Linter | Configuration File(s) | Help Command | Documentation |
| -------- | --------------------- | ------------ | ------------------------------------------ |
| `eslint` | `.eslint.config.mjs` | | [https://eslint.org/](https://eslint.org/) |
## Yaml Linters
The `buildscripts/yamllinters.sh` shell script runs the yaml linters. The supported yaml linters
are: `yamllint` & `evergreen-lint`. `evergreen-lint` is a custom MongoDB linter used specifically
for `evergreen` yaml files.
Ex: `bash buildscripts/yamllinters.sh`
| Linter | Configuration File(s) | Help Command | Documentation |
| ---------------- | ------------------------- | --------------------------------- | ---------------------------------------------------------------------------------------------- |
| `yamllint` | `etc/yamllint_config.yml` | `yamllint --help` | [https://readthedocs.org/projects/yamllint/](https://readthedocs.org/projects/yamllint/) |
| `evergreen-lint` | `etc/evergreen_lint.yml` | `python -m evergreen_lint --help` | [https://github.com/evergreen-ci/config-linter](https://github.com/evergreen-ci/config-linter) |
## Python Linters
The `bazel run lint` command runs all Python linters as well as several other linters in our code base. You can
run auto-remediations via:
`bazel run lint --fix`.
Ex: `bazel run lint`
| Linter | Configuration File(s) | Help Command | Documentation |
| ------ | --------------------- | ------------ | ------------------------------------------------------------ |
| `ruff` | `pyproject.toml` | | [https://docs.astral.sh/ruff/](https://docs.astral.sh/ruff/) |
''' Text annotation module '''
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
    """Fields for `TextModule` and `TextDescriptor`."""
    # Raw XML body of the exercise; the <instructions> element is split out
    # of it at render time (see TextAnnotationModule._extract_instructions).
    data = String(help=_("XML data for the annotation"),
                  scope=Scope.content,
                  default=textwrap.dedent("""\
        <annotatable>
            <instructions>
                <p>
                    Add the instructions to the assignment here.
                </p>
            </instructions>
            <p>
                Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
            </p>
        </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Text Annotation'),
    )
    # Comma-separated "tag:color" pairs used for automatic highlighting.
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='imagery:red,parallelism:blue',
    )
    source = String(
        display_name=_("Source/Citation"),
        help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
        scope=Scope.settings,
        default='None',
    )
    # Comma-separated "name;url;baseline" triples describing diacritic marks.
    diacritics = String(
        display_name=_("Diacritic Marks"),
        help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
        scope=Scope.settings,
        default='',
    )
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    # Shared secret for signing tokens sent to the annotation backend.
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class TextAnnotationModule(AnnotatableFields, XModule):
    ''' Text Annotation Module '''
    js = {'coffee': [],
          'js': []}
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'textannotation'

    def __init__(self, *args, **kwargs):
        super(TextAnnotationModule, self).__init__(*args, **kwargs)

        # Parse the stored XML once and pre-split instructions from content.
        xmltree = etree.fromstring(self.data)

        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        # get_real_user is None in runtimes without anonymous ids (e.g. Studio).
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception:  # pylint: disable=broad-except
                self.user_email = _("No email address found.")

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)

    def student_view(self, context):
        """ Renders parameters to template. """
        context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default,
            'tag': self.instructor_tags,
            'source': self.source,
            'instructions_html': self.instructions,
            'content_html': self.content,
            # Signed token authorizing this user against the annotation backend.
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'diacritic_marks': self.diacritics,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('textannotation.html', context))

        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    ''' Text Annotation Descriptor '''
    module_class = TextAnnotationModule
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        # Hide the annotation-storage URL and secret from the Studio
        # metadata editor on top of whatever the base class already hides.
        # Fix: stripped dataset-extraction junk appended to the return line.
        non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            TextAnnotationDescriptor.annotation_storage_url,
            TextAnnotationDescriptor.annotation_token_secret,
        ])
        return non_editable_fields
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.