code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
from nose.plugins.attrib import attr
class TestRottenTomatoesLookup(FlexGetBase):
    """Online tests for the ``rottentomatoes_lookup`` plugin.

    Each mock entry exercises a different lookup path: plain title search,
    a direct RT id, title-plus-year parsing, and a short REPACK title
    without a year.
    """

    __yaml__ = """
        tasks:
          test:
            mock:
              # tests search
              - {title: 'Toy Story'}
              - {title: 'The Matrix'}
              - {title: 'Star Wars: Episode I - The Phantom Menace (in 3D)'}
              # tests direct id
              - {title: '[Group] Taken 720p', rt_id: 770680780}
              # tests title + year
              - {title: 'Rush.Hour[1998]1080p[Eng]-FOO'}
              # test short title, with repack and without year
              - {title: 'Up.REPACK.720p.Bluray.x264-FlexGet'}
            rottentomatoes_lookup: yes
    """

    @attr(online=True)
    def test_rottentomatoes_lookup(self):
        """Run the task and verify RT metadata was attached to every entry."""
        self.execute_task('test')
        # check that these were created
        assert self.task.find_entry(rt_name='Toy Story', rt_year=1995, rt_id=9559, imdb_id='tt0114709'), \
            'Didn\'t populate RT info for Toy Story'
        assert self.task.find_entry(imdb_id='tt0114709'), \
            'Didn\'t populate imdb_id info for Toy Story'
        assert self.task.find_entry(rt_name='The Matrix', rt_year=1999, rt_id=12897, imdb_id='tt0133093'), \
            'Didn\'t populate RT info for The Matrix'
        assert self.task.find_entry(rt_name='Star Wars: Episode I - The Phantom Menace',
                                    rt_year=1999, rt_id=10008), \
            'Didn\'t populate RT info for Star Wars: Episode I - The Phantom Menace (in 3D)'
        assert self.task.find_entry(rt_name='Taken', rt_year=2008, rt_id=770680780), \
            'Didn\'t populate RT info for Taken'
        assert self.task.find_entry(rt_name='Rush Hour', rt_year=1998, rt_id=10201), \
            'Didn\'t populate RT info for Rush Hour'
        assert self.task.find_entry(rt_name='Up', rt_year=2009, rt_id=770671912), \
            'Didn\'t populate RT info for Up'
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
    """
    Start a Java Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.

    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.

    ```
    default_args = {
        'dataflow_default_options': {
            'project': 'my-gcp-project',
            'zone': 'europe-west1-d',
            'stagingLocation': 'gs://my-staging-bucket/staging/'
        }
    }
    ```

    You need to pass the path to your dataflow as a file reference with the ``jar``
    parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
    options to your job.

    ```
    t1 = DataFlowJavaOperator(
        task_id='datapflow_example',
        jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
        options={
            'autoscalingAlgorithm': 'BASIC',
            'maxNumWorkers': '50',
            'start': '{{ds}}',
            'partitionType': 'DAY'
        },
        dag=my-dag)
    ```

    Both ``jar`` and ``options`` are templated so you can use variables in them.
    """
    template_fields = ['options', 'jar']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            jar,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new DataFlowJavaOperator.

        For more detail on about job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params

        :param jar: The reference to a self executing DataFlow jar.
        :type jar: string
        :param dataflow_default_options: Map of default job options.
        :type dataflow_default_options: dict
        :param options: Map of job specific options.
        :type options: dict
        :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have domain-wide
            delegation enabled.
        :type delegate_to: string
        """
        super(DataFlowJavaOperator, self).__init__(*args, **kwargs)

        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.jar = jar
        # Normalise None to fresh dicts (avoids shared mutable defaults).
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}

    def execute(self, context):
        """Submit the jar to Dataflow; job-specific options override the defaults."""
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)

        # Copy before merging so repeated executions do not mutate the defaults.
        dataflow_options = copy.copy(self.dataflow_default_options)
        dataflow_options.update(self.options)

        hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.images import views
# URL routes for the admin images panel: list, create, update (image and
# its metadata) and detail views.
urlpatterns = patterns(
    'openstack_dashboard.dashboards.admin.images.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<image_id>[^/]+)/update/$',
        views.UpdateView.as_view(), name='update'),
    url(r'^(?P<id>[^/]+)/update_metadata/$',
        views.UpdateMetadataView.as_view(), name='update_metadata'),
    url(r'^(?P<image_id>[^/]+)/detail/$',
        views.DetailView.as_view(), name='detail')
)
# Exercise str.rsplit / bytes.rsplit behaviour (MicroPython test script:
# each print's output is compared against CPython's).

# default separator (whitespace)
print("a b".rsplit())
#print(" a b ".rsplit(None))
#print(" a b ".rsplit(None, 1))
#print(" a b ".rsplit(None, 2))
#print(" a b c ".rsplit(None, 1))
#print(" a b c ".rsplit(None, 0))
#print(" a b c ".rsplit(None, -1))

# empty separator should fail (this actually delegates to .split())
try:
    "abc".rsplit('')
except ValueError:
    print("ValueError")

# empty separator should fail (error handled in .rsplit())
try:
    'a a a a'.rsplit('', 5)
except ValueError:
    print('ValueError')

# bad separator type
try:
    'a a a a'.rsplit(1)
except TypeError:
    print('TypeError')

# non-empty separator
print("abc".rsplit("a"))
print("abc".rsplit("b"))
print("abc".rsplit("c"))
print("abc".rsplit("z"))
print("abc".rsplit("ab"))
print("abc".rsplit("bc"))
print("abc".rsplit("abc"))
print("abc".rsplit("abcd"))
print("abcabc".rsplit("bc"))

# with maxsplit: 0 means no splits, counts apply from the right
print("abcabc".rsplit("bc", 0))
print("abcabc".rsplit("bc", 1))
print("abcabc".rsplit("bc", 2))
print("10/11/12".rsplit("/", 1))
print("10/11/12".rsplit("/", 2))
print("10/11/12".rsplit("/", 3))
print("10/11/12".rsplit("/", 4))
print("10/11/12".rsplit("/", 5))
print("/*10/*11/*12/*".rsplit("/*", 1))
print("/*10/*11/*12/*".rsplit("/*", 2))
print("/*10/*11/*12/*".rsplit("/*", 3))
print("/*10/*11/*12/*".rsplit("/*", 4))
print("/*10/*11/*12/*".rsplit("/*", 5))

# bytes objects support rsplit too
print(b"abcabc".rsplit(b"bc", 2))
#
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for network modules (connection options)."""

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    default: no
    choices: ['yes', 'no']
  auth_pass:
    description:
      - Specifies the password to use if required to enter privileged mode
        on the remote device. If I(authorize) is false, then this argument
        does nothing. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
    default: none
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands. If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device. This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands. If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        default: no
        choices: ['yes', 'no']
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device. If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
        default: none
    """
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: disallow-username-in-url
Help: Disallow username in URL
Added: 7.61.0
Category: curl
Multi: boolean
See-also:
- proto
Example:
- --disallow-username-in-url $URL
---
# `--disallow-username-in-url`
Exit with error if passed a URL containing a username. Probably most useful
when the URL is being provided at runtime or similar.
Accepting and using credentials in a URL is normally considered a security
hazard, as credentials embedded in a URL are easily leaked that way.
import unittest
import rugby_rankings.ratings_input
class TestRatingsInput(unittest.TestCase):
    """Unit tests for rugby_rankings.ratings_input.RatingsInput."""

    def test_construct(self):
        """RatingsInput can be built with and without the optional flags."""
        input_obj = rugby_rankings.ratings_input.RatingsInput(0.0, 0.0, 0, 0)
        self.assertIsInstance(
            input_obj, rugby_rankings.ratings_input.RatingsInput
        )
        input_obj = rugby_rankings.ratings_input.RatingsInput(
            0.0, 0.0, 0, 0, True, True
        )
        self.assertIsInstance(
            input_obj, rugby_rankings.ratings_input.RatingsInput
        )

    def test_types(self):
        """Rounded ratings are floats rounded to two decimal places; flags stick."""
        input_obj = rugby_rankings.ratings_input.RatingsInput(
            1.111, 90.199, 3, 2.2
        )
        self.assertIsInstance(input_obj.get_rounded_team_a_rating(), float)
        self.assertEqual(input_obj.get_rounded_team_a_rating(), 1.11)
        self.assertEqual(input_obj.get_rounded_team_b_rating(), 90.20)

        input_obj = rugby_rankings.ratings_input.RatingsInput(
            1.111, 90.199, 3, 2.2, True, True
        )
        self.assertEqual(input_obj.is_rugby_world_cup, True)
        self.assertEqual(input_obj.is_neutral_venue, True)
if __name__ == "__main__":
    unittest.main()
import sys
from benchmark_base import BenchmarkBase
import torch
class Benchmark(BenchmarkBase):
    """Compile-time regression benchmark around unbacked-symint update hints.

    Background: https://github.com/pytorch/pytorch/pull/129893
    """

    # Number of random split sizes (one data-dependent hint each).
    N = 20

    def __init__(self):
        super().__init__(
            category="update_hint",
            backend="inductor",
            device="cpu",
        )

    def name(self):
        return f"{self.category()}_regression"

    def description(self):
        return "information at https://github.com/pytorch/pytorch/pull/129893"

    def _prepare_once(self):
        # capture_scalar_outputs lets .tolist()/.item() be traced into the graph.
        torch._dynamo.config.capture_scalar_outputs = True
        torch.manual_seed(0)
        self.splits = torch.randint(10, (self.N,))
        sz = self.splits.sum().item()
        self.input = torch.randn(sz)

    def _prepare(self):
        # Reset dynamo so every measurement recompiles from scratch.
        torch._dynamo.reset()

    def _work(self):
        @torch.compile(fullgraph=True)
        def f(a, b):
            xs = b.tolist()
            for x in xs:
                torch._check(x >= 0)
                torch._check(x <= self.N)
            return a.split(xs)

        f(self.input, self.splits)
def main():
    """Entry point: first CLI argument is the path results are appended to."""
    result_path = sys.argv[1]
    Benchmark().enable_compile_time_instruction_count().collect_all().append_results(
        result_path
    )


if __name__ == "__main__":
    main()
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test iris.util
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import inspect
import unittest
import cf_units
import numpy as np
import iris.analysis
import iris.coords
import iris.tests.stock as stock
import iris.util
class TestMonotonic(unittest.TestCase):
    """Tests for iris.util.monotonic.

    Note: the original failure messages used '/n' — clearly a typo for the
    newline escape '\\n'; fixed here.
    """

    def assertMonotonic(self, array, direction=None, **kwargs):
        """Fail unless *array* is monotonic (optionally in *direction*, +1/-1)."""
        if direction is not None:
            mono, mono_dir = iris.util.monotonic(array, return_direction=True,
                                                 **kwargs)
            if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
            if mono_dir != np.sign(direction):
                self.fail('Array was monotonic but not in the direction expected:'
                          '\n + requested direction: %s\n + resultant direction: %s'
                          % (direction, mono_dir))
        else:
            mono = iris.util.monotonic(array, **kwargs)
            if not mono:
                self.fail('Array was not monotonic:\n %r' % array)

    def assertNotMonotonic(self, array, **kwargs):
        """Fail if *array* is monotonic."""
        mono = iris.util.monotonic(array, **kwargs)
        if mono:
            self.fail("Array was monotonic when it shouldn't be:\n %r" % array)

    def test_monotonic_pve(self):
        a = np.array([3, 4, 5.3])
        self.assertMonotonic(a)
        self.assertMonotonic(a, direction=1)
        # test the reverse for negative monotonic.
        a = a[::-1]
        self.assertMonotonic(a)
        self.assertMonotonic(a, direction=-1)

    def test_not_monotonic(self):
        b = np.array([3, 5.3, 4])
        self.assertNotMonotonic(b)

    def test_monotonic_strict(self):
        b = np.array([3, 5.3, 4])
        self.assertNotMonotonic(b, strict=True)
        self.assertNotMonotonic(b)

        # repeated values are monotonic only when strict=False
        b = np.array([3, 5.3, 5.3])
        self.assertNotMonotonic(b, strict=True)
        self.assertMonotonic(b, direction=1)

        b = b[::-1]
        self.assertNotMonotonic(b, strict=True)
        self.assertMonotonic(b, direction=-1)

        # single-element arrays are rejected outright
        b = np.array([0.0])
        self.assertRaises(ValueError, iris.util.monotonic, b)
        self.assertRaises(ValueError, iris.util.monotonic, b, strict=True)

        b = np.array([0.0, 0.0])
        self.assertNotMonotonic(b, strict=True)
        self.assertMonotonic(b)
class TestReverse(unittest.TestCase):
    """Tests for iris.util.reverse axis handling and validation."""

    def test_simple(self):
        a = np.arange(12).reshape(3, 4)
        np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
        np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
        np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, 1))
        np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, [1]))
        # invalid axis specifications must raise
        self.assertRaises(ValueError, iris.util.reverse, a, [])
        self.assertRaises(ValueError, iris.util.reverse, a, -1)
        self.assertRaises(ValueError, iris.util.reverse, a, 10)
        self.assertRaises(ValueError, iris.util.reverse, a, [-1])
        self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])

    def test_single(self):
        a = np.arange(36).reshape(3, 4, 3)
        np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
        np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
        np.testing.assert_array_equal(a[:, ::-1, ::-1], iris.util.reverse(a, [1, 2]))
        np.testing.assert_array_equal(a[..., ::-1], iris.util.reverse(a, 2))
        self.assertRaises(ValueError, iris.util.reverse, a, -1)
        self.assertRaises(ValueError, iris.util.reverse, a, 10)
        self.assertRaises(ValueError, iris.util.reverse, a, [-1])
        self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
class TestClipString(unittest.TestCase):
    """Tests for iris.util.clip_string."""

    def setUp(self):
        self.test_string = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
        self.rider = "**^^**$$..--__"  # A good chance at being unique and not in the string to be tested!

    def test_oversize_string(self):
        # Test with a clip length that means the string will be clipped
        clip_length = 109
        result = iris.util.clip_string(self.test_string, clip_length, self.rider)

        # Check the length is between what we requested ( + rider length) and the length of the original string
        self.assertTrue(clip_length + len(self.rider) <= len(result) < len(self.test_string),
                        "String was not clipped.")
        # Also test the rider was added
        self.assertTrue(self.rider in result,
                        "Rider was not added to the string when it should have been.")

    def test_undersize_string(self):
        # Test with a clip length that is longer than the string
        clip_length = 10999
        result = iris.util.clip_string(self.test_string, clip_length, self.rider)
        self.assertEqual(len(result), len(self.test_string),
                         "String was clipped when it should not have been.")
        # Also test that no rider was added on the end if the string was not clipped
        self.assertFalse(self.rider in result,
                         "Rider was adding to the string when it should not have been.")

    def test_invalid_clip_lengths(self):
        # Clip values less than or equal to zero are not valid
        for clip_length in [0, -100]:
            result = iris.util.clip_string(self.test_string, clip_length, self.rider)
            self.assertEqual(len(result), len(self.test_string),
                             "String was clipped when it should not have been.")

    def test_default_values(self):
        # Get the default values specified in the function
        # NOTE(review): inspect.getargspec is deprecated (removed in Python
        # 3.11); kept for py2/six compatibility of this file.
        argspec = inspect.getargspec(iris.util.clip_string)
        arg_dict = dict(zip(argspec.args[-2:], argspec.defaults))

        result = iris.util.clip_string(self.test_string, arg_dict["clip_length"], arg_dict["rider"])

        self.assertLess(len(result), len(self.test_string), "String was not clipped.")

        rider_returned = result[-len(arg_dict["rider"]):]
        self.assertEqual(rider_returned, arg_dict['rider'],
                         'Default rider was not applied.')

    def test_trim_string_with_no_spaces(self):
        clip_length = 200
        no_space_string = "a" * 500

        # Since this string has no spaces, clip_string will not be able to gracefully clip it
        # but will instead clip it exactly where the user specified
        result = iris.util.clip_string(no_space_string, clip_length, self.rider)

        expected_length = clip_length + len(self.rider)

        # Check the length of the returned string is equal to clip length + length of rider
        self.assertEqual(
            len(result),
            expected_length,
            'Mismatch in expected length of clipped string. Length was %s, '
            'expected value is %s' % (len(result), expected_length))
class TestDescribeDiff(iris.tests.IrisTest):
    """Tests for iris.util.describe_diff against known-good reference text."""

    def test_identical(self):
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d()

        return_sio = six.StringIO()
        iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio)
        return_str = return_sio.getvalue()

        self.assertString(return_str, 'compatible_cubes.str.txt')

    def test_different(self):
        # test incompatible attributes
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d()
        test_cube_a.attributes['Conventions'] = 'CF-1.5'
        test_cube_b.attributes['Conventions'] = 'CF-1.6'

        return_sio = six.StringIO()
        iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio)
        return_str = return_sio.getvalue()

        self.assertString(return_str, 'incompatible_attr.str.txt')

        # test incompatible names
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d()
        test_cube_a.standard_name = "relative_humidity"

        return_sio = six.StringIO()
        iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio)
        return_str = return_sio.getvalue()

        self.assertString(return_str, 'incompatible_name.str.txt')

        # test incompatible unit
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d()
        test_cube_a.units = cf_units.Unit('m')

        return_sio = six.StringIO()
        iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio)
        return_str = return_sio.getvalue()

        self.assertString(return_str, 'incompatible_unit.str.txt')

        # test incompatible methods
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)

        return_sio = six.StringIO()
        iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio)
        return_str = return_sio.getvalue()

        self.assertString(return_str, 'incompatible_meth.str.txt')

    def test_output_file(self):
        # test writing a combined diff report straight to a file on disk
        test_cube_a = stock.realistic_4d()
        test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
        test_cube_a.attributes['Conventions'] = 'CF-1.5'
        test_cube_b.attributes['Conventions'] = 'CF-1.6'
        test_cube_a.standard_name = "relative_humidity"
        test_cube_a.units = cf_units.Unit('m')

        with self.temp_filename() as filename:
            with open(filename, 'w') as f:
                iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f)
                f.close()

            self.assertFilesEqual(filename,
                                  'incompatible_cubes.str.txt')
class TestAsCompatibleShape(tests.IrisTest):
    """Tests for iris.util.as_compatible_shape."""

    def test_slice(self):
        cube = tests.stock.realistic_4d()
        sliced = cube[1, :, 2, :-2]
        expected = cube[1:2, :, 2:3, :-2]
        res = iris.util.as_compatible_shape(sliced, cube)
        self.assertEqual(res, expected)

    def test_transpose(self):
        cube = tests.stock.realistic_4d()
        transposed = cube.copy()
        transposed.transpose()
        expected = cube
        res = iris.util.as_compatible_shape(transposed, cube)
        self.assertEqual(res, expected)

    def test_slice_and_transpose(self):
        cube = tests.stock.realistic_4d()
        sliced_and_transposed = cube[1, :, 2, :-2]
        sliced_and_transposed.transpose()
        expected = cube[1:2, :, 2:3, :-2]
        res = iris.util.as_compatible_shape(sliced_and_transposed, cube)
        self.assertEqual(res, expected)

    def test_collapsed(self):
        cube = tests.stock.realistic_4d()
        collapsed = cube.collapsed('model_level_number', iris.analysis.MEAN)
        expected_shape = list(cube.shape)
        expected_shape[1] = 1
        expected_data = collapsed.data.reshape(expected_shape)
        res = iris.util.as_compatible_shape(collapsed, cube)
        self.assertCML(res, ('util', 'as_compatible_shape_collapsed.cml'),
                       checksum=False)
        self.assertMaskedArrayEqual(expected_data, res.data)

    def test_reduce_dimensionality(self):
        # Test that as_compatible_shape() can demote
        # length one dimensions to scalars.
        cube = tests.stock.realistic_4d()
        src = cube[:, 2:3]
        expected = reduced = cube[:, 2]
        res = iris.util.as_compatible_shape(src, reduced)
        self.assertEqual(res, expected)

    def test_anonymous_dims(self):
        cube = tests.stock.realistic_4d()
        # Move all coords from dim_coords to aux_coords.
        for coord in cube.dim_coords:
            dim = cube.coord_dims(coord)
            cube.remove_coord(coord)
            cube.add_aux_coord(coord, dim)

        sliced = cube[1, :, 2, :-2]
        expected = cube[1:2, :, 2:3, :-2]
        res = iris.util.as_compatible_shape(sliced, cube)
        self.assertEqual(res, expected)

    def test_scalar_auxcoord(self):
        def dim_to_aux(cube, coord_name):
            """Convert coordinate on cube from DimCoord to AuxCoord."""
            coord = cube.coord(coord_name)
            coord = iris.coords.AuxCoord.from_coord(coord)
            cube.replace_coord(coord)

        cube = tests.stock.realistic_4d()
        src = cube[:, :, 3]
        dim_to_aux(src, 'grid_latitude')
        expected = cube[:, :, 3:4]
        dim_to_aux(expected, 'grid_latitude')
        res = iris.util.as_compatible_shape(src, cube)
        self.assertEqual(res, expected)
if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook
class SparkSqlOperator(BaseOperator):
    """
    Execute Spark SQL query

    :param sql: The SQL query to execute
    :type sql: str
    :param conf: arbitrary Spark configuration property
    :type conf: str (format: PROP=VALUE)
    :param conn_id: connection_id string
    :type conn_id: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for all executors (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param master: spark://host:port, mesos://host:port, yarn, or local
    :type master: str
    :param name: Name of the job
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param yarn_queue: The YARN queue to submit to (Default: "default")
    :type yarn_queue: str

    .. note::
        NOTE(review): an earlier version of this docstring documented a
        ``verbose`` flag, but no such parameter exists on this operator --
        confirm against SparkSqlHook before adding one.
    """

    template_fields = ["_sql"]
    template_ext = [".sql", ".hql"]

    @apply_defaults
    def __init__(self,
                 sql,
                 conf=None,
                 conn_id='spark_sql_default',
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 keytab=None,
                 master='yarn',
                 name='default-name',
                 num_executors=None,
                 yarn_queue='default',
                 *args,
                 **kwargs):
        super(SparkSqlOperator, self).__init__(*args, **kwargs)
        self._sql = sql
        self._conf = conf
        self._conn_id = conn_id
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._keytab = keytab
        self._master = master
        self._name = name
        self._num_executors = num_executors
        self._yarn_queue = yarn_queue
        # The hook is created lazily in execute().
        self._hook = None

    def execute(self, context):
        """
        Call the SparkSqlHook to run the provided sql query
        """
        self._hook = SparkSqlHook(sql=self._sql,
                                  conf=self._conf,
                                  conn_id=self._conn_id,
                                  total_executor_cores=self._total_executor_cores,
                                  executor_cores=self._executor_cores,
                                  executor_memory=self._executor_memory,
                                  keytab=self._keytab,
                                  name=self._name,
                                  num_executors=self._num_executors,
                                  master=self._master,
                                  yarn_queue=self._yarn_queue
                                  )
        self._hook.run_query()

    def on_kill(self):
        # Guard: the task may be killed before execute() ever created the
        # hook; the original called kill() on None and raised AttributeError.
        if self._hook is not None:
            self._hook.kill()
#!/usr/bin/env python
import bisect
import QtCore
import rospy
import rospkg
from python_qt_binding.QtCore import Qt, QObject, QAbstractItemModel
from vigir_pluginlib_msgs.msg import PluginState, PluginDescription
# Tree Model for Plugins
class PluginTreeModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None, *args):
    """Model holding plugin states as a two-level tree: base class -> plugin."""
    super(PluginTreeModel, self).__init__(parent)
    self._root_item = PluginTreeItem()
    self._plugin_states = []
def clear(self):
    # Drop every item below the (invisible) root.
    self._root_item.clear()
def columnCount(self, parent=QtCore.QModelIndex()):
    """Qt model API: column count of *parent* (root when index is invalid)."""
    item = parent.internalPointer() if parent.isValid() else self._root_item
    return item.columnCount()
def rowCount(self, parent=QtCore.QModelIndex()):
    """Qt model API: child count of *parent* (root when index is invalid)."""
    item = parent.internalPointer() if parent.isValid() else self._root_item
    return item.childCount()
def headerData(self, section, orientation, role):
    """Qt model API: horizontal headers are 'Class' and 'Name'."""
    if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
        if section == 0:
            return 'Class'
        elif section == 1:
            return 'Name'
    return None
def insertRows(self, row, count, parent=QtCore.QModelIndex()):
    """Insert *count* empty children at *row* under *parent* (Qt model API)."""
    parent_item = self.getItem(parent)
    # begin/endInsertRows bracket the mutation so attached views stay in sync.
    self.beginInsertRows(parent, row, row + count - 1)
    success = parent_item.insertChildren(row, count)
    self.endInsertRows()
    return success
def removeRows(self, row, count, parent=QtCore.QModelIndex()):
    """Remove *count* children at *row*; prunes a branch left empty."""
    parent_item = self.getItem(parent)
    self.beginRemoveRows(parent, row, row + count - 1)
    success = parent_item.removeChildren(row, count)
    self.endRemoveRows()

    # remove empty branch: when the last plugin of a base class goes away,
    # recursively drop the branch itself (its parent is the root).
    if success and parent_item.parentItem() and not parent_item.childCount():
        return self.removeRows(parent_item.childNumber(), 1)

    return success
def addBranch(self, base_class):
    """Return the index of the branch for *base_class*, creating it if needed."""
    # check if branch already does exist
    branch = self.findBranch(base_class)
    if branch.isValid():
        return branch

    state = PluginState()
    state.description.base_class = base_class

    # insert the new branch keeping branches sorted by base class name
    temp_list = [child.getPluginState().description.base_class
                 for child in self._root_item.childs()]
    position = bisect.bisect(temp_list, base_class)
    if self.insertRows(position, 1):
        branch_item = self._root_item.child(position)
        branch_item.setData(state)
        return self.index(position, 0)

    return QtCore.QModelIndex()
def addItem(self, state):
    """Insert *state* under its base-class branch; returns the item's index."""
    # search for branch with type_class (created on demand)
    branch = self.addBranch(state.description.base_class)
    branch_item = self.getItem(branch)

    # check if child already does exist
    child = self.findChild(state.description, branch)
    if child.isValid():
        return child

    # insert keeping children sorted by (type_class, name)
    entry = (state.description.type_class, state.description.name)
    temp_list = [(child.getPluginState().description.type_class,
                  child.getPluginState().description.name)
                 for child in branch_item.childs()]
    position = bisect.bisect(temp_list, entry)
    if self.insertRows(position, 1, branch):
        child_item = branch_item.child(position)
        child_item.setData(state)
        return self.index(position, 0, branch)

    return QtCore.QModelIndex()
def getItem(self, index=QtCore.QModelIndex()):
if index.isValid():
return index.internalPointer()
else:
return self._root_item
    def setData(self, data):
        """Replace the entire model content with the plugin states in `data`."""
        self.clear()
        self.updateData(data)
    def updateData(self, data):
        """Merge `data` (a list of PluginState) into the tree.

        Leaves that no longer appear in `data` are removed, new entries are
        inserted in sorted order, and blank class names are normalised to
        'Unknown' so the tree never shows empty labels.
        """
        # update empty entries to keep UI tidy
        for state in data:
            if not state.description.base_class:
                state.description.base_class = 'Unknown'
            if not state.description.type_class:
                state.description.type_class = 'Unknown'
        # collect entries which does not exist in recent update anymore
        rows = []
        for branch_item in self._root_item.childs():
            branch_index = self.index(branch_item.childNumber(), 0)
            for child_item in branch_item.childs():
                # NOTE: relies on Python 2 filter() returning a list; on
                # Python 3 filter() returns an always-truthy iterator and
                # this emptiness test would silently stop working.
                result = filter(lambda state: state.description == child_item.getPluginState().description, data)
                if not result:
                    rows.append((child_item.childNumber(), branch_index))
        # remove obsolete entries, TODO: check for branch!
        # delete from the highest row number down so the remaining row
        # numbers stay valid while removing
        rows.sort(reverse=True)
        for row in rows:
            self.removeRows(row[0], 1, row[1])
        # adding entries
        for state in data:
            # addItem() returns the existing index for entries already present
            self.addItem(state)
def data(self, index, role):
if not index.isValid():
return None
elif role == QtCore.Qt.DisplayRole:
return index.internalPointer().data(index.column())
else:
return None
def flags(self, index=QtCore.QModelIndex()):
if index.isValid():
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | super(PluginTreeModel, self).flags(index) #QtCore.Qt.ItemIsEditable
else:
return QtCore.Qt.NoItemFlags
def index(self, row, column, parent=QtCore.QModelIndex()):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if parent.isValid() and parent.column() != 0:
return QtCore.QModelIndex()
parent_item = self.getItem(parent)
child_item = parent_item.child(row)
if child_item:
return self.createIndex(row, column, child_item)
else:
return QtCore.QModelIndex()
def parent(self, index=QtCore.QModelIndex()):
if not index.isValid():
return QtCore.QModelIndex()
child_item = self.getItem(index)
parent_item = child_item.parentItem()
if parent_item == self._root_item:
return QtCore.QModelIndex()
return self.createIndex(parent_item.childNumber(), 0, parent_item)
def findChild(self, description, parent=QtCore.QModelIndex()):
parent_item = self.getItem(parent)
if parent_item == self._root_item:
parent = self.findBranch(description.base_class, parent)
if not parent.isValid():
return QtCore.QModelIndex()
child_item = parent_item.findChild(description)
if child_item:
return self.index(child_item.childNumber(), 0, parent)
else:
return QtCore.QModelIndex()
def findBranch(self, base_class, parent=QtCore.QModelIndex()):
parent_item = self.getItem(parent)
child_item = parent_item.findBranch(base_class)
if child_item:
return self.index(child_item.childNumber(), 0, parent)
else:
return QtCore.QModelIndex()
    def expandChildren(self, index, view):
        """Recursively expand `index` and all of its descendants in `view`."""
        if not index.isValid():
            return
        for i in range(0, index.model().rowCount(index)):
            child = index.child(i, 0)
            self.expandChildren(child, view)
        # NOTE(review): QTreeView exposes isExpanded(), not expanded() --
        # confirm that `view` really provides an expanded() query.
        if not view.expanded(index):
            view.expand(index)
    def expandAll(self):
        """Expand every item in the tree.

        NOTE(review): expandChildren() requires a `view` argument that is not
        supplied here, so this call raises TypeError as written -- confirm
        which view was intended and pass it through.
        """
        self.expandChildren(self.createIndex(0, 0, self._root_item))
# Tree Item for Plugins
class PluginTreeItem:
    """A node in the plugin tree: a base-class branch or a plugin leaf.

    Each node holds a PluginState; branches render their base class in
    column 0, leaves render their type class (column 0) and name (column 1).
    """

    def __init__(self, state=None, parent=None):
        """Create a node holding `state` (a PluginState) under `parent`.

        Bug fix: the old signature used the mutable default
        ``state=PluginState()``, which is evaluated once at definition time,
        so every item created without an explicit state shared (and mutated)
        a single PluginState instance.
        """
        self._parent_item = parent
        self._child_items = []
        self._plugin_state = PluginState()
        self.setData(PluginState() if state is None else state)

    def clear(self):
        """Recursively drop all children and reset this node's state."""
        for child in self._child_items:
            child.clear()
        self._child_items = []
        self._plugin_state = PluginState()

    def child(self, row):
        """Return the child at `row`, or None when out of range."""
        if row < self.childCount():
            return self._child_items[row]
        return None

    def childs(self):
        """Return the (mutable) list of child items."""
        return self._child_items

    def childCount(self):
        """Return the number of children."""
        return len(self._child_items)

    def childNumber(self):
        """Return this node's row within its parent (0 for the root)."""
        if self._parent_item is not None:
            return self._parent_item._child_items.index(self)
        return 0

    def insertChildren(self, position, count):
        """Insert `count` blank children at `position`; False when out of range."""
        if position < 0 or position > self.childCount():
            return False
        for _ in range(count):
            self._child_items.insert(position, PluginTreeItem(parent=self))
        return True

    def removeChildren(self, position, count):
        """Remove `count` children starting at `position`; False when out of range."""
        if position < 0 or position > self.childCount():
            return False
        del self._child_items[position:position + count]
        return True

    def columnCount(self):
        """The tree always shows two columns (class, name)."""
        return 2

    def data(self, column):
        """Return the display text for `column` (None for unknown columns)."""
        if column == 0:
            if self.childCount() > 0:
                return self._plugin_state.description.base_class
            return self._plugin_state.description.type_class
        if column == 1:
            if self.childCount() > 0:
                return ""
            return self._plugin_state.description.name
        return None

    def setData(self, state):
        """Attach `state` (a PluginState) to this node."""
        self._plugin_state = state

    def getPluginState(self):
        """Return the attached PluginState."""
        return self._plugin_state

    def findChild(self, description):
        """Return the first child whose plugin description equals `description`."""
        # Explicit loop instead of filter(): short-circuits on the first hit
        # and behaves identically on Python 3, where filter() is lazy.
        for child in self._child_items:
            if child.getPluginState().description == description:
                return child
        return None

    def findBranch(self, base_class):
        """Return the first child whose description has `base_class`."""
        for child in self._child_items:
            if child.getPluginState().description.base_class == base_class:
                return child
        return None

    def parentItem(self):
        """Return the parent node (None for the root)."""
        return self._parent_item
#include "snowball_runtime.h"
#ifdef SNOWBALL_RUNTIME_THROW_EXCEPTIONS
# include <new>
# include <stdexcept>
# define SNOWBALL_RETURN_OK return
# define SNOWBALL_RETURN_OR_THROW(R, E) throw E
# define SNOWBALL_PROPAGATE_ERR(F) F
#else
# define SNOWBALL_RETURN_OK return 0
# define SNOWBALL_RETURN_OR_THROW(R, E) return R
# define SNOWBALL_PROPAGATE_ERR(F) do { \
int snowball_err = F; \
if (snowball_err < 0) return snowball_err; \
} while (0)
#endif
#define CREATE_SIZE 1
/* Allocate a new, empty symbol buffer with capacity CREATE_SIZE.  The
 * returned pointer is offset HEAD bytes into the allocation so the
 * size/capacity bookkeeping can live in front of the symbol data. */
extern symbol * create_s(void) {
    symbol * p;
    void * mem = malloc(HEAD + (CREATE_SIZE + 1) * sizeof(symbol));
    if (mem == NULL)
        SNOWBALL_RETURN_OR_THROW(NULL, std::bad_alloc());
    p = (symbol *) (HEAD + (char *) mem);
    CAPACITY(p) = CREATE_SIZE;
    SET_SIZE(p, 0);
    return p;
}
/* Release a buffer obtained from create_s(); passing NULL is a no-op.
 * The real allocation starts HEAD bytes before the symbol data. */
extern void lose_s(symbol * p) {
    if (p != NULL)
        free((char *) p - HEAD);
}
/*
   new_p = skip_utf8(p, c, l, n); skips n characters forwards from p + c.
   new_p is the new position, or -1 on failure.
   -- used to implement hop and next in the utf8 case.
*/
extern int skip_utf8(const symbol * p, int c, int limit, int n) {
    int b;
    if (n < 0) return -1;
    for (; n > 0; n--) {
        if (c >= limit) return -1;
        b = p[c++];
        if (b >= 0xC0) {   /* 1100 0000: lead byte of a multi-byte sequence */
            /* consume the continuation bytes that follow the lead byte */
            while (c < limit) {
                b = p[c];
                if (b >= 0xC0 || b < 0x80) break;
                /* break unless b is 10------ */
                c++;
            }
        }
    }
    return c;
}
/*
   new_p = skip_b_utf8(p, c, lb, n); skips n characters backwards from p + c - 1
   new_p is the new position, or -1 on failure.
   -- used to implement hop and next in the utf8 case.
*/
extern int skip_b_utf8(const symbol * p, int c, int limit, int n) {
    int b;
    if (n < 0) return -1;
    for (; n > 0; n--) {
        if (c <= limit) return -1;
        b = p[--c];
        if (b >= 0x80) {   /* 1000 0000: part of a multi-byte sequence */
            /* step back over continuation bytes until the lead byte */
            while (c > limit) {
                b = p[c];
                if (b >= 0xC0) break;   /* 1100 0000: found the lead byte */
                c--;
            }
        }
    }
    return c;
}
/* Code for character groupings: utf8 cases */

/* Decode the UTF-8 sequence starting at p[c] into *slot and return its
 * length in bytes (1..4), or 0 when c >= l.  A sequence truncated by the
 * end of the buffer is decoded from the bytes that are present. */
static int get_utf8(const symbol * p, int c, int l, int * slot) {
    int b0, b1, b2;
    if (c >= l) return 0;
    b0 = p[c++];
    if (b0 < 0xC0 || c == l) {   /* 1100 0000: single byte (or truncated) */
        *slot = b0;
        return 1;
    }
    b1 = p[c++] & 0x3F;
    if (b0 < 0xE0 || c == l) {   /* 1110 0000: two-byte sequence */
        *slot = (b0 & 0x1F) << 6 | b1;
        return 2;
    }
    b2 = p[c++] & 0x3F;
    if (b0 < 0xF0 || c == l) {   /* 1111 0000: three-byte sequence */
        *slot = (b0 & 0xF) << 12 | b1 << 6 | b2;
        return 3;
    }
    *slot = (b0 & 0x7) << 18 | b1 << 12 | b2 << 6 | (p[c] & 0x3F);
    return 4;
}
/* Decode the UTF-8 sequence ending at p[c - 1] (reading backwards, not past
 * lb) into *slot and return its length in bytes (1..4), or 0 when c <= lb. */
static int get_b_utf8(const symbol * p, int c, int lb, int * slot) {
    int a, b;
    if (c <= lb) return 0;
    b = p[--c];
    if (b < 0x80 || c == lb) {   /* 1000 0000: plain ASCII byte */
        *slot = b;
        return 1;
    }
    a = b & 0x3F;                /* accumulate continuation payload */
    b = p[--c];
    if (b >= 0xC0 || c == lb) {  /* 1100 0000: two-byte lead */
        *slot = (b & 0x1F) << 6 | a;
        return 2;
    }
    a |= (b & 0x3F) << 6;
    b = p[--c];
    if (b >= 0xE0 || c == lb) {  /* 1110 0000: three-byte lead */
        *slot = (b & 0xF) << 12 | a;
        return 3;
    }
    *slot = (p[--c] & 0x7) << 18 | (b & 0x3F) << 12 | a;
    return 4;
}
/* If the character at the cursor belongs to the grouping bitmap s (covering
 * code points min..max), advance past it and return 0; with repeat != 0 a
 * whole run of members is consumed first.  Returns the non-member
 * character's byte width, or -1 at the end of the forward region. */
extern int in_grouping_U(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        int w = get_utf8(z->p, z->c, z->l, & ch);
        if (!w) return -1;
        /* bit (ch - min) of s decides membership */
        if (ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
            return w;
        z->c += w;
    } while (repeat);
    return 0;
}
/* Backward counterpart of in_grouping_U: consume member characters moving
 * the cursor left; return 0 on success, the non-member's byte width, or -1
 * at the left boundary of the region. */
extern int in_grouping_b_U(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        int w = get_b_utf8(z->p, z->c, z->lb, & ch);
        if (!w) return -1;
        if (ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
            return w;
        z->c -= w;
    } while (repeat);
    return 0;
}
/* Complement of in_grouping_U: consume characters NOT in the grouping s.
 * Returns 0 on success, a member character's byte width, or -1 at the end
 * of the forward region. */
extern int out_grouping_U(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        int w = get_utf8(z->p, z->c, z->l, & ch);
        if (!w) return -1;
        if (!(ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0))
            return w;
        z->c += w;
    } while (repeat);
    return 0;
}
/* Backward counterpart of out_grouping_U: consume non-member characters
 * moving the cursor left; return 0 on success, a member's byte width, or
 * -1 at the left boundary of the region. */
extern int out_grouping_b_U(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        int w = get_b_utf8(z->p, z->c, z->lb, & ch);
        if (!w) return -1;
        if (!(ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0))
            return w;
        z->c -= w;
    } while (repeat);
    return 0;
}
/* Code for character groupings: non-utf8 cases */

/* Single-byte variant of in_grouping_U: consume characters that are members
 * of the grouping bitmap s; return 0 on success, 1 for a non-member
 * (single-byte width), or -1 at the end of the forward region. */
extern int in_grouping(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        if (z->c >= z->l) return -1;
        ch = z->p[z->c];
        if (ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
            return 1;
        z->c++;
    } while (repeat);
    return 0;
}
/* Backward single-byte variant of in_grouping; -1 at the left boundary. */
extern int in_grouping_b(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        if (z->c <= z->lb) return -1;
        ch = z->p[z->c - 1];
        if (ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
            return 1;
        z->c--;
    } while (repeat);
    return 0;
}
/* Single-byte variant of out_grouping_U: consume characters NOT in the
 * grouping; return 0 on success, 1 for a member, -1 at the end. */
extern int out_grouping(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        if (z->c >= z->l) return -1;
        ch = z->p[z->c];
        if (!(ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0))
            return 1;
        z->c++;
    } while (repeat);
    return 0;
}
/* Backward single-byte variant of out_grouping; -1 at the left boundary. */
extern int out_grouping_b(struct SN_env * z, const unsigned char * s, int min, int max, int repeat) {
    do {
        int ch;
        if (z->c <= z->lb) return -1;
        ch = z->p[z->c - 1];
        if (!(ch > max || (ch -= min) < 0 || (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0))
            return 1;
        z->c--;
    } while (repeat);
    return 0;
}
/* Forward-match the s_size symbols at s against the text at the cursor;
 * advance the cursor and return 1 on success, return 0 otherwise. */
extern int eq_s(struct SN_env * z, int s_size, const symbol * s) {
    if (z->l - z->c < s_size) return 0;
    if (memcmp(z->p + z->c, s, s_size * sizeof(symbol)) != 0) return 0;
    z->c += s_size;
    return 1;
}
/* Backward-match the s_size symbols at s against the text just before the
 * cursor; move the cursor left and return 1 on success, 0 otherwise. */
extern int eq_s_b(struct SN_env * z, int s_size, const symbol * s) {
    if (z->c - z->lb < s_size) return 0;
    if (memcmp(z->p + z->c - s_size, s, s_size * sizeof(symbol)) != 0) return 0;
    z->c -= s_size;
    return 1;
}
/* Forward-match the sized string p (length taken from its header). */
extern int eq_v(struct SN_env * z, const symbol * p) {
    return eq_s(z, SIZE(p), p);
}
/* Backward-match the sized string p (length taken from its header). */
extern int eq_v_b(struct SN_env * z, const symbol * p) {
    return eq_s_b(z, SIZE(p), p);
}
/* Find the longest entry of the sorted among-table v (v_size entries) that
 * matches the text at the cursor.  A binary search narrows the candidates,
 * tracking the length of the prefix already known to match on each side
 * (common_i / common_j) so bytes are never re-compared.  The substring_i
 * chain then walks from the longest candidate to shorter ones until one
 * whose optional condition function succeeds.  Returns that entry's result
 * code (cursor advanced past the match) or 0 on no match. */
extern int find_among(struct SN_env * z, const struct among * v, int v_size,
                      int (*call_among_func)(struct SN_env*)) {
    int i = 0;
    int j = v_size;
    int c = z->c; int l = z->l;
    const symbol * q = z->p + c;
    const struct among * w;
    int common_i = 0;
    int common_j = 0;
    int first_key_inspected = 0;
    while (1) {
        int k = i + ((j - i) >> 1);
        int diff = 0;
        int common = common_i < common_j ? common_i : common_j; /* smaller */
        w = v + k;
        {
            /* compare from the shared prefix onwards only */
            int i2; for (i2 = common; i2 < w->s_size; i2++) {
                if (c + common == l) { diff = -1; break; }
                diff = q[common] - w->s[i2];
                if (diff != 0) break;
                common++;
            }
        }
        if (diff < 0) {
            j = k;
            common_j = common;
        } else {
            i = k;
            common_i = common;
        }
        if (j - i <= 1) {
            if (i > 0) break; /* v->s has been inspected */
            if (j == i) break; /* only one item in v */
            /* - but now we need to go round once more to get
               v->s inspected. This looks messy, but is actually
               the optimal approach. */
            if (first_key_inspected) break;
            first_key_inspected = 1;
        }
    }
    /* walk the substring chain from the longest matching candidate */
    w = v + i;
    while (1) {
        if (common_i >= w->s_size) {
            z->c = c + w->s_size;
            if (!w->function) return w->result;
            z->af = w->function;
            if (call_among_func(z)) {
                z->c = c + w->s_size;
                return w->result;
            }
        }
        if (!w->substring_i) return 0;
        w += w->substring_i;
    }
}
/* find_among_b is for backwards processing. Same comments apply */
/* Identical strategy to find_among, but strings are matched ending at the
 * cursor and the cursor moves left on success. */
extern int find_among_b(struct SN_env * z, const struct among * v, int v_size,
                        int (*call_among_func)(struct SN_env*)) {
    int i = 0;
    int j = v_size;
    int c = z->c; int lb = z->lb;
    const symbol * q = z->p + c - 1;
    const struct among * w;
    int common_i = 0;
    int common_j = 0;
    int first_key_inspected = 0;
    while (1) {
        int k = i + ((j - i) >> 1);
        int diff = 0;
        int common = common_i < common_j ? common_i : common_j;
        w = v + k;
        {
            /* compare right-to-left, from the shared suffix onwards */
            int i2; for (i2 = w->s_size - 1 - common; i2 >= 0; i2--) {
                if (c - common == lb) { diff = -1; break; }
                diff = q[- common] - w->s[i2];
                if (diff != 0) break;
                common++;
            }
        }
        if (diff < 0) { j = k; common_j = common; }
        else { i = k; common_i = common; }
        if (j - i <= 1) {
            if (i > 0) break;
            if (j == i) break;
            if (first_key_inspected) break;
            first_key_inspected = 1;
        }
    }
    /* walk the substring chain from the longest matching candidate */
    w = v + i;
    while (1) {
        if (common_i >= w->s_size) {
            z->c = c - w->s_size;
            if (!w->function) return w->result;
            z->af = w->function;
            if (call_among_func(z)) {
                z->c = c - w->s_size;
                return w->result;
            }
        }
        if (!w->substring_i) return 0;
        w += w->substring_i;
    }
}
/* Increase the size of the buffer pointed to by p to at least n symbols.
 * On success, returns 0. If insufficient memory, returns -1.
 * Over-allocates by 20 symbols to amortise repeated growth. */
static int increase_size(symbol ** p, int n) {
    int new_size = n + 20;
    void * mem = realloc((char *) *p - HEAD,
                         HEAD + (new_size + 1) * sizeof(symbol));
    symbol * q;
    if (mem == NULL) return -1;
    q = (symbol *) (HEAD + (char *)mem);
    CAPACITY(q) = new_size;
    *p = q;
    return 0;
}
/* to replace symbols between c_bra and c_ket in z->p by the
   s_size symbols at s.
   Returns 0 on success, -1 on error.
*/
extern SNOWBALL_ERR replace_s(struct SN_env * z, int c_bra, int c_ket, int s_size, const symbol * s)
{
    /* how much the buffer grows (positive) or shrinks (negative) */
    int adjustment = s_size - (c_ket - c_bra);
    if (adjustment != 0) {
        int len = SIZE(z->p);
        if (adjustment + len > CAPACITY(z->p)) {
            SNOWBALL_PROPAGATE_ERR(increase_size(&z->p, adjustment + len));
        }
        /* shift the tail after c_ket into its new position */
        memmove(z->p + c_ket + adjustment,
                z->p + c_ket,
                (len - c_ket) * sizeof(symbol));
        SET_SIZE(z->p, adjustment + len);
        z->l += adjustment;
        /* keep the cursor on the same logical character */
        if (z->c >= c_ket)
            z->c += adjustment;
        else if (z->c > c_bra)
            z->c = c_bra;
    }
    if (s_size) memmove(z->p + c_bra, s, s_size * sizeof(symbol));
    SNOWBALL_RETURN_OK;
}
# define REPLACE_S(Z, B, K, SIZE, S) \
SNOWBALL_PROPAGATE_ERR(replace_s(Z, B, K, SIZE, S))
/* Validate the slice delimiters (bra/ket) before a slice operation.
 * Fails (error return or exception, depending on build) when the
 * delimiters are out of order or outside the buffer. */
static SNOWBALL_ERR slice_check(struct SN_env * z) {
    if (z->bra < 0 ||
        z->bra > z->ket ||
        z->ket > z->l ||
        z->l > SIZE(z->p)) /* this line could be removed */
    {
#if 0
        fprintf(stderr, "faulty slice operation:\n");
        debug(z, -1, 0);
#endif
        SNOWBALL_RETURN_OR_THROW(-1, std::logic_error("Snowball slice invalid"));
    }
    SNOWBALL_RETURN_OK;
}
# define SLICE_CHECK(Z) SNOWBALL_PROPAGATE_ERR(slice_check(Z))
/* Replace the current slice (bra..ket) with the s_size symbols at s and
 * move ket to the end of the inserted text. */
extern SNOWBALL_ERR slice_from_s(struct SN_env * z, int s_size, const symbol * s) {
    SLICE_CHECK(z);
    REPLACE_S(z, z->bra, z->ket, s_size, s);
    z->ket = z->bra + s_size;
    SNOWBALL_RETURN_OK;
}
/* Replace the current slice with the sized string p. */
extern SNOWBALL_ERR slice_from_v(struct SN_env * z, const symbol * p) {
    return slice_from_s(z, SIZE(p), p);
}
/* Delete the current slice (bra..ket) from the buffer, shrinking it in
 * place, and collapse ket onto bra.  The cursor is adjusted so it stays
 * on the same logical character. */
extern SNOWBALL_ERR slice_del(struct SN_env * z) {
    SLICE_CHECK(z);
    {
        int slice_size = z->ket - z->bra;
        if (slice_size != 0) {
            int len = SIZE(z->p);
            /* close the gap left by the deleted slice */
            memmove(z->p + z->bra,
                    z->p + z->ket,
                    (len - z->ket) * sizeof(symbol));
            SET_SIZE(z->p, len - slice_size);
            z->l -= slice_size;
            if (z->c >= z->ket)
                z->c -= slice_size;
            else if (z->c > z->bra)
                z->c = z->bra;
        }
    }
    z->ket = z->bra;
    SNOWBALL_RETURN_OK;
}
/* Replace the region bra..ket (caller-supplied, not the slice markers)
 * with the s_size symbols at s, shifting the slice markers bra/ket when
 * the insertion happens at or before them. */
extern SNOWBALL_ERR insert_s(struct SN_env * z, int bra, int ket, int s_size, const symbol * s) {
    REPLACE_S(z, bra, ket, s_size, s);
    if (bra <= z->ket) {
        int adjustment = s_size - (ket - bra);
        z->ket += adjustment;
        if (bra <= z->bra) z->bra += adjustment;
    }
    SNOWBALL_RETURN_OK;
}
/* insert_s with the length taken from the sized string p. */
extern SNOWBALL_ERR insert_v(struct SN_env * z, int bra, int ket, const symbol * p) {
    return insert_s(z, bra, ket, SIZE(p), p);
}
/* Copy the current slice (bra..ket) into the caller's sized buffer *p,
 * growing it if necessary. */
extern SNOWBALL_ERR slice_to(struct SN_env * z, symbol ** p) {
    SLICE_CHECK(z);
    {
        int len = z->ket - z->bra;
        if (CAPACITY(*p) < len) {
            SNOWBALL_PROPAGATE_ERR(increase_size(p, len));
        }
        memmove(*p, z->p + z->bra, len * sizeof(symbol));
        SET_SIZE(*p, len);
    }
    SNOWBALL_RETURN_OK;
}
/* Copy the whole current text (up to z->l) into the caller's sized buffer
 * *p, growing it if necessary. */
extern SNOWBALL_ERR assign_to(struct SN_env * z, symbol ** p) {
    int len = z->l;
    if (CAPACITY(*p) < len) {
        SNOWBALL_PROPAGATE_ERR(increase_size(p, len));
    }
    memmove(*p, z->p, len * sizeof(symbol));
    SET_SIZE(*p, len);
    SNOWBALL_RETURN_OK;
}
/* Count the UTF-8 encoded characters in the sized string p: every byte
 * except continuation bytes (0x80..0xBF) starts a character. */
extern int len_utf8(const symbol * p) {
    int remaining = SIZE(p);
    int count = 0;
    while (remaining-- > 0) {
        symbol b = *p++;
        if (b < 0x80 || b >= 0xC0) ++count;
    }
    return count;
}
# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from . import frontend
class FakeTime(object):
    """Deterministic stand-ins for GetUserCpuTime, GetWalltime and BusyWait.

    Each busy_wait() call advances the simulated wall clock by
    wall_time_step and the simulated CPU clock by cpu_time_step, which lets
    a test emulate any fraction of CPU time available to the process.
    """

    def __init__(self, cpu_time_step=1.0, wall_time_step=1.0):
        self.cpu_time = 0.0
        self.wall_time = 0.0
        self.cpu_time_step = cpu_time_step
        self.wall_time_step = wall_time_step

    def get_walltime(self):
        """Current simulated wall-clock time in seconds."""
        return self.wall_time

    def get_user_cputime(self):
        """Current simulated user CPU time in seconds."""
        return self.cpu_time

    def busy_wait(self):
        """Advance both simulated clocks by one step."""
        self.cpu_time += self.cpu_time_step
        self.wall_time += self.wall_time_step
class TestHandlers(unittest.TestCase):
    """Tests for frontend.CpuBurner using fully simulated clocks."""

    def setUp(self):
        self.fake_time = FakeTime()
        self.cpu_burner = frontend.CpuBurner()
        # Replace the burner's real time sources and busy loop with the
        # fakes so the tests are deterministic and run instantly.
        self.cpu_burner.get_user_cputime = self.fake_time.get_user_cputime
        self.cpu_burner.get_walltime = self.fake_time.get_walltime
        self.cpu_burner.busy_wait = self.fake_time.busy_wait

    # In this test scenario CPU time advances at 25% of the wall time speed.
    # Given the request requires 1 CPU core second, we expect it to finish
    # within the timeout (5 seconds) and return success.
    def test_ok_response(self):
        self.fake_time.cpu_time_step = 0.25
        (code, _) = self.cpu_burner.handle_http_request()
        self.assertEqual(200, code)

    # In this test scenario CPU time advances at 15% of the wall time speed.
    # Given the request requires 1 CPU core second, we expect it to timeout
    # after 5 simulated wall time seconds and return error 500.
    def test_timeout(self):
        self.fake_time.cpu_time_step = 0.15
        (code, _) = self.cpu_burner.handle_http_request()
        self.assertEqual(500, code)
# Allow running this module directly: discover and run the tests above.
if __name__ == '__main__':
    unittest.main()
### CAPOW (calculation and plotting of optimal weights) ###
# NOTES: #
# Written using pyqt (4.10.1), matplotlib (2.0.2), numpy (1.12.1), scipy (0.19.0), python (2.7.5)
# requires window_tab.py (gui) and shelx_weighting.py (code to run weighting minimization)
# input: .fcf, SHELX LIST 4 or 8, (with .cif and .ins) or .fco, XD2016,(with .cif and .mas)
##Known issues:
# "RuntimeWarning: PyOS_InputHook is not available for interactive use of PyGTK" - problem is when importing pyplot from matplotlib. Works using matplotlib (2.0.2).
## To report errors/give feedback:
## - N.Johnson5@ncl.ac.uk or Michael.Probert@ncl.ac.uk
import sys, os
## Check that matplotlib, scipy, numpy, pyqt4 are all installed on computer ##
run = True #Flag, will change to false if module versions do not match.
try:
from matplotlib import __version__ as mp_u_vers #getting version of matplotlib on user computer
except ImportError:
print "Program Exiting: Matplotlib module is needed to run this code. Please install and try again."
sys.exit()
try:
from scipy import __version__ as s_u_vers #getting version of scipyon user computer
except ImportError:
print "Program Exiting: Scipy module is needed to run this code. Please install and try again."
sys.exit()
try:
from numpy import __version__ as n_u_vers #getting version of numpy on user computer
except ImportError:
print "Program Exiting: Numpy module is needed to run this code. Please install and try again."
sys.exit()
try:
from PyQt4.Qt import PYQT_VERSION_STR as pq_u_vers # getting version of pyqt4, method from pyqt website, on user computer
except ImportError:
print "Program Exiting: PyQt module is needed to run this code. Please install and try again."
sys.exit()
py_u_vers = sys.version.split()[0] #getting version of python on user computer, prints long string with version number at start
## Check versions of required modules ##
def bruce(w_vers, u_vers):
    """Compare one version component.

    Returns "H" if the user's component is higher than the tested one,
    "S" if they are equal and "L" if it is lower.

    Bug fix: components are now compared numerically when possible.  The
    old plain string comparison ranked e.g. "10" below "9", so a newer
    point release could be misreported as too old.
    """
    try:
        w, u = int(w_vers), int(u_vers)
    except (TypeError, ValueError):
        # non-numeric components (e.g. "2rc1") fall back to string order
        w, u = w_vers, u_vers
    if u > w:
        return "H"
    if u == w:
        return "S"
    return "L"
def compare_versions(written_vers, user_vers):
    """Compare two dotted version strings component by component.

    Returns "H" when `user_vers` is higher than `written_vers`, "L" when
    it is lower and "S" when the two are equal.

    Bug fixes relative to the original implementation:
    * components are compared numerically, so e.g. 4.10 > 4.9;
    * any number of components is handled (missing components count as 0,
      so "2.7" equals "2.7.0") instead of assuming exactly three;
    * equal versions return "S" rather than an accidental
      ``(value, "should not happen")`` tuple.
    """
    def _component(value):
        # numeric components compare as ints; odd ones fall back to strings
        try:
            return int(value)
        except ValueError:
            return value

    w_parts = [_component(v) for v in written_vers.split(".")]
    u_parts = [_component(v) for v in user_vers.split(".")]
    length = max(len(w_parts), len(u_parts))
    w_parts += [0] * (length - len(w_parts))
    u_parts += [0] * (length - len(u_parts))
    for w, u in zip(w_parts, u_parts):
        if u == w:
            continue
        return "H" if u > w else "L"
    return "S"
## version of python/package code, program written with
mp_w_vers = "2.0.2" #matplotlib
s_w_vers = "0.19.0" #scipy
n_w_vers = "1.12.1" #numpy
pq_w_vers = "4.10.1" #pyqt4
py_w_vers = "2.7.5" #python
## Checking if module versions are the same as what the code was written with ##
check_packages = ["", "", "", "", ""] #for use later when checking whether
if mp_u_vers != mp_w_vers :
check_packages[0] = compare_versions(mp_w_vers, mp_u_vers)
if check_packages[0] == "L":
print "Program Exiting: The code was written and tested with matplotlib (%s). Your version of matplotlib is (%s). Using an earlier version of matplotlib will cause the program to break." %(mp_w_vers, mp_u_vers)
run = False
else:
check_packages[0] = "S"
if s_u_vers != s_w_vers:
run = False
check_packages[1] = compare_versions(s_w_vers,s_u_vers)
if check_packages[1] == "L":
print "Program Exiting: The code was written and tested with scipy (%s). Your version of scipy is (%s). Using an earlier version of scipy may cause the program to break. " %(s_w_vers, s_u_vers)
else:
check_packages[1] = "S"
if n_u_vers != n_w_vers:
run = False
check_packages[2] = compare_versions(n_w_vers,n_u_vers)
if check_packages[2] == "L":
print "Program Exiting: The code was written and tested with numpy (%s). Your version of numpy is (%s). Using an earlier version of numpy may cause the program to break." %(n_w_vers, np_u_vers)
else:
check_packages[2] = "S"
if pq_u_vers != pq_w_vers:
run = False
check_packages[3] = compare_versions(pq_w_vers,pq_u_vers)
if check_packages[3] == "L":
print "Program Exiting: The code was written and tested with PyQt4 (%s). Your version of PyQt4 is (%s). Using an earlier version of PyQt4 may cause the program to break." % (pq_w_vers, pq_u_vers)
else:
check_packages[3] = "S"
if py_u_vers != py_w_vers:
run = False
check_packages[4] = compare_versions(py_w_vers,py_u_vers)
if check_packages[4] == "L":
print "Program Exiting: The code was written and tested with python (%s). Your version of python is (%s). Using an earlier version of python may cause the program to break." %(py_w_vers, py_u_vers)
else:
check_packages[4] = "S"
## If modules are not the same, check if user still wants to run code ###
if run == False:
    # Any "L" entry means an older-than-tested package: refuse to start.
    for i in check_packages:
        if i == "L":
            print "Please update python/packages and try again."
            sys.exit()
    ## code should only be running here if there are "H" in list.
    # assumes that code is still compatible with later versions of the packages
    print "Warning: CAPOW was written with : \n python: %s \n matplotlib: %s \n scipy: %s \n numpy: %s \n PyQt4: %s" % (py_w_vers, mp_w_vers, s_w_vers, n_w_vers, pq_w_vers)
    print "This computer is using : \n python: %s \n matplotlib: %s \n scipy: %s \n numpy: %s \n PyQt4: %s"% (py_u_vers, mp_u_vers, s_u_vers, n_u_vers, pq_u_vers)
    print "There may be issues with incompatibilities of functions."
    #sys.exit()
else:
    pass
print "Running CAPOW"
import math
from copy import deepcopy
from PyQt4.uic import loadUiType
from PyQt4 import QtCore, QtGui
try:
from window_tab import Ui_MainWindow
except IOError:
print "Program exiting: window-tab.py file is not present in this folder. Please download a copy and try again."
sys.exit()
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from matplotlib import cm
import matplotlib.pyplot as plt #this might break with older versions of matplotlib giving a runtime error.
import numpy as np
import scipy.stats as stats
try:
from shelx_weighting import shelx_weighting_calc
except ImportError:
print "Program exiting: shelx_weighting.py is not present in this folder. Please download a copy and try again."
sys.exit()
###
class Main(QtGui.QMainWindow, Ui_MainWindow):
    def __init__(self, scale_factor):
        """Build the main window: load the Qt UI, create the two info-box
        list entries, remember the display scale factor and wire everything
        up via set_up()."""
        super(Main, self).__init__()
        self.setupUi(self)
        self.qlist_item = QtGui.QListWidgetItem(self.mpl_figs)
        self.tab2_info = QtGui.QListWidgetItem(self.tab2_infobox)
        self.scale_factor = scale_factor
        self.set_up()  # sets up gui and matplotlib widget
        # Flag in case the user tries to run the weighting scheme code before
        # a file has been selected; if False an error message will show.
        self.file_selected = False
    def set_up(self):
        """Function to connect all the buttons and set up the table headers for the widget"""
        # connecting buttons and functions
        self.apply_button.clicked.connect(self.recalc_fig)
        self.calc_weight_button.clicked.connect(self.calculate_weighting_scheme)
        self.copy_weights.clicked.connect(self.send_weights)
        self.drk_button.clicked.connect(self.drk_lim)
        self.clear_button.clicked.connect(self.clr_lim)
        self.clearweights.clicked.connect(self.clear_weights)
        self.actionSelect_File.triggered.connect(self.select_file)
        # adding equality signs into the filter combo boxes (both tabs)
        self.i_filt_eq.insertItem(0, ">")
        self.i_filt_eq.insertItem(1, "<")
        self.i_sig_filt_eq.insertItem(0, ">")
        self.i_sig_filt_eq.insertItem(1, "<")
        self.i_filt_eq_2.insertItem(0, ">")
        self.i_filt_eq_2.insertItem(1, "<")
        self.i_sig_filt_eq_2.insertItem(0, ">")
        self.i_sig_filt_eq_2.insertItem(1, "<")
        # resolution combos read naturally: "resoln < 1" keeps values below 1
        self.resoln_upper_eq.insertItem(0, "<")   # swapped this round to make it read nicer.
        self.resoln_upper_eq_2.insertItem(0, "<") # swapped
        self.resoln_lower_eq_2.insertItem(0, ">") # swapped
        self.resoln_lower_eq.insertItem(0, ">")   # swapped
        # default comparison operators for the intensity filters
        self.filt_eq = ">"
        self.sig_filt_eq = ">"
        # naming header for weighting scheme tab table
        headers = ["a", "b", "goof", "wr2", "stop a", "stop b", "I cutoff", "Isig cutoff", "resoln <", "resoln >", "no of reflns", "start a", "start b", "bin", "bin variance"]
        self.tablemodel = QtGui.QStandardItemModel()  # QTableView needs a model to display data
        self.tablemodel.setHorizontalHeaderLabels(headers)
        self.weighting_tableview.setModel(self.tablemodel)
        # sets up matplotlib widget: figure, axes, canvas and toolbar
        self.fig1 = plt.figure()
        self.ax1f1 = self.fig1.add_subplot(1, 1, 1)
        self.ax1f1.plot([-4, 4], [-4, 4], '--', color='r')  # plotting expected plot line
        self.canvas = FigureCanvas(self.fig1)
        self.mplvl.addWidget(self.canvas)
        self.canvas.draw()
        self.toolbar = NavigationToolbar(self.canvas, self.mplwindow, coordinates=True)
        self.mplvl.addWidget(self.toolbar)
    def select_file(self):
        """Allows user to select a file to use and then creates normal probability plot from the data

        Opens a file dialog for a .fcf (SHELX) or .fco (XD2016) structure
        factor file, resets every widget that may hold data from a previous
        file, locates the matching .cif file (to read the number of
        independent parameters) and finally triggers addmpl() to draw the
        normal probability plot.
        """
        dlg = QtGui.QFileDialog()
        marker = os.getcwd()
        plt.cla()  # clear matplotlib
        file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select structure factor file', marker, "fcf or fco files (*.fcf *.fco)")
        #file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select structure factor file', '/home/',"fcf or fco files (*.fcf *.fco)") #change '/home/' to change the folder the code opens first.
        file_name = str(file_name)
        self.code_run = True  # flag to stop weighting scheme code from running. Used later
        if os.path.isfile(file_name) == False:  # if user did not select a file
            self.qlist_item.setText("Error: File has not been selected")
        else:
            self.i_file = file_name
            #self.addmpl()
            self.calc_weight_button.setEnabled(True)
            # Flag in case the user tries to run the weighting scheme code
            # before a file has been selected
            self.file_selected = True
            ### Reset items from a potential previous file
            self.lamda = 0.0
            self.shelx_a = 0.0
            self.shelx_b = 0.0
            self.mas_a = 0.0
            self.mas_b = 0.0
            self.mas_c = 0.0
            self.mas_d = 0.0
            self.mas_e = 0.0
            self.mas_f = 0.3333333333
            self.n_independant_params = ""
            ### reset fields in normal probability plot tab ####
            i_filt = self.i_filt.setText("")
            i_sig_filt = self.i_sig_filt.setText("")
            resoln_upper_filt = self.resoln_upper_filt.setText("")
            resoln_lower_filt = self.resoln_lower_filt.setText("")
            self.clr_lim()
            self.tab2_info.setText("")
            self.qlist_item.setText("")
            ### reset fields in weighting calculator tab ###
            self.clear_weights()
            self.resoln_upper_filt_2.setText("")
            self.resoln_lower_filt_2.setText("")
            self.i_filt_2.setText("")
            self.i_sig_filt_2.setText("")
            self.i_filt_2.setText("")
            self.i_sig_filt_2.setText("")
            self.a_stop.setText("")
            self.b_stop.setText("")
            self.a_start.setText("")  # need to check for float
            self.b_start.setText("")  # need to check for float
            self.i_filt_eq.setCurrentIndex(0)
            self.i_sig_filt_eq.setCurrentIndex(0)
            # need shelx to be checked, and weighting binning, I to be checked.
            ### need cif file to get number of independant parameters
            # separating file name from folders: using os for file functions
            # so it is useable on other operating systems
            file_loc_list = os.path.split(file_name)
            self.calculate_start_check.setChecked(True)
            self.bintype_intensity.setChecked(True)
            self.bintype_resolution.setChecked(False)
            self.i_filt_eq_2.setCurrentIndex(0)
            self.i_sig_filt_eq_2.setCurrentIndex(0)
            # marker to be used when opening file dialog to select file;
            # opens in the folder the fco/fcf file was selected from
            marker = file_loc_list[0]
            ### code will look for and select a cif file with the same starting name as input file, if no file, will open a file dialog to select one ###
            no_cif = False
            if file_name[-3:] == "fco":
                try:
                    potential_lsm_file_lst = os.path.split(file_name)
                    # xd_lsm.cif made during xdlsm refinement
                    new_file_name = potential_lsm_file_lst[1][0:-4] + "_lsm.cif"
                    potential_lsm_file = os.path.join(potential_lsm_file_lst[0], new_file_name)
                    open(potential_lsm_file, "r")
                    cif_file = potential_lsm_file
                except IOError:
                    cif_file = QtGui.QFileDialog.getOpenFileName(self, 'Select cif file', marker, "cif file (*.cif)")
                    if os.path.isfile(str(cif_file)) == False:  # if user did not select a file
                        self.qlist_item.setText("Error: Cif file has not been selected")
                        no_cif = True
            else:
                try:
                    potential_cif_file_lst = os.path.split(file_name)
                    new_cif_file = potential_cif_file_lst[1][0:-3] + "cif"
                    potential_cif_file = os.path.join(potential_cif_file_lst[0], new_cif_file)
                    open(potential_cif_file, "r")
                    cif_file = potential_cif_file
                except IOError:
                    cif_file = QtGui.QFileDialog.getOpenFileName(self, 'Select cif file', marker, "cif file (*.cif)")
                    if os.path.isfile(str(cif_file)) == False:  # if user did not select a file
                        self.qlist_item.setText("Error: Cif file has not been selected")
                        no_cif = True
            if no_cif == True:
                pass
            else:
                # pull the number of refined parameters out of the cif file
                for line in open(cif_file, "r").readlines():
                    if line.startswith("_refine_ls_number_parameters"):
                        n_params = line.split()[1]
                        self.n_independant_params = float(n_params)
                if self.n_independant_params == "":
                    self.cif_info = False  # error capture for later
                    self.qlist_item.setText("Number of independant parameters is not present in cif file. Program has stopped.")
                else:
                    self.cif_info = True
                    self.addmpl()
    def addmpl(self):
        """Build the initial normal-probability plot after a file is loaded.

        Calls get_graph_values to read the reflection data; on any failure
        flag the weighting code is disabled, otherwise the plot is drawn with
        the weights found in the ins/mas file (or statistical/default
        weights) and point-picking is hooked up.
        """
        run_flag = self.get_graph_values(self.i_file) #STOP/GO depending on if graph values have been extracted successfully
        if run_flag == "STOP": #if run flag is STOP then values have not been extracted successfully from code.
            self.code_run = False #This flag stops weighting scheme code from being able to run if there was a problem with importing files
        elif run_flag == "NoP": #no parameter from cif
            self.code_run = False
        elif run_flag == "NoI": #no info from ins/mas
            self.code_run = False
        elif run_flag == "NoV": #no values found in fcf/fco
            self.code_run = False
        else:
            self.R2_text = ""
            self.clear_weights()
            y = [-4,0,4] #x and y for expected normal probability line
            x = [-4,0,4]
            def onpick2(event):
                """ function to return information about reflections clicked on in normal probability plot"""
                ind = event.ind
                text_box_text = ""
                text_box_list = ["Reflections Selected:","label:resoln:(fm,fc,sig(F)):,(graph x,y)"]
                if len(ind) < 25:  # only list details for small picks so the text box is not flooded
                    for i in ind:
                        text_box_list.append("%s : %f : (%f, %f, %f) : (%f, %f)"%(self.labels[i],np.take(self.no_col,i) ,np.take(self.fm_only,i),np.take(self.fc_only,i),np.take(self.sigf_only,i),np.take(self.res[0][0], i), np.take(self.res[0][1], i)))
                if len(text_box_list) > 0:
                    text_box_text = "\n".join(text_box_list)#
                    if len(self.R2_text) != 0:
                        # keep the R2 summary visible above the picked reflections
                        text_box_text = self.R2_text + "\n" + text_box_text
                self.qlist_item.setText(text_box_text)
            f = 1/3.0 #default f value
            if self.i_file[-3:] == "fcf":
                if self.weight_applied == True:
                    self.calc_graph(-99999,-99999,999999,-999999,self.shelx_a,self.shelx_b,0.0,0.0,0.0,f) #shelx_a and shelx_b taken from ins file
                else:
                    self.calc_graph(-99999,-99999,999999,-999999,-2.0,0.0,0.0,0.0,0.0,f) #-2.0 is xd default, a < -1.0 gives statistical weights
            else:
                self.calc_graph(-99999,-99999,999999,-999999,self.mas_a,self.mas_b,self.mas_c,self.mas_d,self.mas_e,self.mas_f)
            # no axis limits on the first draw: False means "autoscale" in plot_norm
            x_min= False
            x_max= False
            y_min= False
            y_max = False
            plt.cla()
            self.plot_norm(x_min,x_max,y_min,y_max)
            self.ax1f1.plot(x,y,'--',color='r')
            self.canvas.draw()
            self.canvas.mpl_connect('pick_event', onpick2)
def select_info_file(self,marker,file_type):
"""function to bring up file dialog to select info - ins/mas file to provide extra information about refinement - lambda, applied weight"""
dlg = QtGui.QFileDialog()
if file_type == "fcf":
file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select .ins or .res file', marker,"ins file (*.ins *.res)")
elif file_type == "fco":
file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select .mas file', marker,"mas file (*.mas )")
#self.info_file.setText(file_name)
#self.info_file_name
if os.path.isfile(str(file_name)) == False: #if user did not select a file
self.qlist_item.setText("Error: No ins/mas file has not been selected")
file_name = False
return file_name
def check_info_file(self,i_file, file_type):
""" checks that info file exists"""
info_file_lst = os.path.split(i_file)
if file_type == "fcf":
new_file_name = info_file_lst[1][0:-3] + "ins"
info_file = os.path.join(info_file_lst[0], new_file_name)
file_type="fcf"
elif file_type == "fco":
new_file_name = info_file_lst[1][0:-3] + "mas"
info_file = os.path.join(info_file_lst[0], new_file_name)
file_type ="fco"
try:
open_ins = open(info_file,"r")# ##potential issue if ins file does not have same name as fcf file
except IOError:
marker = os.path.split(i_file)[0]
info_file = self.select_info_file(marker, file_type)
return info_file
    def get_graph_values(self,i_file):
        """Read Fc, Fm and su values from the selected fco/fcf file.

        Also reads the applied weighting scheme and wavelength from the
        companion mas/ins file, and for fcf files computes the resolution
        of each reflection from the unit cell.  Populates self.F_c, self.F_m,
        self.sig_F, self.sth, self.resoln and self.labels_all.

        :param i_file: path of the .fco (XD) or .fcf (SHELX) file
        :return: "GO" on success, or an error token: "STOP" (values not
                 found), "NoP" (no parameter count from cif), "NoI" (no
                 ins/mas info file), "NoV" (no reflections read)
        """
        f= open(i_file,"r") #open selected file
        g=f.readlines()
        s = 0
        start_defined = False
        self.F_c = []
        self.F_m = []
        self.sig_F = []
        self.sth = []
        self.resoln = []
        self.F_v_F = []  # initialised here but not filled by this method
        file_type = i_file[-3:] #xd = fco, shelx = fcf
        self.labels_all = []
        self.sinth_all = []  # initialised here but not filled by this method
        if file_type == "fco":
            for line in g:
                s += 1
                if line.startswith(" _refln_XD_refine_code"):
                    start = s
                    val_order = [4,3,5] # order of line split for [fm, fc, fsig], to be used when getting values from file
                    start_defined = True
                elif line.startswith("data_"):
                    self.code_mark = line.split("_")[1] #gets data flag name from xd.mas
            info_file_name = self.check_info_file(i_file, "fco")
            # NOTE(review): no_info_file is only assigned below; if the fco
            # lacked the expected header lines execution still reaches here,
            # which is the intended flow -- but confirm check_info_file can
            # never raise before assignment.
            if info_file_name == False:
                no_info_file = True #No ins/mas file has been selected
            else:
                no_info_file = False
                open_fco_name = open(info_file_name,"r")
                open_fco = open_fco_name.readlines()
                for line in open_fco:
                    if line.startswith("WEIGHT") or line.startswith("!WEIGHT"):
                        # six-parameter XD weighting scheme a..f
                        weight_line = line.split()
                        self.mas_a = float(weight_line[1])
                        self.mas_b =float(weight_line[2])
                        self.mas_c =float(weight_line[3])
                        self.mas_d =float(weight_line[4])
                        self.mas_e =float(weight_line[5])
                        self.mas_f = float(weight_line[6])
                        self.weight_applied = True
                        self.a_edit.setText(str(self.mas_a))
                        self.b_edit.setText(str(self.mas_b))
                        self.c_edit.setText(str(self.mas_c))
                        self.d_edit.setText(str(self.mas_d))
                        self.e_edit.setText(str(self.mas_e))
                        self.f_edit.setText(str(self.mas_f))
                    elif line.startswith("WAVE"):
                        wave_line = line.split()
                        self.lamda = float(wave_line[1])
                open_fco_name.close()
        elif file_type == "fcf":
            start_string = "NOTDEFINED"
            self.code_mark = "[Plot Name]"
            #### get weights and unit cell parameters from filename.ins ####
            info_file_name = self.check_info_file(i_file, "fcf")
            if info_file_name == False:
                no_info_file = True
            else:
                no_info_file = False
                open_ins_name = open(info_file_name,"r")
                open_ins = open_ins_name.readlines()
                for line in open_ins:
                    if line.startswith("CELL"):
                        # CELL line: wavelength followed by a, b, c, alpha, beta, gamma
                        cell_params = line.split()
                        self.lamda = float(cell_params[1])
                        unit_a =float(cell_params[2])
                        unit_b =float(cell_params[3])
                        unit_c =float(cell_params[4])
                        #numpy uses angles in radians for trig, so need to convert (as we will use these values later)
                        unit_alpha =np.radians(float(cell_params[5]))
                        unit_beta =np.radians(float(cell_params[6]))
                        unit_gamma= np.radians(float(cell_params[7]))
                    elif line.startswith("WGHT"):
                        #takes weight values from ins file to use in graph.
                        shelx_weight = line.split()
                        self.shelx_a = float(shelx_weight[1])
                        self.shelx_b = float(shelx_weight[2])
                        self.weight_applied = True
                        self.a_edit.setText(str(self.shelx_a))
                        self.b_edit.setText(str(self.shelx_b))
                        self.c_edit.setText("0.0")
                        self.d_edit.setText("0.0")
                        self.e_edit.setText("0.0")
                        self.f_edit.setText("1/3")
                        #shelx currently only calculates a and b.
                        #send these values to initial graph
                open_ins_name.close()
            ######
            for line in g:
                s += 1
                if line.startswith("_shelx_refln_list_code"):
                    list_code = line.split()[1]
                    if list_code == "4":
                        #start_defined = True
                        start_string = " _refln_observed_status"
                        val_order = [4, 3, 5] #[Fm2, Fc2,Fsig2]
                    elif list_code == "8":
                        start_string = " _shelx_refinement_sigma"
                        #start_defined = True
                        val_order = [3, 5, 4] #[Fm2, Fc2 ,Fsig2]
                    else:
                        start_defined = False
                        self.qlist_item.setText("Code can only work with LIST 4 and 8")
                        break
                elif line.startswith(start_string) or line.startswith(" %s" %start_string): #sometimes there is an extra space before the start string in fcf
                    start = s
                    start_defined = True
        else:
            # neither .fco nor .fcf extension
            start_defined = False
            self.qlist_item.setText("Program only works with .fco or .fcf (LIST 4,8).")
        if self.cif_info == False:
            self.code_running = False
            return "NoP" #meaning we do not have the number of independant parameters from the cif
        elif no_info_file == True:
            self.code_running = False
            return "NoI"
        elif start_defined == False: #if start of list has not been defined, code will break
            self.qlist_item.setText("Error: Values not found in file.")
            self.code_running = False
            self.recalc = False #so that graph cannot be recalculated because there is no file
            return "STOP" #have been unable to get values from input file therefore will not make graph and print error message
        else:
            # extract reflection values from every data line after the header
            for i in range(start,len(g)):
                l = g[i]
                lst = l.split()
                if len(lst) == 0: continue
                self.F_m.append(float(lst[val_order[0]]))
                self.F_c.append(float(lst[val_order[1]]))
                self.sig_F.append(float(lst[val_order[2]]))
                #get key of hkl for labels #
                ha = int(lst[0])
                la =int(lst[2])
                ku = int(lst[1])
                h = str(ha).rjust(4, " ")
                k = str(ku).rjust(4, " ")
                lz = str(la).rjust(4, " ")
                key_lst = [h,k,lz]
                key = "".join(key_lst)
                key = key.lstrip()
                self.labels_all.append(key)
                if file_type == "fcf":
                    #formula for triclinic from giacavazzo pg 66
                    denom = (1- (np.cos(unit_alpha))**2 - (np.cos(unit_beta))**2 - (np.cos(unit_gamma))**2 + 2*np.cos(unit_alpha)*np.cos(unit_beta)*np.cos(unit_gamma))
                    term1 =(ha**2/unit_a**2)*(np.sin(unit_alpha)**2)
                    term2 =(ku**2/unit_b**2)*(np.sin(unit_beta)**2)
                    term3 = (la**2/unit_c**2)*(np.sin(unit_gamma)**2)
                    term4 =((2*ku*la)/(unit_b*unit_c))*(np.cos(unit_beta)*np.cos(unit_gamma) - np.cos(unit_alpha))
                    term5 =((2*ha*la)/(unit_a*unit_c))*(np.cos(unit_alpha)*np.cos(unit_gamma) - np.cos(unit_beta))
                    term6 = ((2*ku*ha)/(unit_b*unit_a))*(np.cos(unit_beta)*np.cos(unit_alpha) - np.cos(unit_gamma))
                    num = term1 + term2 +term3 +term4+ term5 +term6
                    one_dhkl = np.sqrt(num/denom) #as value calculated is 1/dhkl**2
                    resoln = self.lamda*one_dhkl/2
                    self.sth.append(resoln)
                    sinthl = one_dhkl/2
                    self.resoln.append(sinthl)
                elif file_type == "fco":
                    self.sth.append(float(lst[6])*self.lamda) #should this not be *lamda, isn't valule in fco sinth/lamda (think it is)
                    self.resoln.append(float(lst[6]))
                else:
                    self.sth.append(1)
            if len(self.F_m) == 0:
                self.qlist_item.setText("No values found in file.")
                return "NoV"
            else:
                self.code_running = True
                self.recalc = True # preliminarily allowing graph to be recalculated as values have been collected
                return "GO" #values have been collected, therefore return go to show that code should continue
    def calc_graph(self,i_filt,i_sig_filt,resoln_upper_filt,resoln_lower_filt,a,b,c,d,e,f):
        """Compute the normal probability plot data for the current weights.

        Filters reflections by intensity, I/su and resolution limits, weights
        each surviving reflection with the six-parameter scheme a..f
        (a < -1 -> statistical weights; a between the branches, i.e. exactly
        -1.0, -> unit weights), runs scipy's probplot, and reports R2
        statistics in the text box.

        :param i_filt, i_sig_filt: intensity and I/su cutoff values
        :param resoln_upper_filt, resoln_lower_filt: resolution window
        :param a,b,c,d,e,f: weighting scheme parameters
        """
        if self.code_running == False: #Check that values from fco/fcf have been gathered.
            pass
        else:
            ### lists for values to be plotted
            F_val = []
            f_sub = []
            f_m_only = []
            sig_f_only = []
            f_c_only = []
            fc = [] #colour of points
            #set weight string for printing
            if a <= -1:
                self.weight_string = "Weighting applied:\n Statistical weights \n"
            else:
                self.weight_string = "Weighting applied:\n a: %f, b:%f, c%f, d:%f, e:%f, f: %f\n" %(round(a,6), round(b,6), round(c,6), round(d,6), round(e,6),round(f,6))
            i_sig = []
            labels = []
            w = []
            def calc_and_append(i,a,b,c,d,e,f):
                """function to calculate weighting value for each reflection and append values to lists for further use"""
                s = self.sig_F[i]
                # q is the exponential resolution-dependent part of the weight
                if c > 0:
                    q = np.exp(c*self.sth[i])
                elif c< 0:
                    q = 1 - np.exp(c*self.sth[i])
                else: #c == 0
                    q = 1.0
                if a < -1.0: #statistical weights
                    a = 0.0
                    b = 0.0
                    c = 0.0
                    d = 0.0
                    e = 0.0
                    f = 1/3.0
                    #p = (f*self.F_m[i] + (1-f)*self.F_c[i])
                    base = (s**2)
                    w2 = base/q
                elif a > -1.0:
                    p = (f*self.F_m[i] + (1-f)*self.F_c[i])
                    base = (s**2 + (a*p)**2 + b*p + d + (self.sth[i]/self.lamda)*e)
                    w2 = base/q
                else: # a = -0.1, unit weights
                    # NOTE(review): with the two branches above, this branch is
                    # reached only when a == -1.0 exactly -- the "-0.1" in the
                    # original comment looks like a typo; confirm intended value.
                    #p = 0.0
                    w2 = 1.0
                    #base = (s**2 + (a*p)**2 + b*p + d + (self.sth[i]/self.lamda)*e)
                    #w2 = base/q
                #w_tot = sum(w*np.square(fo-scale_factor*fc))
                w.append(np.sqrt(w2))
                f_no = ((self.F_c[i]-self.F_m[i]))/np.sqrt(w2) # need w in there somehow.
                i_sig.append(self.F_m[i]/self.sig_F[i])
                f_m_only.append(self.F_m[i])
                sig_f_only.append(self.sig_F[i])
                fc.append(self.resoln[i]) #change this based on what you want colour to be!! - probably want this to be an option (resoln is sintheta/lamda)
                F_val.append(f_no) #Fval is the observed resolution value
                f_c_only.append(self.F_c[i])
                labels.append(self.labels_all[i])
            less_than = 0  # counts reflections rejected by the Fm floor below
            for i in range(0,len(self.F_m)):
                #if np.sqrt(self.F_m[i]) > 0.0: #cutoff makes it match drkplot,
                if self.F_m[i] > -9999999999.0: #0.0: # as square rooting a number less than 0 would make np.sqrt freak out - think this line should be changed to the 0.0!!!!!!!!!!
                    resoln_i = float(self.resoln[i]) #resolution being sintheta/lamda
                    ### filtering reflections based on limits ###
                    utrue = resoln_i >= float(resoln_lower_filt) #is the resolution number above the lower filt
                    ltrue = float(resoln_upper_filt) >= resoln_i # is resolution number below the upper filt
                    # lower filt < resolution < upper filt == good! :D
                    if utrue == True:
                        if ltrue == True:
                            if self.filt_eq == ">":
                                if self.F_m[i] >= i_filt:
                                    if self.sig_filt_eq == ">":
                                        if self.F_m[i]/self.sig_F[i] >= i_sig_filt:
                                            calc_and_append(i,a,b,c,d,e,f) # append
                                    elif self.sig_filt_eq == "<":
                                        if self.F_m[i]/self.sig_F[i] <= i_sig_filt:
                                            calc_and_append(i,a,b,c,d,e,f) #when this condition is hit, then removed, bad things happen (oh yes because it is reset
                            elif self.filt_eq == "<":
                                if self.F_m[i] <= i_filt:
                                    if self.sig_filt_eq == ">":
                                        if self.F_m[i]/self.sig_F[i] >= i_sig_filt:
                                            calc_and_append(i,a,b,c,d,e,f) # append
                                    elif self.sig_filt_eq == "<":
                                        if self.F_m[i]/self.sig_F[i] <= i_sig_filt:
                                            calc_and_append(i,a,b,c,d,e,f) #append
                else:
                    less_than += 1
            if len(F_val) == 0: #if this occurs there are no values in the list, therefore no graph will be drawn
                self.nocalc_text = "No values match this criteria"
                text_box_text = self.nocalc_text
                self.qlist_item.setText(text_box_text)
                self.recalc = False
            else:
                self.recalc = True
                self.res = stats.probplot(F_val)
                zipped = zip(F_val,fc,i_sig,labels,w,f_m_only,sig_f_only,f_c_only) #sort in order
                sort = sorted(zipped) #sorts by first column.
                # unpack the sorted tuples into the per-reflection lists used
                # for colouring and the pick handler
                self.no_col = []
                self.labels = []
                self.weights = []
                self.fm_only = []
                self.sigf_only = []
                self.fc_only = []
                for item in sort:
                    self.no_col.append(item[1])
                    self.labels.append(item[3])
                    self.weights.append(item[4])
                    self.fm_only.append(item[5])
                    self.sigf_only.append(item[6])
                    self.fc_only.append(item[7])
                def calculate_R2(F_val):
                    """function to calculate R^2 value of normal probability plot"""
                    res_m = self.res[1][0]
                    res_c = self.res[1][1]
                    y_avg = np.mean(F_val)
                    tot_sq = 0
                    reg_sq = 0
                    norm_sq_tot = 0
                    se_sq = 0
                    goof_tot = 0
                    for i in range(0,len(self.res[0][0])):
                        x = self.res[0][0][i]
                        y = self.res[0][1][i]
                        ypred = res_m*x+res_c
                        ssr = (ypred - y_avg)**2
                        sst = (y - y_avg)**2
                        se = (y - ypred)**2
                        norm_sq = (y-x)**2
                        tot_sq += sst
                        reg_sq += ssr
                        se_sq += se
                        norm_sq_tot += norm_sq
                        if x == 0:
                            pass
                        else:
                            goof_add = ((y-x)**2)/x #calculating goodness of fit
                            goof_tot += goof_add
                    R2_straight = 1- (norm_sq_tot/tot_sq)
                    return 1 - se_sq/tot_sq, R2_straight#R2
                R2_val, r2_s = calculate_R2(F_val)
                self.R2_text = "%sR2 : %f \nR2 straight: %f \nNo. of Reflections: %d \n"%(self.weight_string, R2_val, r2_s, len(F_val))
                text_box_text = self.R2_text
                self.qlist_item.setText(text_box_text)
def recalc_fig(self):
"""function triggered by apply button on normal probability plot tab, recalculates plot with defined weight and/or data limits and axes"""
self.run_recalc = True #set as true, used for while loop to check that a -f and limits are all numerical or fractions
while self.run_recalc == True: #using while loop to allow stopping of function if values are non-numeric, or file is missing.
if self.file_selected == False:
self.qlist_item.setText("Error: File has not been selected")
break
if self.cif_info == False:
self.qlist_item.setText("Error: number of independant parameters is not in cif file")
break
a_val = self.a_edit.text()
if len(a_val) < 1: #if there is nothing in box, len = 0, set default (0.0)
a_val = 0.0
else:
a_val = self.check_int(a_val)
if a_val == "False":
break
b_val =self.b_edit.text()
if len(b_val) < 1:
b_val = 0.0
else:
b_val = self.check_int(b_val)
if b_val == "False":
break
c_val =self.c_edit.text()
if len(c_val) < 1:
c_val = 0.0
else:
c_val = self.check_int(c_val)
if c_val == "False":
break
d_val =self.d_edit.text()
if len(d_val) < 1:
d_val = 0.0
else:
d_val = self.check_int(d_val)
if d_val == "False":
break
e_val =self.e_edit.text()
if len(e_val) < 1:
e_val = 0.0
else:
e_val = self.check_int(e_val)
if e_val == "False":
break
f_val = self.f_edit.text()
if len(f_val) < 1:
f_val = 1/3.0 #default === 1/3.0
else:
f_val = self.check_int(f_val)
if f_val == "False":
break
self.filt_eq = self.i_filt_eq.currentText() #checking which inequality sign is selected for intensity filter
self.sig_filt_eq = self.i_sig_filt_eq.currentText()#checking which inequality sign is selected intensity/s.u. filter
i_filt =self.i_filt.text()
i_sig_filt = self.i_sig_filt.text()
resoln_upper_filt = self.resoln_upper_filt.text()
resoln_lower_filt = self.resoln_lower_filt.text()
if len(self.i_sig_filt.text()) > 0:
i_sig_filt= self.check_int(i_sig_filt)
if i_sig_filt == False:
break
else:
if self.sig_filt_eq == ">":
i_sig_filt = -9999999999.9
else:
i_sig_filt = 9999999999.9
if len(self.i_filt.text()) > 0:
i_filt = self.check_int(i_filt)
if i_filt == False:
break
else:
if self.filt_eq == ">":
i_filt = -9999999999.9
else:
i_filt = 9999999999.9
### checking resoln_upper_filt and lower _filt ####
if resoln_upper_filt == "":
resoln_upper_filt = 9999999999999.0
else:
resoln_lower_filt =self.check_int(resoln_lower_filt)
if resoln_lower_filt == False:
break
if resoln_lower_filt == "":
resoln_lower_filt = -99999999999999.0
else:
resoln_upper_filt = self.check_int(resoln_upper_filt)
if resoln_upper_filt == False:
break
#check they are all numerical values
plt.cla()
self.calc_graph(i_filt,i_sig_filt,resoln_upper_filt,resoln_lower_filt,a_val, b_val, c_val, d_val, e_val,f_val)
if self.recalc == True: #checks there are values in list still after things have been filtered out True/False, assigned in calc_graph based on whether there are items in value lists after filters applied
def check(a):
"""checks if graphical limits are floats or not"""
if len(a) < 1:
return False
else:
try:
a = float(a)
return a
except ValueError:
return False
x_min_val = check(self.x_min.text())
x_max_val = check(self.x_max.text())
y_min_val = check(self.y_min.text())
y_max_val = check(self.y_max.text())
self.plot_norm(x_min_val, x_max_val, y_min_val, y_max_val)
self.run_recalc = False #to stop while loop from running
    def plot_norm(self,x_min,x_max,y_min,y_max):
        """Draw the normal probability plot on self.ax1f1.

        Points come from self.res (scipy probplot output) coloured by
        self.no_col; the red dashed line is the ideal normal line.  Each
        axis limit is applied only when its value is not False -- a limit of
        exactly 0.0 therefore also reads as "unset" here.
        """
        ### need to split up resolutions for graph ###
        # NOTE(review): the next five locals are assigned but never used in
        # this method -- likely leftovers from an earlier colouring scheme.
        res_resolncut = []
        res2_resolncut = []
        upper = 2.0
        lower = 0.0
        colour = []
        res2 = self.ax1f1.scatter(self.res[0][0],self.res[0][1],c=self.no_col, lw=0, s=10,picker=True)#line_picker)
        y = [-4,0,4]
        x = [-4,0,4]
        self.ax1f1.plot(x,y,'--',color='r')
        self.ax1f1.axhline(y=0,ls="-",c="black")
        self.ax1f1.axvline(x=0,ls="-",c="black")
        # apply user limits; when only one end is given keep the other at
        # the current autoscaled value
        if x_min != False:
            if x_max != False:
                self.ax1f1.set_xlim([x_min,x_max])
            else:
                self.ax1f1.set_xlim([x_min,plt.xlim()[1]])
        if x_max != False:
            if x_min != False:
                pass
            else:
                self.ax1f1.set_xlim([plt.xlim()[0] ,x_max])
        if y_min != False:
            if y_max != False:
                self.ax1f1.set_ylim([y_min,y_max])
            else:
                self.ax1f1.set_ylim([y_min,plt.ylim()[1]])
        if y_max != False:
            if y_min != False:
                pass
            else:
                self.ax1f1.set_ylim([plt.ylim()[0],y_max])
        self.ax1f1.set_xlabel("Expected Residuals")
        self.ax1f1.set_ylabel("Ordered Residuals")
        self.fig1.canvas.draw()
def check_frac(self,y):
"""function to check if input value for weight/limit is a fraction and if so extract value"""
frac = y.split("/")
def is_number(x):
try:
no = float(x)
return True
except ValueError:
return False
if len(frac) == 2:
if is_number(frac[0]) == True and is_number(frac[1]) == True: #makes sure both items in list can be floats
new_no = float(frac[0])/float(frac[1])
fraction = new_no
else:
fraction = "False"
else:
fraction = "False"
return fraction
def check_int(self,string):
"""function to check if input value for weight/limit in normal probability plot is a float and if so extract value"""
if string == "":
return string
else:
try:
no = float(string)
except ValueError:
if "/" in string:
no = self.check_frac(string) #so fractions can also be input
else:
no = "False" #using it as a string as if value = 0.0, will be evaluated as false when it shouldn't be.
if no == "False":
text_box_text = "One of the input values is not a number or a fraction:\n graph not recalculated"
self.qlist_item.setText(text_box_text)
return no
def check_int_weight(self,string):
"""function to check if input for cutoffs in weighting tab is a number"""
if string == "":
return string
else:
try:
no = float(string)
except ValueError:
if "/" in string:
no = self.check_frac(string)
else:
no = "False"
if no == "False":
text_box_text = "One of the input values is not a number or a fraction:\n graph not recalculated"
self.tab2_info.setText(text_box_text)
return no
    def calculate_weighting_scheme(self):
        """Search for optimal SHELX-style a and b weighting parameters.

        Validates the cutoff entries, filters the reflection data through
        sort_data, then drives the external shelx_weighting_calc class to
        minimise the bin variance.  Results (and the settings used) are
        appended as a new row to the weighting table.
        """
        #f_c, f_m, sig_f
        #check input values are numerical
        all_num=True
        resoln_upper = self.check_int_weight(self.resoln_upper_filt_2.text())
        resoln_lower =self.check_int_weight(self.resoln_lower_filt_2.text())
        i_remove = self.check_int_weight(self.i_filt_2.text())
        isig_remove = self.check_int_weight(self.i_sig_filt_2.text())
        # NOTE(review): i_val/isig_val re-validate the same boxes as
        # i_remove/isig_remove above -- the duplicates look redundant.
        i_val = self.check_int_weight(self.i_filt_2.text())
        isig_val = self.check_int_weight(self.i_sig_filt_2.text())
        all_num = True
        if resoln_upper == "False" or resoln_lower == "False" or i_remove == "False" or isig_remove == "False" or i_val == "False" or isig_val == "False":
            all_num = False
        #check that input values are numbers.
        if self.file_selected == False:
            self.tab2_info.setText("Error: File has not been selected")
        elif all_num == False:
            self.tab2_info.setText("Error: One or more of cutoffs is not a number.")
            #pass
        elif self.code_run == False: #Stop running if there was an issue importing files
            self.tab2_info.setText("Error: Problem importing files. Weighting code cannot run.")
        else:
            self.tab2_info.setText("")
            Fmlist = []
            Fclist = []
            sigflist = []
            resolnlist = []
            self.calc_weight_button.setEnabled(False)  # prevent re-entry while calculating
            table_column = []
            # deep copies so the calculation cannot mutate the plotted data
            Fmlist = deepcopy(self.F_m)
            Fclist =deepcopy(self.F_c)
            sigflist =deepcopy(self.sig_F)
            resolnlist = deepcopy(self.resoln)
            F_c = np.array(Fclist)
            F_m = np.array(Fmlist)
            sig_F = np.array(sigflist)
            resolution = np.array(resolnlist)
            stop_run = False
            bintype = self.weight_bin_style()
            new_Fm = []
            new_Fc = []
            new_sigF = []
            newres = []
            isig_table = ""
            i_table = ""
            ru_table = ""
            rl_table = ""
            noreflns = len(new_Fm)  # overwritten by sort_data's count below
            #run = shelx_weighting_calc olex_weighting_scheme(self.n_independant_params, self.scale_factor, new_Fm, new_Fc, new_sigF, newres, bintype) #setting up class from weighting scheme python scripts.
            new_Fm, new_Fc, new_sigF, newres, i_table,isig_table, ru_table, rl_table,noreflns = self.sort_data(F_m, F_c, sig_F, resolution)
            zero_indicies = new_Fm < 0 #need reset to zero as in olex
            new_Fm[zero_indicies] = 0
            run = shelx_weighting_calc(self.n_independant_params, self.scale_factor, new_Fm, new_Fc, new_sigF, newres, bintype) #setting up class from weighting scheme python scripts.
            if self.calculate_start_check.checkState() == 2: #check state of checkbox.
                calc_start = True
                start_a = 0
                start_b = 0
                start_a, start_b = run.calculate_start(F_m, F_c, sig_F) #these vals all data, sorting would start here before put into file
                a_stop = self.check_int_weight(self.a_stop.text())
                b_stop = self.check_int_weight(self.b_stop.text())
                if a_stop == "False" or b_stop == "False":
                    self.tab2_info.setText("Error: One or more Weighting Stopping Points not a number.")
                    stop_run = True
            else:
                calc_start = False
                start_a = self.check_int_weight(self.a_start.text()) #need to check for float
                start_b = self.check_int_weight(self.b_start.text()) #need to check for float
                a_stop = self.check_int_weight(self.a_stop.text())
                b_stop = self.check_int_weight(self.b_stop.text())
                if start_a == "" or start_b == "":
                    self.tab2_info.setText("Error: No starting a or b values set. Please input values or check Calculate Start box and try again")
                    stop_run = True
                elif start_a == "False" or start_b == "False":
                    self.tab2_info.setText("Error: One or more Weighting Starting Points not a number.")
                    stop_run = True
                elif a_stop == "False" or b_stop == "False":
                    self.tab2_info.setText("Error: One or more Weighting Stopping Points not a number.")
                    stop_run = True
            if stop_run == False:
                if a_stop == "" or b_stop == "":
                    a_stop = 1e-4
                    b_stop = 5e-3
                # else:
                #     a_stop = float(a_stop)
                #     b_stop = float(b_stop)
                #olex defaults
                a,b,goof,wr2,variance, error = run.minimize_variance(a_stop,b_stop,start_a, start_b)
                if len(error) > 0:
                    self.tab2_info.setText(error)
                # append one row of results/settings to the weighting table
                table_column = []
                table_vals = [a,b,goof,wr2,a_stop,b_stop,i_table,isig_table, ru_table, rl_table,noreflns,start_a,start_b,bintype,variance]
                for i in table_vals:
                    item = QtGui.QStandardItem(str(i))
                    table_column.append(item)
                self.tablemodel.appendRow(table_column)
                self.weighting_tableview.setModel(self.tablemodel)
                if a == "-" and b == "-":
                    self.tab2_info.setText("CAPOW could not search for weighting scheme, try smaller stopping values or adjusting starting values.")
            self.calc_weight_button.setEnabled(True)
def weight_bin_style(self):
"""function to check which weighting bin is required"""
if self.bintype_intensity.checkState() == 2:
bintype = "I"
#elif self.bintype_resolution.checkState() == 2:
else: #either intensity is ticked or resolution is ticked.
bintype = "R"
return bintype
    def sort_data(self, Fm, Fc, sigF, resolution):
        """Apply the user's cutoffs to the reflection arrays.

        Builds a list of reflection indices to remove (resolution window,
        intensity and I/su cutoffs, each read from the weighting tab) and
        deletes them from all four arrays in one pass.

        :param Fm, Fc, sigF, resolution: numpy arrays of equal length
        :return: filtered (Fm, Fc, sigF, resolution) plus the display strings
                 describing each cutoff and the remaining reflection count
        """
        #create lists to populate during application of cutoffs
        remove_index = []
        remove_resup = []
        remove_reslow = []
        remove_i = []
        remove_isig = []
        resulting_list = []
        ints = []
        # obtain input values from weighting tab
        resoln_upper =self.resoln_upper_filt_2.text()
        resoln_lower =self.resoln_lower_filt_2.text()
        i_remove = self.i_filt_2.text()
        isig_remove = self.i_sig_filt_2.text()
        i_filt_eq = self.i_filt_eq_2.currentText()
        isig_filt_eq = self.i_sig_filt_eq_2.currentText()
        i_val = self.i_filt_2.text()
        isig_val = self.i_sig_filt_2.text()
        isig_table = ""
        i_table = ""
        ru_table = ""
        rl_table = ""
        if resoln_upper != "":
            remove_resup = resolution > float(resoln_upper) #want to remove everything above, so only want indexes of those above
            ru_table = "> %s" % resoln_upper
        if resoln_lower != "":
            remove_reslow = resolution < float(resoln_lower) #find values with a resoltion of less than resoln_lower, produces list True False whether this condition is met
            # NOTE(review): the label reads "> value" like the upper cutoff;
            # confirm whether "< value" was intended for the lower cutoff.
            rl_table = "> %s" % resoln_lower
        #### resoln cutoff upper###
        if len(remove_resup) > 0:
            if len(remove_reslow) > 0:
                ints = [i for i in range(0, len(Fm)) if remove_resup[i] == True] #whether above condition is met, remove_resup list of true falses, only want index of those that are true
                resulting_list = list(ints)
                ints = []
                ints = [i for i in range(0, len(Fm)) if remove_reslow[i] == True]
                resulting_list.extend(x for x in ints if x not in resulting_list) #add values to resulting list if true in remove_reslow (and not already in resulting list)
                ints = []
            else:
                ints = [i for i in range(0, len(Fm)) if remove_resup[i] == True]
                resulting_list = list(ints)
                ints = []
        elif len(remove_reslow) > 0: #resolution cutoff lower
            ints = [i for i in range(0, len(Fm)) if remove_reslow[i] == True]
            resulting_list = list(ints)
            ints = []
        ### check i cutoff##
        if i_val != "":
            if i_filt_eq == "<":
                remove_i = Fm > float(i_val) #so selects all Fm values that are greater than the cutoff
            elif i_filt_eq == ">":
                #then check list
                remove_i = Fm < float(i_val)
            i_table = "%s %s" % (i_filt_eq,i_val)
            if len(resulting_list) > 0:
                #if the resulting list has values in, need to make sure that there are no doubles included in resulting list when lower indicies joined on
                ints = []
                ints = [i for i in range(0, len(Fm)) if remove_i[i] == True]
                resulting_list.extend(x for x in ints if x not in resulting_list)
                ints = []
            else:
                ints = [i for i in range(0, len(Fm)) if remove_i[i] == True]
                resulting_list = list(ints)
        ####
        fmsig = Fm/sigF
        ### i/s.u. cutoff
        if isig_val != "":
            if isig_filt_eq == "<":
                remove_isig = fmsig > float(isig_val) #so selects all Fm values that are greater than the cutoff
            elif isig_filt_eq == ">":
                remove_isig = fmsig < float(isig_val)
            isig_table = "%s %s" % (isig_filt_eq,isig_val)
            if len(resulting_list) > 0:
                #if the resulting list has values in, need to make sure that there are no doubles included in resulting list when lower indicies joined on
                ints = []
                ints = [i for i in range(0, len(Fm)) if remove_isig[i] == True]
                resulting_list.extend(x for x in ints if x not in resulting_list)
                ints = []
            else:
                ints = []
                ints = [i for i in range(0, len(Fm)) if remove_isig[i] == True]
                resulting_list = list(ints)
                ints = []
        # delete every flagged index from all four arrays at once
        if len(resulting_list) > 0:
            new_Fm = np.delete(Fm, resulting_list)
            new_Fc =np.delete(Fc, resulting_list)
            new_sigF = np.delete(sigF, resulting_list)
            new_res = np.delete(resolution, resulting_list)
        else:
            new_Fm =Fm
            new_Fc =Fc
            new_sigF = sigF
            new_res = resolution
        cutoffs_list = []  # NOTE(review): assigned but unused
        noreflns = len(new_Fm)
        return new_Fm, new_Fc, new_sigF, new_res,i_table,isig_table, ru_table, rl_table,noreflns
def drk_lim(self):
"""inserts plot limits to mirror DRKplot into normal probability plot tab"""
self.x_min.setText("-4")
self.x_max.setText("4")
self.y_min.setText("-4")
self.y_max.setText("4")
def clr_lim(self):
"""removes plot limits from drk plot tab"""
self.x_min.setText("")
self.x_max.setText("")
self.y_min.setText("")
self.y_max.setText("")
def clear_weights(self):
"""Clears all saved weights from weighting scheme table"""
self.tablemodel = ""
headers = ["a","b","goof","wr2","stop a","stop b","i cutoff","isig cutoff","resoln upper","resoln lower","no. of reflns","start a","start b","binning", "bin variance"]
self.tablemodel = QtGui.QStandardItemModel() #internet says to make it a model to work
self.tablemodel.setHorizontalHeaderLabels(headers)
self.weighting_tableview.setModel(self.tablemodel)
def send_weights(self):
"""Sends selected weights from weighting scheme table to normal probability plot tab"""
#which row is selected?
selectedindexes = self.weighting_tableview.selectedIndexes()
#no selectedRows() function for tableview apparently, so have to get selected values
row = [] #(i.row() for i in selectedindexes if i.row() not in row)]
for i in selectedindexes:
rowindex = i.row()
if rowindex not in row:
row.append(rowindex)
#making sure only one row has been selected
if len(row) == 1:
row_val = row[0]
#gets selected values
selected_a =str(self.tablemodel.item(row_val,0).text())
selected_b = str(self.tablemodel.item(row_val,1).text())
#sets a and b from tab one as selected values
self.a_edit.setText(str(selected_a))
self.b_edit.setText(str(selected_b))
self.c_edit.setText("0.0")
self.d_edit.setText("0.0")
self.e_edit.setText("0.0")
self.f_edit.setText("1/3")
elif len(row) == 0:
self.tab2_info.setText("Error: No row selected")
else:
self.tab2_info.setText("Error: More than one row selected")
if __name__ == '__main__':
    # Launch the Qt application.  The argument is the scale factor used in
    # the weight calculation; the code assumes 1.0 -- change here if not.
    app = QtGui.QApplication(sys.argv)
    window = Main(1.0)
    window.show()
    sys.exit(app.exec_())
from os import system
import pyttsx
#Setting up the speaker
def onStart(name):
print ""
def onWord(name, location, length):
print ""
def onEnd(name, completed):
print ""
engine = pyttsx.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate-50)
engine.connect('started-utterance', onStart)
engine.connect('started-word', onWord)
engine.connect('finished-utterance', onEnd)
#Speaker terminated
#variables needed
music_dir="~/media/hritik/New\ Volume1/dc++/movies"
input_cmd=raw_input("Enter the command : ")
#setting up the commands
cmd={'pause':"totem --pause"}
cmd['next']="totem --play \n totem --next"
cmd['previous']="totem --play \n totem --previous"
cmd['play']="totem --play"
cmd['vol-up']="totem --volume-up"
cmd['vol-down']="totem --volume-down"
cmd['err_message']="print 'Sorry unknown command'"
cmd['quit']="totem --quit"
cmd['forward']="totem --seek-fwd"
cmd['backward']="totem --seek-bwd"
cmd['fullscreen']="totem --fullscreen"
cmd['mute']="totem --mute"
#Controller begins
priority_key={}
priority_key['next']=['next']
priority_key['previous']=['previous']
priority_key['pause']=['pause','stop']
priority_key['quit']=['quit','close']
priority_key['fullscreen']=['full','fullscreen','maximize','maximise']
priority_key['mute']=['mute','nil','zero','lowest']
priority_key['forward']=['ahead','forward']
priority_key['backward']=['back','backward']
priority_key['vol-up']=['increase','up','raise','high']
priority_key['vol-down']=['decrease','down','lower','low','reduce']
priority_key['play']=['play','start','video','movie','watch','player','current']
def search(word, command, priority_key):
    """Return 1 if *word* is a trigger keyword for *command*, else 0.

    :param word: a single token from the user's input
    :param command: an action name, must be a key of *priority_key*
    :param priority_key: mapping of action name -> list of trigger words
    :raises KeyError: if *command* is not in *priority_key* (same as before)
    """
    # int(bool) keeps the historical 1/0 return values for existing callers.
    return int(word in priority_key[command])
# Tokenise the user's input and find which action's keywords it mentions.
str_split=input_cmd.split()
# Default to the error action when nothing matches.
selected="err_message"
# NOTE(review): the break only exits the inner loop, so a later command in
# (unordered) dict iteration can overwrite an earlier match -- confirm whether
# "last match wins" is the intended semantics.
for command in priority_key:
    for word in str_split:
        if(search(word,command,priority_key)):
            selected=command
            break;
print selected
# Execute the chosen totem command through the shell.
system(cmd[selected])
from scenario.lib import *
# Acceptance scenario: inventory nodes, install/accept agents, apply a test
# user rule, verify it on each agent, then clean everything up.
# test begins, register start time
start()
# Generic tests: run the 'fusion' check on every node, passing its OS name
# when the platform knows it (empty string otherwise).
for host in scenario.nodes():
    hostinfo = scenario.platform.hosts[host].info
    osname = hostinfo['osname'] if 'osname' in hostinfo else ""
    run(host, 'fusion', Err.CONTINUE, OSNAME=osname)
run_on("all", 'agent', Err.CONTINUE)
# force inventory on the agents, then a plain agent run on the server
run_on("agent", 'run_agent', Err.CONTINUE, PARAMS="-D force_inventory")
run_on("server", 'run_agent', Err.CONTINUE, PARAMS="")
# accept nodes (Err.BREAK presumably aborts the scenario on failure --
# confirm against scenario.lib)
for host in scenario.nodes("agent"):
    run('localhost', 'agent_accept', Err.BREAK, ACCEPT=host)
# Add a rule; record the server time first so we can wait for a policy
# generation newer than date0 on each agent (the trailing 20 looks like a
# timeout/retry count -- confirm in scenario.lib).
date0 = host_date('wait', Err.CONTINUE, "server")
run('localhost', 'user_rule', Err.BREAK, NAME="Test User", GROUP="special:all")
for host in scenario.nodes("agent"):
    wait_for_generation('wait', Err.CONTINUE, "server", date0, host, 20)
# Run agent: failsafe policy first, then the freshly generated policy
run_on("agent", 'run_agent', Err.CONTINUE, PARAMS="-f failsafe.cf")
run_on("agent", 'run_agent', Err.CONTINUE, PARAMS="")
# Test relay configuration
run_on("relay", 'relay_config', Err.CONTINUE)
# Test rule result on every agent
run_on("agent", 'user_test', Err.CONTINUE)
# remove rule/directive (Err.FINALLY presumably marks cleanup that runs even
# after earlier failures -- confirm against scenario.lib)
run('localhost', 'directive_delete', Err.FINALLY, DELETE="Test User Directive", GROUP="special:all")
run('localhost', 'rule_delete', Err.FINALLY, DELETE="Test User Rule", GROUP="special:all")
# remove agent nodes from the server
for host in scenario.nodes("agent"):
    run('localhost', 'agent_delete', Err.FINALLY, DELETE=host)
# test end, print summary
finish()
"""
Airy Functions
--------------
* airy -- Airy functions and their derivatives.
* airye -- Exponentially scaled Airy functions
* ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
* bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
Elliptic Functions and Integrals
--------------------------------
* ellipj -- Jacobian elliptic functions
* ellipk -- Complete elliptic integral of the first kind.
* ellipkinc -- Incomplete elliptic integral of the first kind.
* ellipe -- Complete elliptic integral of the second kind.
* ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
* jn -- Bessel function of integer order and real argument.
* jv -- Bessel function of real-valued order and complex argument.
* jve -- Exponentially scaled Bessel function.
* yn -- Bessel function of second kind (integer order).
* yv -- Bessel function of the second kind (real-valued order).
* yve -- Exponentially scaled Bessel function of the second kind.
* kn -- Modified Bessel function of the second kind (integer order).
* kv -- Modified Bessel function of the second kind (real order).
* kve -- Exponentially scaled modified Bessel function of the second kind.
* iv -- Modified Bessel function.
* ive -- Exponentially scaled modified Bessel function.
* hankel1 -- Hankel function of the first kind.
* hankel1e -- Exponentially scaled Hankel function of the first kind.
* hankel2 -- Hankel function of the second kind.
* hankel2e -- Exponentially scaled Hankel function of the second kind.
* lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
.........................
* jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
* jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
* jn_zeros -- [+]Zeros of Jn(x)
* jnp_zeros -- [+]Zeros of Jn'(x)
* yn_zeros -- [+]Zeros of Yn(x)
* ynp_zeros -- [+]Zeros of Yn'(x)
* y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
* y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
* y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
..........................................
* j0 -- Bessel function of order 0.
* j1 -- Bessel function of order 1.
* y0 -- Bessel function of second kind of order 0.
* y1 -- Bessel function of second kind of order 1.
* i0 -- Modified Bessel function of order 0.
* i0e -- Exponentially scaled modified Bessel function of order 0.
* i1 -- Modified Bessel function of order 1.
* i1e -- Exponentially scaled modified Bessel function of order 1.
* k0 -- Modified Bessel function of the second kind of order 0.
* k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
* k1 -- Modified Bessel function of the second kind of order 1.
* k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
.............................
* itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
* it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
* iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
* it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
* besselpoly -- Integral of a bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.
Derivatives of Bessel Functions
...............................
* jvp -- Nth derivative of Jv(v,z)
* yvp -- Nth derivative of Yv(v,z)
* kvp -- Nth derivative of Kv(v,z)
* ivp -- Nth derivative of Iv(v,z)
* h1vp -- Nth derivative of H1v(v,z)
* h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
..........................
* sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
* sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
* sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
* sph_in -- [+]Sequence of spherical Bessel functions, in(z)
* sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
* sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Ricatti-Bessel Functions
........................
* riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
* riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
Struve Functions
----------------
* struve -- Struve function --- Hv(x)
* modstruve -- Modified struve function --- Lv(x)
* itstruve0 -- Integral of H0(t) from 0 to x
* it2struve0 -- Integral of H0(t)/t from x to Inf.
* itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions (Friendly versions in scipy.stats)
------------------------------------------------------------
* bdtr -- Sum of terms 0 through k of the binomial pdf.
* bdtrc -- Sum of terms k+1 through n of the binomial pdf.
* bdtri -- Inverse of bdtr
* btdtr -- Integral from 0 to x of beta pdf.
* btdtri -- Quantiles of beta distribution
* fdtr -- Integral from 0 to x of F pdf.
* fdtrc -- Integral from x to infinity under F pdf.
* fdtri -- Inverse of fdtrc
* gdtr -- Integral from 0 to x of gamma pdf.
* gdtrc -- Integral from x to infinity under gamma pdf.
* gdtria --
* gdtrib --
* gdtrix --
* nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
* nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
* nbdtri -- Inverse of nbdtr
* pdtr -- Sum of terms 0 through k of the Poisson pdf.
* pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
* pdtri -- Inverse of pdtr
* stdtr -- Integral from -infinity to t of the Student-t pdf.
* stdtridf --
* stdtrit --
* chdtr -- Integral from 0 to x of the Chi-square pdf.
* chdtrc -- Integral from x to infinity of Chi-square pdf.
* chdtri -- Inverse of chdtrc.
* ndtr -- Integral from -infinity to x of standard normal pdf
* ndtri -- Inverse of ndtr (quantiles)
* smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
* smirnovi -- Inverse of smirnov.
* kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
* kolmogi -- Inverse of kolmogorov
* tklmbda -- Tukey-Lambda CDF
Gamma and Related Functions
---------------------------
* gamma -- Gamma function.
* gammaln -- Log of the absolute value of the gamma function.
* gammainc -- Incomplete gamma integral.
* gammaincinv -- Inverse of gammainc.
* gammaincc -- Complemented incomplete gamma integral.
* gammainccinv -- Inverse of gammaincc.
* beta -- Beta function.
* betaln -- Log of the absolute value of the beta function.
* betainc -- Incomplete beta integral.
* betaincinv -- Inverse of betainc.
* psi(digamma) -- Logarithmic derivative of the gamma function.
* rgamma -- One divided by the gamma function.
* polygamma -- Nth derivative of psi function.
Error Function and Fresnel Integrals
------------------------------------
* erf -- Error function.
* erfc -- Complemented error function (1- erf(x))
* erfinv -- Inverse of error function
* erfcinv -- Inverse of erfc
* erf_zeros -- [+]Complex zeros of erf(z)
* fresnel -- Fresnel sine and cosine integrals.
* fresnel_zeros -- Complex zeros of both Fresnel integrals
* fresnelc_zeros -- [+]Complex zeros of fresnel cosine integrals
* fresnels_zeros -- [+]Complex zeros of fresnel sine integrals
* modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
* modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
Legendre Functions
------------------
* lpn -- [+]Legendre Functions (polynomials) of the first kind
* lqn -- [+]Legendre Functions of the second kind.
* lpmn -- [+]Associated Legendre Function of the first kind.
* lqmn -- [+]Associated Legendre Function of the second kind.
* lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
* sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
Orthogonal polynomials --- 15 types
These functions all return a polynomial class which can then be
evaluated: vals = chebyt(n)(x)
This class also has an attribute 'weights' which
return the roots, weights, and total weights for the appropriate
form of Gaussian quadrature. These are returned in an n x 3 array with roots
in the first column, weights in the second column, and total weights in the final
column
* legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
* chebyt -- [+]Chebyshev polynomial T_n(x)
* chebyu -- [+]Chebyshev polynomial U_n(x)
* chebyc -- [+]Chebyshev polynomial C_n(x)
* chebys -- [+]Chebyshev polynomial S_n(x)
* jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
* laguerre -- [+]Laguerre polynomial, L_n(x)
* genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
* hermite -- [+]Hermite polynomial H_n(x)
* hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
* gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
* sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
* sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
* sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
* sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
HyperGeometric Functions
------------------------
* hyp2f1 -- Gauss hypergeometric function (2F1)
* hyp1f1 -- Confluent hypergeometric function (1F1)
* hyperu -- Confluent hypergeometric function (U)
* hyp0f1 -- Confluent hypergeometric limit function (0F1)
* hyp2f0 -- Hypergeometric function (2F0)
* hyp1f2 -- Hypergeometric function (1F2)
* hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
* pbdv -- Parabolic cylinder function Dv(x) and derivative.
* pbvv -- Parabolic cylinder function Vv(x) and derivative.
* pbwa -- Parabolic cylinder function W(a,x) and derivative.
* pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
* pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
* pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
mathieu and Related Functions (and derivatives)
-----------------------------------------------
* mathieu_a -- Characteristic values for even solution (ce_m)
* mathieu_b -- Characteristic values for odd solution (se_m)
* mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
* mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
**All the following return both function and first derivative**
* mathieu_cem -- Even mathieu function
* mathieu_sem -- Odd mathieu function
* mathieu_modcem1 -- Even modified mathieu function of the first kind
* mathieu_modcem2 -- Even modified mathieu function of the second kind
* mathieu_modsem1 -- Odd modified mathieu function of the first kind
* mathieu_modsem2 -- Odd modified mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
* pro_ang1 -- Prolate spheroidal angular function of the first kind
* pro_rad1 -- Prolate spheroidal radial function of the first kind
* pro_rad2 -- Prolate spheroidal radial function of the second kind
* obl_ang1 -- Oblate spheroidal angular function of the first kind
* obl_rad1 -- Oblate spheroidal radial function of the first kind
* obl_rad2 -- Oblate spheroidal radial function of the second kind
* pro_cv -- Compute characteristic value for prolate functions
* obl_cv -- Compute characteristic value for oblate functions
* pro_cv_seq -- Compute sequence of prolate characteristic values
* obl_cv_seq -- Compute sequence of oblate characteristic values
**The following functions require pre-computed characteristic values**
* pro_ang1_cv -- Prolate spheroidal angular function of the first kind
* pro_rad1_cv -- Prolate spheroidal radial function of the first kind
* pro_rad2_cv -- Prolate spheroidal radial function of the second kind
* obl_ang1_cv -- Oblate spheroidal angular function of the first kind
* obl_rad1_cv -- Oblate spheroidal radial function of the first kind
* obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
* kelvin -- All Kelvin functions (order 0) and derivatives.
* kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
* ber -- Kelvin function ber x
* bei -- Kelvin function bei x
* berp -- Derivative of Kelvin function ber x
* beip -- Derivative of Kelvin function bei x
* ker -- Kelvin function ker x
* kei -- Kelvin function kei x
* kerp -- Derivative of Kelvin function ker x
* keip -- Derivative of Kelvin function kei x
* ber_zeros -- [+]Zeros of Kelvin function bei x
* bei_zeros -- [+]Zeros of Kelvin function ber x
* berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
* beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
* ker_zeros -- [+]Zeros of Kelvin function kei x
* kei_zeros -- [+]Zeros of Kelvin function ker x
* kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
* keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Other Special Functions
-----------------------
* expn -- Exponential integral.
* exp1 -- Exponential integral of order 1 (for complex argument)
* expi -- Another exponential integral -- Ei(x)
* wofz -- Faddeeva function.
* dawsn -- Dawson's integral.
* shichi -- Hyperbolic sine and cosine integrals.
* sici -- Integral of the sinc and "cosinc" functions.
* spence -- Dilogarithm integral.
* zeta -- Riemann zeta function of two arguments.
* zetac -- 1.0 - standard Riemann zeta function.
Convenience Functions
---------------------
* cbrt -- Cube root.
* exp10 -- 10 raised to the x power.
* exp2 -- 2 raised to the x power.
* radian -- radian angle given degrees, minutes, and seconds.
* cosdg -- cosine of the angle given in degrees.
* sindg -- sine of the angle given in degrees.
* tandg -- tangent of the angle given in degrees.
* cotdg -- cotangent of the angle given in degrees.
* log1p -- log(1+x)
* expm1 -- exp(x)-1
* cosm1 -- cos(x)-1
* round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
-------
[+] in the description indicates a function which is not a universal
function and does not follow broadcasting and automatic
array-looping rules.
Error handling
--------------
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will print an error message
when an error occurs. By default this printing
is disabled. To enable such messages use errprint(1)
To disable such messages use errprint(0).
Example:
>>> print scipy.special.bdtr(-1,10,0.3)
>>> scipy.special.errprint(1)
>>> print scipy.special.bdtr(-1,10,0.3)
"""
# Docstrings in this module use reStructuredText markup.
__docformat__ = 'restructuredtext'
# NOTE(review): flag presumably read by SciPy's deferred-import machinery to
# postpone importing this heavy package until first use -- confirm against
# the consumer of this info file.
postpone_import = 1
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.testfixture.beans.factory.generator.injection;
import org.springframework.beans.factory.annotation.Autowired;
/**
 * Test fixture combining mandatory constructor injection with an optional
 * {@code @Autowired} setter injection point.
 */
@SuppressWarnings("unused")
public class InjectionComponent {

    // Mandatory dependency, supplied through the constructor.
    private final String bean;

    // Optional dependency, populated only when a matching bean exists.
    private Integer counter;

    public InjectionComponent(String bean) {
        this.bean = bean;
    }

    // required = false lets the container skip this setter when no matching
    // Integer bean is available.
    @Autowired(required = false)
    public void setCounter(Integer counter) {
        this.counter = counter;
    }
}
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.stubbing;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.*;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.exceptions.verification.SmartNullPointerException;
import org.mockito.exceptions.verification.WantedButNotInvoked;
import org.mockitousage.IMethods;
import org.mockitoutil.TestBase;
/**
 * Tests for the RETURNS_SMART_NULLS default answer: unstubbed calls return
 * "smart nulls" that throw a descriptive SmartNullPointerException only when
 * actually dereferenced, while ordinary types still get ordinary empty values.
 */
public class SmartNullsStubbingTest extends TestBase {

    private IMethods mock;

    @Before
    public void setup() {
        // Every test works on a mock whose unstubbed calls yield smart nulls.
        mock = mock(IMethods.class, Mockito.RETURNS_SMART_NULLS);
    }

    // Helper whose name should show up in the smart-null exception message.
    public IMethods unstubbedMethodInvokedHere(IMethods mock) {
        return mock.iMethodsReturningMethod();
    }

    @Test
    public void shouldSmartNPEPointToUnstubbedCall() throws Exception {
        IMethods methods = unstubbedMethodInvokedHere(mock);
        try {
            // Dereferencing the smart null triggers the exception...
            methods.simpleMethod();
            fail();
        } catch (SmartNullPointerException e) {
            // ...whose message points at the call that produced the null.
            assertThat(e).hasMessageContaining("unstubbedMethodInvokedHere(");
        }
    }

    @Test
    public void should_not_throw_NPE_when_verifying_with_returns_smart_nulls() {
        Foo mock = mock(Foo.class, RETURNS_SMART_NULLS);

        // Stubbing with a null argument must not trip over the smart-null answer.
        when(mock.returnsFromArg(null)).thenReturn("Does not fail.");

        assertThat((Object) mock.returnsFromArg(null)).isEqualTo("Does not fail.");
    }

    // Local fixture types used by the tests below.
    interface Bar {
        void boo();
    }

    class Foo {
        Foo getSomeClass() {
            return null;
        }

        Bar getSomeInterface() {
            return null;
        }

        Bar getBarWithParams(int x, String y) {
            return null;
        }

        <T> T returnsFromArg(T arg) {
            return arg;
        }

        void boo() {}
    }

    @Test
    public void shouldThrowSmartNPEWhenMethodReturnsClass() throws Exception {
        Foo mock = mock(Foo.class, RETURNS_SMART_NULLS);
        Foo foo = mock.getSomeClass();
        try {
            foo.boo();
            fail();
        } catch (SmartNullPointerException e) {
            // expected: dereferencing a smart null of a class type
        }
    }

    @Test
    public void shouldThrowSmartNPEWhenMethodReturnsInterface() throws Exception {
        Foo mock = mock(Foo.class, RETURNS_SMART_NULLS);
        Bar bar = mock.getSomeInterface();
        try {
            bar.boo();
            fail();
        } catch (SmartNullPointerException e) {
            // expected: dereferencing a smart null of an interface type
        }
    }

    @Test
    public void shouldReturnOrdinaryEmptyValuesForOrdinaryTypes() throws Exception {
        IMethods mock = mock(IMethods.class, RETURNS_SMART_NULLS);

        // Non-mockable return types still get the usual empty defaults.
        assertEquals("", mock.stringReturningMethod());
        assertEquals(0, mock.intReturningMethod());
        assertEquals(true, mock.listReturningMethod().isEmpty());
        assertEquals(0, mock.arrayReturningMethod().length);
    }

    @Test
    public void shouldNotThrowSmartNullPointerOnToString() {
        Object smartNull = mock.objectReturningMethod();
        try {
            // Verification internally calls toString() on the argument; a smart
            // null must survive that without throwing.
            verify(mock).simpleMethod(smartNull);
            fail();
        } catch (WantedButNotInvoked e) {
            // expected: the call was never made, but no SmartNullPointerException
        }
    }

    @Test
    public void shouldNotThrowSmartNullPointerOnObjectMethods() {
        Object smartNull = mock.objectReturningMethod();
        String ignored = smartNull.toString();
    }

    @Test
    public void shouldShowParameters() {
        Foo foo = mock(Foo.class, RETURNS_SMART_NULLS);
        Bar smartNull = foo.getBarWithParams(10, "yes sir");

        try {
            smartNull.boo();
            fail();
        } catch (Exception e) {
            // The originating call's arguments appear in the message.
            assertThat(e).hasMessageContaining("yes sir");
        }
    }

    @Test
    public void shouldShowParametersWhenParamsAreHuge() {
        Foo foo = mock(Foo.class, RETURNS_SMART_NULLS);
        String longStr =
                "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.";
        Bar smartNull = foo.getBarWithParams(10, longStr);

        try {
            smartNull.boo();
            fail();
        } catch (Exception e) {
            // Long arguments are still reported (possibly abbreviated).
            assertThat(e).hasMessageContaining("Lorem Ipsum");
        }
    }
}
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2025, Institute of Software, Chinese Academy of Sciences.
#include "rvv_hal.hpp"
#include <cfloat>
#include <cmath>
#include <typeinfo>
namespace cv { namespace rvv_hal { namespace core {
#if CV_HAL_RVV_1P0_ENABLED
namespace {
// the algorithm is copied from core/src/matrix_decomp.cpp,
// in the function template static int cv::LUImpl
//
// Gaussian elimination with partial pivoting, vectorised with RVV intrinsics.
// Factorises the m x m matrix src1 in place and, when src2 is non-null, also
// solves src1 * X = src2 for the m x n right-hand side (the solution is
// written back into src2 via back substitution).
//
// *info receives the sign of the row permutation (+1 or -1), or 0 when a
// pivot falls below eps (matrix treated as singular).
template<typename RVV_T, typename T = typename RVV_T::ElemType>
inline int LU(T* src1, size_t src1_step, int m, T* src2, size_t src2_step, int n, int* info)
{
    // Singularity threshold derived from the element type's machine epsilon.
    T eps;
    if( typeid(T) == typeid(float) )
        eps = FLT_EPSILON*10;
    else if( typeid(T) == typeid(double) )
        eps = DBL_EPSILON*100;
    else
        return CV_HAL_ERROR_NOT_IMPLEMENTED;

    int i, j, k, p = 1;
    // Steps arrive in bytes; convert them to element strides.
    src1_step /= sizeof(src1[0]);
    src2_step /= sizeof(src2[0]);

    int vlmax = RVV_T::setvlmax(), vl;
    if( src2 )
    {
        // Factorise and eliminate the right-hand side simultaneously.
        for( i = 0; i < m; i++ )
        {
            // Partial pivoting: pick the row with the largest |entry| in column i.
            k = i;
            for( j = i+1; j < m; j++ )
                if( std::abs(src1[j*src1_step + i]) > std::abs(src1[k*src1_step + i]) )
                    k = j;

            if( std::abs(src1[k*src1_step + i]) < eps )
            {
                // Pivot too small: report singular and stop.
                *info = 0;
                return CV_HAL_ERROR_OK;
            }

            if( k != i )
            {
                // Swap rows i and k of src1 (from column i on) vector-wise.
                for( j = i; j < m; j += vl )
                {
                    vl = RVV_T::setvl(m - j);
                    auto vec_src1 = RVV_T::vload(src1 + i * src1_step + j, vl);
                    auto vec_src2 = RVV_T::vload(src1 + k * src1_step + j, vl);
                    RVV_T::vstore(src1 + k * src1_step + j, vec_src1, vl);
                    RVV_T::vstore(src1 + i * src1_step + j, vec_src2, vl);
                }
                // Swap the matching rows of the right-hand side.
                for( j = 0; j < n; j += vl )
                {
                    vl = RVV_T::setvl(n - j);
                    auto vec_src1 = RVV_T::vload(src2 + i * src2_step + j, vl);
                    auto vec_src2 = RVV_T::vload(src2 + k * src2_step + j, vl);
                    RVV_T::vstore(src2 + k * src2_step + j, vec_src1, vl);
                    RVV_T::vstore(src2 + i * src2_step + j, vec_src2, vl);
                }
                // Each swap flips the permutation sign.
                p = -p;
            }

            // Eliminate column i below the diagonal: row_j += alpha * row_i
            // (vectorised fused multiply-accumulate), on src1 and src2 alike.
            T d = -1/src1[i*src1_step + i];

            for( j = i+1; j < m; j++ )
            {
                T alpha = src1[j*src1_step + i]*d;

                for( k = i+1; k < m; k += vl )
                {
                    vl = RVV_T::setvl(m - k);
                    auto vec_src = RVV_T::vload(src1 + i * src1_step + k, vl);
                    auto vec_dst = RVV_T::vload(src1 + j * src1_step + k, vl);
                    vec_dst = __riscv_vfmacc(vec_dst, alpha, vec_src, vl);
                    RVV_T::vstore(src1 + j * src1_step + k, vec_dst, vl);
                }

                for( k = 0; k < n; k += vl )
                {
                    vl = RVV_T::setvl(n - k);
                    auto vec_src = RVV_T::vload(src2 + i * src2_step + k, vl);
                    auto vec_dst = RVV_T::vload(src2 + j * src2_step + k, vl);
                    vec_dst = __riscv_vfmacc(vec_dst, alpha, vec_src, vl);
                    RVV_T::vstore(src2 + j * src2_step + k, vec_dst, vl);
                }
            }
        }

        // Back substitution: solve the upper-triangular system for each column
        // of src2. The dot product over row i uses a strided load of column j
        // of src2 and an ordered reduction for the final sum.
        for( i = m-1; i >= 0; i-- )
            for( j = 0; j < n; j++ )
            {
                T s = src2[i*src2_step + j];
                auto vec_sum = RVV_T::vmv(0, vlmax);
                for( k = i+1; k < m; k += vl )
                {
                    vl = RVV_T::setvl(m - k);
                    auto vec_src1 = RVV_T::vload(src1 + i * src1_step + k, vl);
                    auto vec_src2 = RVV_T::vload_stride(src2 + k * src2_step + j, sizeof(T) * src2_step, vl);
                    vec_sum = __riscv_vfmacc_tu(vec_sum, vec_src1, vec_src2, vl);
                }
                s -= __riscv_vfmv_f(__riscv_vfredosum(vec_sum, RVV_BaseType<RVV_T>::vmv_s(0, vlmax), vlmax));
                src2[i*src2_step + j] = s/src1[i*src1_step + i];
            }
    }
    else
    {
        // Decomposition-only path (no right-hand side): same pivoting and
        // elimination as above, applied to src1 alone.
        for( i = 0; i < m; i++ )
        {
            k = i;

            for( j = i+1; j < m; j++ )
                if( std::abs(src1[j*src1_step + i]) > std::abs(src1[k*src1_step + i]) )
                    k = j;

            if( std::abs(src1[k*src1_step + i]) < eps )
            {
                *info = 0;
                return CV_HAL_ERROR_OK;
            }

            if( k != i )
            {
                for( j = i; j < m; j += vl )
                {
                    vl = RVV_T::setvl(m - j);
                    auto vec_src1 = RVV_T::vload(src1 + i * src1_step + j, vl);
                    auto vec_src2 = RVV_T::vload(src1 + k * src1_step + j, vl);
                    RVV_T::vstore(src1 + k * src1_step + j, vec_src1, vl);
                    RVV_T::vstore(src1 + i * src1_step + j, vec_src2, vl);
                }
                p = -p;
            }

            T d = -1/src1[i*src1_step + i];

            for( j = i+1; j < m; j++ )
            {
                T alpha = src1[j*src1_step + i]*d;

                for( k = i+1; k < m; k += vl )
                {
                    vl = RVV_T::setvl(m - k);
                    auto vec_src = RVV_T::vload(src1 + i * src1_step + k, vl);
                    auto vec_dst = RVV_T::vload(src1 + j * src1_step + k, vl);
                    vec_dst = __riscv_vfmacc(vec_dst, alpha, vec_src, vl);
                    RVV_T::vstore(src1 + j * src1_step + k, vec_dst, vl);
                }
            }
        }
    }

    // Non-zero p: success; its sign encodes the parity of row swaps.
    *info = p;
    return CV_HAL_ERROR_OK;
}
} // anonymous
// HAL entry point: 32-bit float LU factorisation/solve via the f32 RVV kernel.
int LU32f(float* src1, size_t src1_step, int m, float* src2, size_t src2_step, int n, int* info) {
    return LU<RVV_F32M4>(src1, src1_step, m, src2, src2_step, n, info);
}
// HAL entry point: 64-bit double LU factorisation/solve via the f64 RVV kernel.
int LU64f(double* src1, size_t src1_step, int m, double* src2, size_t src2_step, int n, int* info) {
    return LU<RVV_F64M4>(src1, src1_step, m, src2, src2_step, n, info);
}
#endif // CV_HAL_RVV_1P0_ENABLED
}}} // cv::rvv_hal::core | cpp | github | https://github.com/opencv/opencv | hal/riscv-rvv/src/core/lu.cpp |
"""Support for scanning a network with nmap."""
from collections import namedtuple
from datetime import timedelta
import logging
from getmac import get_mac_address
from nmap import PortScanner, PortScannerError
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_EXCLUDE, CONF_HOSTS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# Interval in minutes to exclude devices from a scan while they are home
CONF_HOME_INTERVAL = "home_interval"
CONF_OPTIONS = "scan_options"
# Default nmap arguments: -F (fast scan) with a 5s per-host timeout.
DEFAULT_OPTIONS = "-F --host-timeout 5s"

# Platform configuration schema: hosts are required; exclusions and custom
# nmap options are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOSTS): cv.ensure_list,
        vol.Required(CONF_HOME_INTERVAL, default=0): cv.positive_int,
        vol.Optional(CONF_EXCLUDE, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_OPTIONS, default=DEFAULT_OPTIONS): cv.string,
    }
)
def get_scanner(hass, config):
    """Validate the configuration and return a Nmap scanner.

    The hass argument is part of the device_tracker platform signature and
    is not used here.
    """
    scanner_config = config[DOMAIN]
    return NmapDeviceScanner(scanner_config)
# Lightweight record describing one discovered network device.
Device = namedtuple("Device", ["mac", "name", "ip", "last_update"])
class NmapDeviceScanner(DeviceScanner):
    """Device scanner that discovers hosts on the network with nmap."""

    exclude = []

    def __init__(self, config):
        """Initialize the scanner from the validated platform config."""
        self.last_results = []

        self.hosts = config[CONF_HOSTS]
        self.exclude = config[CONF_EXCLUDE]
        minutes = config[CONF_HOME_INTERVAL]
        self._options = config[CONF_OPTIONS]
        # Devices seen within this interval are skipped on the next scan.
        self.home_interval = timedelta(minutes=minutes)

        _LOGGER.debug("Scanner initialized")

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()

        _LOGGER.debug("Nmap last results %s", self.last_results)

        return [device.mac for device in self.last_results]

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        # Same next()-based first-match lookup as get_extra_attributes,
        # for consistency (behaviour unchanged: first match or None).
        return next(
            (result.name for result in self.last_results if result.mac == device),
            None,
        )

    def get_extra_attributes(self, device):
        """Return the IP of the given device."""
        filter_ip = next(
            (result.ip for result in self.last_results if result.mac == device), None
        )
        return {"ip": filter_ip}

    def _update_info(self):
        """Scan the network for devices.

        Returns boolean if scanning successful.
        """
        _LOGGER.debug("Scanning")

        scanner = PortScanner()

        options = self._options

        if self.home_interval:
            # Hosts seen within home_interval are kept and excluded from the
            # new scan, so they stay "home" without being probed again.
            boundary = dt_util.now() - self.home_interval
            last_results = [
                device for device in self.last_results if device.last_update > boundary
            ]
            if last_results:
                exclude_hosts = self.exclude + [device.ip for device in last_results]
            else:
                exclude_hosts = self.exclude
        else:
            last_results = []
            exclude_hosts = self.exclude
        if exclude_hosts:
            options += f" --exclude {','.join(exclude_hosts)}"

        try:
            result = scanner.scan(hosts=" ".join(self.hosts), arguments=options)
        except PortScannerError:
            # nmap failed to run; keep the previous results untouched.
            return False

        now = dt_util.now()
        for ipv4, info in result["scan"].items():
            if info["status"]["state"] != "up":
                continue
            name = info["hostnames"][0]["name"] if info["hostnames"] else ipv4
            # Mac address only returned if nmap ran as root
            mac = info["addresses"].get("mac") or get_mac_address(ip=ipv4)
            if mac is None:
                _LOGGER.info("No MAC address found for %s", ipv4)
                continue
            last_results.append(Device(mac.upper(), name, ipv4, now))

        self.last_results = last_results

        _LOGGER.debug("nmap scan successful")
        return True
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/arasan,nand-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Arasan NAND Flash Controller with ONFI 3.1 support
allOf:
- $ref: nand-controller.yaml
maintainers:
- Michal Simek <michal.simek@amd.com>
properties:
compatible:
items:
- enum:
- xlnx,zynqmp-nand-controller
- const: arasan,nfc-v3p10
reg:
maxItems: 1
clocks:
items:
- description: Controller clock
- description: NAND bus clock
clock-names:
items:
- const: controller
- const: bus
interrupts:
maxItems: 1
required:
- compatible
- reg
- clocks
- clock-names
- interrupts
unevaluatedProperties: false
examples:
- |
nfc: nand-controller@ff100000 {
compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10";
reg = <0xff100000 0x1000>;
clock-names = "controller", "bus";
clocks = <&clk200>, <&clk100>;
interrupt-parent = <&gic>;
interrupts = <0 14 4>;
#address-cells = <1>;
#size-cells = <0>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml |
"""
This file is a copy of the scipy.sparse.csgraph._laplacian module from SciPy 1.12
scipy.sparse.csgraph.laplacian supports sparse arrays only starting from Scipy 1.12,
see https://github.com/scipy/scipy/pull/19156. This vendored file can be removed as
soon as Scipy 1.12 becomes the minimum supported version.
Laplacian of a compressed-sparse graph
"""
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator
###############################################################################
# Graph laplacian
def laplacian(
csgraph,
normed=False,
return_diag=False,
use_out_degree=False,
*,
copy=True,
form="array",
dtype=None,
symmetrized=False,
):
"""
Return the Laplacian of a directed graph.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
Compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute symmetrically normalized Laplacian.
Default: False.
return_diag : bool, optional
If True, then also return an array related to vertex degrees.
Default: False.
use_out_degree : bool, optional
If True, then use out-degree instead of in-degree.
This distinction matters only if the graph is asymmetric.
Default: False.
copy : bool, optional
If False, then change `csgraph` in place if possible,
avoiding doubling the memory use.
Default: True, for backward compatibility.
form : 'array', or 'function', or 'lo'
Determines the format of the output Laplacian:
* 'array' is a numpy array;
* 'function' is a pointer to evaluating the Laplacian-vector
or Laplacian-matrix product;
* 'lo' results in the format of the `LinearOperator`.
Choosing 'function' or 'lo' always avoids doubling
the memory use, ignoring `copy` value.
Default: 'array', for backward compatibility.
dtype : None or one of numeric numpy dtypes, optional
The dtype of the output. If ``dtype=None``, the dtype of the
output matches the dtype of the input csgraph, except for
the case ``normed=True`` and integer-like csgraph, where
the output dtype is 'float' allowing accurate normalization,
but dramatically increasing the memory use.
Default: None, for backward compatibility.
symmetrized : bool, optional
If True, then the output Laplacian is symmetric/Hermitian.
The symmetrization is done by ``csgraph + csgraph.T.conj``
without dividing by 2 to preserve integer dtypes if possible
prior to the construction of the Laplacian.
The symmetrization will increase the memory footprint of
sparse matrices unless the sparsity pattern is symmetric or
`form` is 'function' or 'lo'.
Default: False, for backward compatibility.
Returns
-------
lap : ndarray, or sparse matrix, or `LinearOperator`
The N x N Laplacian of csgraph. It will be a NumPy array (dense)
if the input was dense, or a sparse matrix otherwise, or
the format of a function or `LinearOperator` if
`form` equals 'function' or 'lo', respectively.
diag : ndarray, optional
The length-N main diagonal of the Laplacian matrix.
For the normalized Laplacian, this is the array of square roots
of vertex degrees or 1 if the degree is zero.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or just the "Laplacian", and is useful in many
parts of spectral graph theory.
In particular, the eigen-decomposition of the Laplacian can give
insight into many properties of the graph, e.g.,
is commonly used for spectral data embedding and clustering.
The constructed Laplacian doubles the memory use if ``copy=True`` and
``form="array"`` which is the default.
Choosing ``copy=False`` has no effect unless ``form="array"``
or the matrix is sparse in the ``coo`` format, or dense array, except
for the integer input with ``normed=True`` that forces the float output.
Sparse input is reformatted into ``coo`` if ``form="array"``,
which is the default.
If the input adjacency matrix is not symmetric, the Laplacian is
also non-symmetric unless ``symmetrized=True`` is used.
Diagonal entries of the input adjacency matrix are ignored and
replaced with zeros for the purpose of normalization where ``normed=True``.
The normalization uses the inverse square roots of row-sums of the input
adjacency matrix, and thus may fail if the row-sums contain
negative or complex with a non-zero imaginary part values.
The normalization is symmetric, making the normalized Laplacian also
symmetric if the input csgraph was symmetric.
References
----------
.. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csgraph
Our first illustration is the symmetric graph
>>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
>>> G
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6],
[0, 3, 6, 9]])
and its symmetric Laplacian matrix
>>> csgraph.laplacian(G)
array([[ 0, 0, 0, 0],
[ 0, 5, -2, -3],
[ 0, -2, 8, -6],
[ 0, -3, -6, 9]])
The non-symmetric graph
>>> G = np.arange(9).reshape(3, 3)
>>> G
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
has different row- and column sums, resulting in two varieties
of the Laplacian matrix, using an in-degree, which is the default
>>> L_in_degree = csgraph.laplacian(G)
>>> L_in_degree
array([[ 9, -1, -2],
[-3, 8, -5],
[-6, -7, 7]])
or alternatively an out-degree
>>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
>>> L_out_degree
array([[ 3, -1, -2],
[-3, 8, -5],
[-6, -7, 13]])
Constructing a symmetric Laplacian matrix, one can add the two as
>>> L_in_degree + L_out_degree.T
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
or use the ``symmetrized=True`` option
>>> csgraph.laplacian(G, symmetrized=True)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
that is equivalent to symmetrizing the original graph
>>> csgraph.laplacian(G + G.T)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
The goal of normalization is to make the non-zero diagonal entries
of the Laplacian matrix to be all unit, also scaling off-diagonal
entries correspondingly. The normalization can be done manually, e.g.,
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True)
>>> L
array([[ 2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]])
>>> d
array([2, 2, 2])
>>> scaling = np.sqrt(d)
>>> scaling
array([1.41421356, 1.41421356, 1.41421356])
>>> (1/scaling)*L*(1/scaling)
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
Or using ``normed=True`` option
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
which now instead of the diagonal returns the scaling coefficients
>>> d
array([1.41421356, 1.41421356, 1.41421356])
Zero scaling coefficients are substituted with 1s, where scaling
has thus no effect, e.g.,
>>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
>>> G
array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 0., -0., -0.],
[-0., 1., -1.],
[-0., -1., 1.]])
>>> d
array([1., 1., 1.])
Only the symmetric normalization is implemented, resulting
in a symmetric Laplacian matrix if and only if its graph is symmetric
and has all non-negative degrees, like in the examples above.
The output Laplacian matrix is by default a dense array or a sparse matrix
inferring its shape, format, and dtype from the input graph matrix:
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
>>> G
array([[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.]], dtype=float32)
>>> csgraph.laplacian(G)
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]], dtype=float32)
but can alternatively be generated matrix-free as a LinearOperator:
>>> L = csgraph.laplacian(G, form="lo")
>>> L
<3x3 _CustomLinearOperator with dtype=float32>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
or as a lambda-function:
>>> L = csgraph.laplacian(G, form="function")
>>> L
<function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
The Laplacian matrix is used for
spectral data clustering and embedding
as well as for spectral graph partitioning.
Our final example illustrates the latter
for a noisy directed linear graph.
>>> from scipy.sparse import diags, random
>>> from scipy.sparse.linalg import lobpcg
Create a directed linear graph with ``N=35`` vertices
using a sparse adjacency matrix ``G``:
>>> N = 35
>>> G = diags(np.ones(N-1), 1, format="csr")
Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
>>> rng = np.random.default_rng()
>>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
Set initial approximations for eigenvectors:
>>> X = rng.random((N, 2))
The constant vector of ones is always a trivial eigenvector
of the non-normalized Laplacian to be filtered out:
>>> Y = np.ones((N, 1))
Alternating (1) the sign of the graph weights allows determining
labels for spectral max- and min- cuts in a single loop.
Since the graph is undirected, the option ``symmetrized=True``
must be used in the construction of the Laplacian.
The option ``normed=True`` cannot be used in (2) for the negative weights
here as the symmetric normalization evaluates square roots.
The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
a fixed memory footprint and read-only access to the graph.
Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
that determines the labels as the signs of its components in (5).
Since the sign in an eigenvector is not deterministic and can flip,
we fix the sign of the first component to be always +1 in (4).
>>> for cut in ["max", "min"]:
... G = -G # 1.
... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
... eves *= np.sign(eves[0, 0]) # 4.
... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
max-cut labels:
[1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
min-cut labels:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
As anticipated for a (slightly noisy) linear graph,
the max-cut strips all the edges of the graph coloring all
odd vertices into one color and all even vertices into another one,
while the balanced min-cut partitions the graph
in the middle by deleting a single edge.
Both determined partitions are optimal.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("csgraph must be a square matrix or array")
if normed and (
np.issubdtype(csgraph.dtype, np.signedinteger)
or np.issubdtype(csgraph.dtype, np.uint)
):
csgraph = csgraph.astype(np.float64)
if form == "array":
create_lap = _laplacian_sparse if issparse(csgraph) else _laplacian_dense
else:
create_lap = (
_laplacian_sparse_flo if issparse(csgraph) else _laplacian_dense_flo
)
degree_axis = 1 if use_out_degree else 0
lap, d = create_lap(
csgraph,
normed=normed,
axis=degree_axis,
copy=copy,
form=form,
dtype=dtype,
symmetrized=symmetrized,
)
if return_diag:
return lap, d
return lap
def _setdiag_dense(m, d):
step = len(d) + 1
m.flat[::step] = d
def _laplace(m, d):
return lambda v: v * d[:, np.newaxis] - m @ v
def _laplace_normed(m, d, nd):
laplace = _laplace(m, d)
return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
def _laplace_sym(m, d):
return (
lambda v: v * d[:, np.newaxis]
- m @ v
- np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
)
def _laplace_normed_sym(m, d, nd):
laplace_sym = _laplace_sym(m, d)
return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
def _linearoperator(mv, shape, dtype):
return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `copy` is unused and has no effect here.
del copy
if dtype is None:
dtype = graph.dtype
graph_sum = np.asarray(graph.sum(axis=axis)).ravel()
graph_diagonal = graph.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel()
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
else:
md = _laplace_normed(graph, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(graph, graph_sum)
else:
md = _laplace(graph, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `form` is unused and has no effect here.
del form
if dtype is None:
dtype = graph.dtype
needs_copy = False
if graph.format in ("lil", "dok"):
m = graph.tocoo()
else:
m = graph
if copy:
needs_copy = True
if symmetrized:
m += m.T.conj()
w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
if normed:
m = m.tocoo(copy=needs_copy)
isolated_node_mask = w == 0
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m.data /= w[m.row]
m.data /= w[m.col]
m.data *= -1
m.setdiag(1 - isolated_node_mask)
else:
if m.format == "dia":
m = m.copy()
else:
m = m.tocoo(copy=needs_copy)
m.data *= -1
m.setdiag(w)
return m.astype(dtype, copy=False), w.astype(dtype)
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
graph_sum = m.sum(axis=axis)
graph_diagonal = m.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += m.sum(axis=1 - axis)
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
else:
md = _laplace_normed(m, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(m, graph_sum)
else:
md = _laplace(m, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
if form != "array":
raise ValueError(f'{form!r} must be "array"')
if dtype is None:
dtype = graph.dtype
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
if symmetrized:
m += m.T.conj()
np.fill_diagonal(m, 0)
w = m.sum(axis=axis)
if normed:
isolated_node_mask = w == 0
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m /= w
m /= w[:, np.newaxis]
m *= -1
_setdiag_dense(m, 1 - isolated_node_mask)
else:
m *= -1
_setdiag_dense(m, w)
return m.astype(dtype, copy=False), w.astype(dtype, copy=False) | python | github | https://github.com/scikit-learn/scikit-learn | sklearn/externals/_scipy/sparse/csgraph/_laplacian.py |
"""Tools for specifying BSON codec options."""
import collections
from distutils import version # pylint: disable=no-name-in-module
try:
from bson import codec_options
import pymongo
_PYMONGO_VERSION = version.LooseVersion(pymongo.version)
except ImportError:
codec_options = None
_PYMONGO_VERSION = version.LooseVersion('0.0')
class TypeRegistry(object):
pass
_FIELDS = (
'document_class', 'tz_aware', 'uuid_representation', 'unicode_decode_error_handler', 'tzinfo',
)
if _PYMONGO_VERSION >= version.LooseVersion('3.8'):
_DEFAULT_TYPE_REGISTRY = codec_options.TypeRegistry()
_FIELDS = _FIELDS + ('type_registry',)
else:
_DEFAULT_TYPE_REGISTRY = TypeRegistry()
class CodecOptions(collections.namedtuple('CodecOptions', _FIELDS)):
def __new__(cls, document_class=dict,
tz_aware=False,
uuid_representation=None,
unicode_decode_error_handler='strict',
tzinfo=None, type_registry=None):
if document_class != dict:
raise NotImplementedError(
'Mongomock does not implement custom document_class yet: %r' % document_class)
if not isinstance(tz_aware, bool):
raise TypeError('tz_aware must be True or False')
if uuid_representation is None:
uuid_representation = 3
if uuid_representation != 3:
raise NotImplementedError('Mongomock does not handle custom uuid_representation yet')
if unicode_decode_error_handler not in ('strict', None):
raise NotImplementedError(
'Mongomock does not handle custom unicode_decode_error_handler yet')
if tzinfo:
raise NotImplementedError('Mongomock does not handle custom tzinfo yet')
values = (
document_class, tz_aware, uuid_representation, unicode_decode_error_handler, tzinfo)
if 'type_registry' in _FIELDS:
if not type_registry:
type_registry = _DEFAULT_TYPE_REGISTRY
elif not type_registry == _DEFAULT_TYPE_REGISTRY:
raise NotImplementedError(
'Mongomock does not handle custom type_registry yet %r' % type_registry)
values = values + (type_registry,)
return tuple.__new__(cls, values)
def with_options(self, **kwargs):
opts = self._asdict()
opts.update(kwargs)
return CodecOptions(**opts)
def is_supported(custom_codec_options):
if not custom_codec_options:
return None
return CodecOptions(**custom_codec_options._asdict()) | unknown | codeparrot/codeparrot-clean | ||
---
name: Verify released binary assets
permissions: read-all
on:
release:
types: [published]
jobs:
verify-assets:
name: Verify released binary assets
runs-on: ubuntu-latest
steps:
- name: Verify binary assets
env:
GH_TOKEN: ${{ github.token }}
RELEASE: ${{ github.event.release.tag_name }}
REPOSITORY: ${{ github.repository }}
run: |
mkdir github-assets
pushd github-assets
gh --repo "${REPOSITORY}" release download "${RELEASE}"
test_assets() {
if [ "$(wc -l <SHA256SUMS)" != "$(find . -name 'etcd-*' | wc -l)" ]; then
echo "::error:: Invalid number of assets"
exit 1
fi
sha256sum -c SHA256SUMS
}
test_assets
popd
mkdir google-assets
for file in github-assets/*; do
file=$(basename "${file}")
echo "Downloading ${file} from Google..."
curl "https://storage.googleapis.com/etcd/${RELEASE}/${file}" \
--fail \
-o "google-assets/${file}"
done
pushd google-assets
test_assets | unknown | github | https://github.com/etcd-io/etcd | .github/workflows/verify-released-assets.yaml |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.models.ptb_lstm.ptb_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
class PtbReaderTest(tf.test.TestCase):
def setUp(self):
self._string_data = "\n".join(
[" hello there i am",
" rain as day",
" want some cheesy puffs ?"])
def testPtbRawData(self):
tmpdir = tf.test.get_temp_dir()
for suffix in "train", "valid", "test":
filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
with tf.gfile.GFile(filename, "w") as fh:
fh.write(self._string_data)
# Smoke test
output = reader.ptb_raw_data(tmpdir)
self.assertEqual(len(output), 4)
def testPtbIterator(self):
raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
batch_size = 3
num_steps = 2
output = list(reader.ptb_iterator(raw_data, batch_size, num_steps))
self.assertEqual(len(output), 2)
o1, o2 = (output[0], output[1])
self.assertEqual(o1[0].shape, (batch_size, num_steps))
self.assertEqual(o1[1].shape, (batch_size, num_steps))
self.assertEqual(o2[0].shape, (batch_size, num_steps))
self.assertEqual(o2[1].shape, (batch_size, num_steps))
if __name__ == "__main__":
tf.test.main() | unknown | codeparrot/codeparrot-clean | ||
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import MatrixWorkspace, WorkspaceGroup
from mantid.simpleapi import LoadAndMerge, config, mtd
class LoadAndMergeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
config.appendDataSearchSubDir('ILL/IN16B/')
config.appendDataSearchSubDir('ILL/D20/')
def setUp(self):
config['default.facility'] = 'ILL'
config['default.instrument'] = 'IN16B'
def test_single_run_load(self):
out1 = LoadAndMerge(Filename='170257')
self.assertTrue(out1)
self.assertEqual(out1.name(), 'out1')
self.assertTrue(isinstance(out1, MatrixWorkspace))
mtd.clear()
def test_many_runs_summed(self):
out2 = LoadAndMerge(Filename='170257+170258',LoaderName='LoadILLIndirect')
self.assertTrue(out2)
self.assertEqual(out2.name(), 'out2')
self.assertTrue(isinstance(out2, MatrixWorkspace))
mtd.clear()
def test_many_runs_listed(self):
out3 = LoadAndMerge(Filename='170257,170258',LoaderName='LoadILLIndirect')
self.assertTrue(out3)
self.assertEqual(out3.name(), 'out3')
self.assertTrue(isinstance(out3, WorkspaceGroup))
self.assertEqual(out3.getNumberOfEntries(), 2)
self.assertTrue(isinstance(out3.getItem(0), MatrixWorkspace))
self.assertTrue(isinstance(out3.getItem(1), MatrixWorkspace))
self.assertEqual(out3.getItem(0).name(),'170257')
self.assertEqual(out3.getItem(1).name(),'170258')
mtd.clear()
def test_many_runs_mixed(self):
out4 = LoadAndMerge(Filename='170257+170258,170300+170302',LoaderName='LoadILLIndirect')
self.assertTrue(out4)
self.assertEqual(out4.name(), 'out4')
self.assertTrue(isinstance(out4, WorkspaceGroup))
self.assertEqual(out4.getNumberOfEntries(), 2)
self.assertTrue(isinstance(out4.getItem(0), MatrixWorkspace))
self.assertTrue(isinstance(out4.getItem(1), MatrixWorkspace))
self.assertEqual(out4.getItem(0).name(),'170257_170258')
self.assertEqual(out4.getItem(1).name(),'170300_170302')
mtd.clear()
def test_merge_options(self):
self.assertRaises(RuntimeError,LoadAndMerge,Filename='170300+170301',
OutputWorkspace='out5',LoaderName='LoadILLIndirect',
MergeRunsOptions=dict({'FailBehaviour':'Stop'}))
def test_specific_loader(self):
out5 = LoadAndMerge(Filename='170257',LoaderName='LoadILLIndirect',)
self.assertTrue(out5)
self.assertEqual(out5.name(), 'out5')
self.assertTrue(isinstance(out5, MatrixWorkspace))
mtd.clear()
def test_loader_option(self):
out6 = LoadAndMerge(Filename='967101',LoaderName='LoadILLDiffraction',
LoaderVersion=1,LoaderOptions=dict({'DataType':'Raw'}))
self.assertTrue(out6)
self.assertEqual(out6.name(), 'out6')
self.assertTrue(isinstance(out6, MatrixWorkspace))
mtd.clear()
def test_output_hidden(self):
LoadAndMerge(Filename='170257+170258,170300+170302',LoaderName='LoadILLIndirect',
OutputWorkspace='__out')
self.assertTrue(mtd['__out'])
self.assertTrue(isinstance(mtd['__out'], WorkspaceGroup))
self.assertEqual(mtd['__out'].getNumberOfEntries(), 2)
self.assertTrue(isinstance(mtd['__out'].getItem(0), MatrixWorkspace))
self.assertTrue(isinstance(mtd['__out'].getItem(1), MatrixWorkspace))
self.assertEqual(mtd['__out'].getItem(0).name(),'__170257_170258')
self.assertEqual(mtd['__out'].getItem(1).name(),'__170300_170302')
mtd.clear()
def test_non_ill_load(self):
out7 = LoadAndMerge(Filename='IRS26173,26174.RAW')
self.assertTrue(out7)
self.assertTrue(isinstance(out7, WorkspaceGroup))
self.assertEqual(out7.getNumberOfEntries(), 2)
self.assertTrue(isinstance(out7.getItem(0), MatrixWorkspace))
self.assertTrue(isinstance(out7.getItem(1), MatrixWorkspace))
self.assertEqual(out7.getItem(0).name(),'IRS26173')
self.assertEqual(out7.getItem(1).name(),'IRS26174')
mtd.clear()
def test_multi_period_loader_list(self):
out8 = LoadAndMerge(Filename='MUSR00015196,00015197.nxs')
self.assertTrue(out8)
self.assertTrue(isinstance(out8, WorkspaceGroup))
self.assertEqual(out8.getNumberOfEntries(), 4)
self.assertEqual(out8.getItem(0).name(),'MUSR00015196_1')
self.assertEqual(out8.getItem(1).name(),'MUSR00015196_2')
self.assertEqual(out8.getItem(2).name(),'MUSR00015197_1')
self.assertEqual(out8.getItem(3).name(),'MUSR00015197_2')
mtd.clear()
def test_multi_period_loader_sum(self):
out9 = LoadAndMerge(Filename='MUSR00015196+00015197.nxs')
self.assertTrue(out9)
self.assertTrue(isinstance(out9, MatrixWorkspace))
self.assertTrue('MUSR00015196' not in mtd)
self.assertTrue('MUSR00015197' not in mtd)
self.assertTrue('MUSR00015196_1' not in mtd)
self.assertTrue('MUSR00015196_2' not in mtd)
self.assertTrue('MUSR00015197_1' not in mtd)
self.assertTrue('MUSR00015197_2' not in mtd)
mtd.clear()
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
from multiprocessing import Event, Value
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from intercom.back_end_binding import InterComBackEndBinding
from objects.firmware import Firmware
from scheduler.Analysis import AnalysisScheduler
from scheduler.Unpacking import UnpackingScheduler
from storage.db_interface_backend import BackEndDbInterface
from storage.MongoMgr import MongoMgr
from test.common_helper import clean_test_database, get_database_names, get_test_data_dir
from test.integration.common import initialize_config
from web_interface.frontend_main import WebFrontEnd
# pylint: disable=redefined-outer-name
FIRST_ROOT_ID = '5fadb36c49961981f8d87cc21fc6df73a1b90aa1857621f2405d317afb994b64_68415'
SECOND_ROOT_ID = '0383cac1dd8fbeb770559163edbd571c21696c435a4942bec6df151983719731_52143'
TARGET_UID = '49543bc7128542b062d15419c90459be65ca93c3134554bc6224e307b359c021_9968'
TMP_DIR = TemporaryDirectory(prefix="fact_test_")
class MockScheduler:
def __init__(self, *_, **__):
pass
def add_task(self, task):
pass
@pytest.fixture(scope='module')
def finished_event():
return Event()
@pytest.fixture(scope='module')
def intermediate_event():
return Event()
@pytest.fixture(scope='module')
def test_config():
return initialize_config(TMP_DIR)
@pytest.fixture(scope='module', autouse=True)
def test_server(test_config):
mongo = MongoMgr(test_config)
clean_test_database(test_config, get_database_names(test_config))
yield None
clean_test_database(test_config, get_database_names(test_config))
mongo.shutdown()
@pytest.fixture(scope='module')
def test_app(test_config):
frontend = WebFrontEnd(config=test_config)
frontend.app.config['TESTING'] = True
return frontend.app.test_client()
@pytest.fixture(scope='module')
def test_scheduler(test_config, finished_event, intermediate_event):
interface = BackEndDbInterface(config=test_config)
elements_finished = Value('i', 0)
def count_pre_analysis(file_object):
interface.add_object(file_object)
elements_finished.value += 1
if elements_finished.value == 16:
finished_event.set()
elif elements_finished.value == 8:
intermediate_event.set()
analyzer = AnalysisScheduler(test_config, pre_analysis=count_pre_analysis, db_interface=interface)
unpacker = UnpackingScheduler(config=test_config, post_unpack=analyzer.start_analysis_of_object)
intercom = InterComBackEndBinding(config=test_config, analysis_service=analyzer, unpacking_service=unpacker, compare_service=MockScheduler())
yield unpacker
intercom.shutdown()
unpacker.shutdown()
analyzer.shutdown()
def add_test_file(scheduler, path_in_test_dir):
firmware = Firmware(file_path=str(Path(get_test_data_dir(), path_in_test_dir)))
firmware.set_release_date('1990-01-16')
scheduler.add_task(firmware)
def test_check_collision(test_app, test_scheduler, finished_event, intermediate_event):
add_test_file(test_scheduler, 'regression_one')
intermediate_event.wait(timeout=30)
add_test_file(test_scheduler, 'regression_two')
finished_event.wait(timeout=30)
first_response = test_app.get('/analysis/{}/ro/{}'.format(TARGET_UID, FIRST_ROOT_ID))
assert b'insufficient information' not in first_response.data
second_response = test_app.get('/analysis/{}/ro/{}'.format(TARGET_UID, SECOND_ROOT_ID))
assert b'insufficient information' not in second_response.data | unknown | codeparrot/codeparrot-clean | ||
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'diags', 'block_diag']
from warnings import warn
import numpy as np
from .sputils import upcast
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .lil import lil_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
    """Build a sparse matrix from a row-wise array of diagonals.

    Parameters
    ----------
    data : array_like
        Matrix diagonals stored row-wise.
    diags : diagonals to set
        - k = 0  the main diagonal
        - k > 0  the k-th upper diagonal
        - k < 0  the k-th lower diagonal
    m, n : int
        Shape of the result.
    format : str, optional
        Format of the result (e.g. "csr").  By default (format=None)
        an appropriate sparse matrix format is returned.  This choice
        is subject to change.

    See Also
    --------
    diags : more convenient form of this function
    dia_matrix : the sparse DIAgonal format.

    Examples
    --------
    >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
    >>> diags = array([0,-1,2])
    >>> spdiags(data, diags, 4, 4).todense()
    matrix([[1, 0, 3, 0],
            [1, 2, 0, 4],
            [0, 2, 3, 0],
            [0, 0, 3, 4]])
    """
    # The (data, offsets) pair is exactly the DIA storage scheme, so build
    # a dia_matrix directly and let asformat() handle any conversion.
    result = dia_matrix((data, diags), shape=(m, n))
    return result.asformat(format)
def diags(diagonals, offsets, shape=None, format=None, dtype=None):
    """
    Construct a sparse matrix from diagonals.

    .. versionadded:: 0.11

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the matrix diagonals,
        corresponding to `offsets`.
    offsets : sequence of int
        Diagonals to set:
          - k = 0 the main diagonal
          - k > 0 the k-th upper diagonal
          - k < 0 the k-th lower diagonal
    shape : tuple of int, optional
        Shape of the result. If omitted, a square matrix large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result.  By default (format=None) an
        appropriate sparse matrix format is returned.  This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the matrix.

    See Also
    --------
    spdiags : construct matrix from diagonals

    Notes
    -----
    This function differs from `spdiags` in the way it handles
    off-diagonals.

    The result from `diags` is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Repeated diagonal offsets are disallowed.

    Examples
    --------
    >>> diagonals = [[1,2,3,4], [1,2,3], [1,2]]
    >>> diags(diagonals, [0, -1, 2]).todense()
    matrix([[1, 0, 1, 0],
            [1, 2, 0, 2],
            [0, 2, 3, 0],
            [0, 0, 3, 4]])

    Broadcasting of scalars is supported (but shape needs to be
    specified):

    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).todense()
    matrix([[-2.,  1.,  0.,  0.],
            [ 1., -2.,  1.,  0.],
            [ 0.,  1., -2.,  1.],
            [ 0.,  0.,  1., -2.]])

    If only one diagonal is wanted (as in `numpy.diag`), the following
    works as well:

    >>> diags([1, 2, 3], 1).todense()
    matrix([[ 0.,  1.,  0.,  0.],
            [ 0.,  0.,  2.,  0.],
            [ 0.,  0.,  0.,  3.],
            [ 0.,  0.,  0.,  0.]])
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    try:
        iter(offsets)
    except TypeError:
        # now check that there's actually only one diagonal
        # (a scalar offset with a sequence of sequences is ambiguous and
        # means the caller forgot to pass matching offsets)
        try:
            iter(diagonals[0])
        except TypeError:
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = list(map(np.atleast_1d, diagonals))
    offsets = np.atleast_1d(offsets)
    # Basic check
    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")
    # Determine shape, if omitted: square and just large enough to hold
    # the first diagonal at its offset.
    if shape is None:
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)
    # Determine data type, if omitted: smallest dtype that can represent
    # every supplied diagonal.
    if dtype is None:
        dtype = np.common_type(*diagonals)
    # Construct data array in the DIA layout: one row per offset, entries
    # aligned by *column* index, so row k's slot for offset is shifted by
    # max(0, offset).  M is the widest such row needed by any diagonal.
    m, n = shape
    M = max([min(m + offset, n - offset) + max(0, offset)
             for offset in offsets])
    M = max(0, M)
    data_arr = np.zeros((len(offsets), M), dtype=dtype)
    for j, diagonal in enumerate(diagonals):
        offset = offsets[j]
        k = max(0, offset)
        # length of the diagonal at this offset inside an (m, n) matrix
        length = min(m + offset, n - offset)
        if length <= 0:
            raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
        try:
            # NumPy broadcasting makes length-1 diagonals fill the whole slot
            data_arr[j, k:k+length] = diagonal
        except ValueError:
            # Re-raise with a clearer message when the length truly
            # disagrees; otherwise propagate the original assignment error.
            if len(diagonal) != length and len(diagonal) != 1:
                raise ValueError(
                    "Diagonal length (index %d: %d at offset %d) does not "
                    "agree with matrix size (%d, %d)." % (
                    j, len(diagonal), offset, m, n))
            raise
    return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
    """Identity matrix in sparse format

    Builds the (n, n) identity matrix in the requested sparse format
    with the requested dtype.

    Parameters
    ----------
    n : integer
        Shape of the identity matrix.
    dtype :
        Data type of the matrix
    format : string
        Sparse format of the result, e.g. format="csr", etc.

    Examples
    --------
    >>> identity(3).todense()
    matrix([[ 1.,  0.,  0.],
            [ 0.,  1.,  0.],
            [ 0.,  0.,  1.]])
    >>> identity(3, dtype='int8', format='dia')
    <3x3 sparse matrix of type '<type 'numpy.int8'>'
            with 3 stored elements (1 diagonals) in DIAgonal format>
    """
    # The identity matrix is simply a square eye() with ones on the main
    # diagonal; delegate entirely to it.
    result = eye(n, n, dtype=dtype, format=format)
    return result
def eye(m, n=None, k=0, dtype=float, format=None):
    """Sparse matrix with ones on diagonal

    Returns a sparse (m x n) matrix where the k-th diagonal
    is all ones and everything else is zeros.

    Parameters
    ----------
    m : integer
        Number of rows in the matrix.
    n : integer, optional
        Number of columns. Default: m
    k : integer, optional
        Diagonal to place ones on. Default: 0 (main diagonal)
    dtype :
        Data type of the matrix
    format : string
        Sparse format of the result, e.g. format="csr", etc.

    Examples
    --------
    >>> from scipy import sparse
    >>> sparse.eye(3).todense()
    matrix([[ 1.,  0.,  0.],
            [ 0.,  1.,  0.],
            [ 0.,  0.,  1.]])
    >>> sparse.eye(3, dtype=np.int8)
    <3x3 sparse matrix of type '<type 'numpy.int8'>'
            with 3 stored elements (1 diagonals) in DIAgonal format>
    """
    # NOTE: the parameter docs previously had m and n swapped; m is the
    # number of rows (first dimension), n the number of columns.
    if n is None:
        n = m
    m, n = int(m), int(n)

    if m == n and k == 0:
        # fast branch for special formats: build the index arrays directly
        # instead of going through spdiags plus a format conversion
        if format in ['csr', 'csc']:
            indptr = np.arange(n+1, dtype=np.intc)
            indices = np.arange(n, dtype=np.intc)
            data = np.ones(n, dtype=dtype)
            cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
            return cls((data, indices, indptr), (n, n))
        elif format == 'coo':
            row = np.arange(n, dtype=np.intc)
            col = np.arange(n, dtype=np.intc)
            data = np.ones(n, dtype=dtype)
            return coo_matrix((data, (row, col)), (n, n))

    # general case: the k-th diagonal of an (m, n) matrix has
    # min(m + k, n) entries (clipped to zero for out-of-range offsets)
    diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
    return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ----------
    A : sparse or dense matrix
        first matrix of the product
    B : sparse or dense matrix
        second matrix of the product
    format : string
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker product in a sparse matrix format

    Examples
    --------
    >>> A = csr_matrix(array([[0,2],[5,0]]))
    >>> B = csr_matrix(array([[1,2],[3,4]]))
    >>> kron(A,B).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])
    >>> kron(A,[[1,2],[3,4]]).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])
    """
    B = coo_matrix(B)
    # Two strategies: when B has at least half its entries nonzero, each
    # nonzero of A becomes a dense B-sized block, so BSR is a natural fit;
    # otherwise work entry-wise in COO.
    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        # B is fairly dense, use BSR
        A = csr_matrix(A,copy=True)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape)
        # every stored value of A is scaled by the dense block B;
        # A's CSR index structure carries over directly to the BSR result
        B = B.toarray()
        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
        data = data * B
        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
    else:
        # use COO
        A = coo_matrix(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape)
        # expand entries of a into blocks
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)
        # scale each A coordinate to the top-left corner of its block
        row *= B.shape[0]
        col *= B.shape[1]
        # increment block indices
        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
        row += B.row
        col += B.col
        row,col = row.reshape(-1),col.reshape(-1)
        # compute block entries
        data = data.reshape(-1,B.nnz) * B.data
        data = data.reshape(-1)
        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    The Kronecker sum of two square matrices is
    kron(I_n, A) + kron(B, I_m), where A is (m, m), B is (n, n) and
    I_m, I_n are identity matrices of shape (m, m) and (n, n).

    Parameters
    ----------
    A
        square matrix
    B
        square matrix
    format : string
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker sum in a sparse matrix format

    Examples
    --------
    """
    A = coo_matrix(A)
    B = coo_matrix(B)

    a_rows, a_cols = A.shape
    if a_rows != a_cols:
        raise ValueError('A is not square')
    b_rows, b_cols = B.shape
    if b_rows != b_cols:
        raise ValueError('B is not square')

    # both Kronecker products are computed in a common upcast dtype
    common_dtype = upcast(A.dtype, B.dtype)
    left = kron(eye(b_rows, dtype=common_dtype), A, format=format)
    right = kron(B, eye(a_rows, dtype=common_dtype), format=format)

    # L + R may come back in a different format, so convert explicitly
    return (left + right).asformat(format)
def hstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices horizontally (column wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : string
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.

    See Also
    --------
    vstack : stack sparse matrices vertically (row wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, hstack
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> hstack( [A,B] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6]])
    """
    # A horizontal stack is a block matrix with a single block-row.
    grid = [blocks]
    return bmat(grid, format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices vertically (row wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : string
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.

    See Also
    --------
    hstack : stack sparse matrices horizontally (column wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, vstack
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5,6]])
    >>> vstack( [A,B] ).todense()
    matrix([[1, 2],
            [3, 4],
            [5, 6]])
    """
    # A vertical stack is a block matrix with one block per block-row.
    grid = [[block] for block in blocks]
    return bmat(grid, format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ----------
    blocks : array_like
        Grid of sparse matrices with compatible shapes.
        An entry of None implies an all-zero matrix.
    format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
        The sparse format of the result (e.g. "csr"). If not given, the matrix
        is returned in "coo" format.
    dtype : dtype specifier, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    bmat : sparse matrix
        A "coo" sparse matrix or type of sparse matrix identified by `format`.

    See Also
    --------
    block_diag, diags

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> C = coo_matrix([[7]])
    >>> bmat( [[A,B],[None,C]] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6],
            [0, 0, 7]])
    >>> bmat( [[A,None],[None,C]] ).todense()
    matrix([[1, 2, 0],
            [3, 4, 0],
            [0, 0, 7]])
    """
    blocks = np.asarray(blocks, dtype='object')

    # FIX: np.rank() was deprecated and removed from numpy; ndarray.ndim is
    # the supported spelling.
    if blocks.ndim != 2:
        raise ValueError('blocks must have rank 2')

    M, N = blocks.shape

    # FIX: np.bool (an alias of the builtin) was removed from numpy as well.
    block_mask = np.zeros(blocks.shape, dtype=bool)
    brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)
    bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)

    # convert everything to COO format, recording/validating the height of
    # each block-row and the width of each block-column as we go
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_matrix(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True

                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                else:
                    if brow_lengths[i] != A.shape[0]:
                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)

                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                else:
                    if bcol_lengths[j] != A.shape[1]:
                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)

    # ensure that at least one value in each row and col is not None
    if brow_lengths.min() == 0:
        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin())
    if bcol_lengths.min() == 0:
        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin())

    nnz = sum([A.nnz for A in blocks[block_mask]])
    if dtype is None:
        dtype = upcast(*tuple([A.dtype for A in blocks[block_mask]]))

    # global offset of each block row/column in the assembled matrix
    row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
    col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))

    data = np.empty(nnz, dtype=dtype)
    row = np.empty(nnz, dtype=np.intc)
    col = np.empty(nnz, dtype=np.intc)

    nnz = 0
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = blocks[i,j]
                data[nnz:nnz + A.nnz] = A.data
                row[nnz:nnz + A.nnz] = A.row
                col[nnz:nnz + A.nnz] = A.col

                # shift the block's local coordinates to global coordinates
                row[nnz:nnz + A.nnz] += row_offsets[i]
                col[nnz:nnz + A.nnz] += col_offsets[j]

                nnz += A.nnz

    shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
    """
    Build a block diagonal sparse matrix from provided matrices.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    A, B, ... : sequence of matrices
        Input matrices.
    format : str, optional
        The sparse format of the result (e.g. "csr"). If not given, the matrix
        is returned in "coo" format.
    dtype : dtype specifier, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    res : sparse matrix

    See Also
    --------
    bmat, diags

    Examples
    --------
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> C = coo_matrix([[7]])
    >>> block_diag((A, B, C)).todense()
    matrix([[1, 2, 0, 0],
            [3, 4, 0, 0],
            [0, 0, 5, 0],
            [0, 0, 6, 0],
            [0, 0, 0, 7]])
    """
    # Build a block grid with the i-th matrix at grid position (i, i) and
    # None (implicit zeros) everywhere else, then delegate to bmat().
    total = len(mats)
    grid = []
    for pos, mat in enumerate(mats):
        block_row = [None] * total
        block_row[pos] = mat if issparse(mat) else coo_matrix(mat)
        grid.append(block_row)
    return bmat(grid, format=format, dtype=dtype)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with uniformely
    distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str
        sparse matrix format.
    dtype : dtype
        type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and dtype not in [np.float32, np.float64, np.longdouble]:
        raise NotImplementedError("type %s not supported" % dtype)

    mn = m * n

    # XXX: sparse uses intc instead of intp...
    idx_dtype = np.intp
    if mn > np.iinfo(idx_dtype).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(idx_dtype).max)

    # number of non-zero values requested
    k = int(density * m * n)

    # Oversample slightly (a handful extra, or 2%) so that discarding
    # duplicate flat indices still usually leaves k unique ones.
    sample_size = min(k + 5, 1.02 * k)

    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)

    def _draw_unique(rng, amount):
        # draw `amount` uniform samples, map to flat indices, dedupe,
        # and keep at most k of them
        u = rng.rand(int(amount))
        return np.unique(np.floor(u * mn))[:k]

    flat = _draw_unique(random_state, sample_size)
    while flat.size < k:
        # not enough unique indices yet: grow the sample by 5% and retry
        sample_size *= 1.05
        flat = _draw_unique(random_state, sample_size)

    # translate flat (column-major) indices into (row, col) pairs
    j = np.floor(flat * 1. / m).astype(idx_dtype)
    i = (flat - j * m).astype(idx_dtype)
    vals = random_state.rand(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
#!/bin/bash
#
# Runs the Develocity (Gradle Enterprise) build-validation experiment for
# local build caching in different locations and publishes a summary of the
# results as a Buildkite annotation.

set -euo pipefail

VALIDATION_SCRIPTS_VERSION=2.7.1

# The Develocity access key is stored in Vault.
GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key)
export GRADLE_ENTERPRISE_ACCESS_KEY

# Download and unpack the build-validation scripts.
curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/develocity-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o develocity-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip

# Create a temporary file to capture the experiment output; clean it up on exit.
tmpOutputFile=$(mktemp)
trap 'rm -f "$tmpOutputFile"' EXIT

set +e
develocity-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b "$BUILDKITE_BRANCH" --develocity-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee "$tmpOutputFile"
# Capture the return value (pipefail is still in effect, so this reflects the
# validation script's exit code, not tee's).
retval=$?
set -e

# Now read the content from the temporary file into a variable
perfOutput=$(cat "$tmpOutputFile" | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g')
investigationOutput=$(cat "$tmpOutputFile" | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g')

# Initialize HTML output variable
summaryHtml="<h4>Build Cache Performance Characteristics</h4>"
summaryHtml+="<ul>"

# Render each "label: value" line of the performance section as a list item.
while IFS=: read -r label value; do
  if [[ -n "$label" && -n "$value" ]]; then
    # Trim whitespace from label and value
    trimmed_label=$(echo "$label" | xargs)
    trimmed_value=$(echo "$value" | xargs)
    # Append to HTML output variable
    summaryHtml+="<li><strong>$trimmed_label:</strong> $trimmed_value</li>"
  fi
done <<< "$perfOutput"
summaryHtml+="</ul>"

# generate html for links
summaryHtml+="<h4>Investigation Links</h4>"
summaryHtml+="<ul>"

# Render every line containing a URL as a link; the URL is the last field.
while IFS= read -r line; do
  if [[ "$line" =~ http.* ]]; then
    # Extract URL and description using awk
    url=$(echo "$line" | awk '{print $NF}')
    description=$(echo "$line" | sed -e "s/:.*//")
    # Append to HTML output variable
    summaryHtml+=" <li><a href=\"$url\">$description</a></li>"
  fi
done <<< "$investigationOutput"

# End of the HTML content
summaryHtml+="</ul>"

cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info"
$summaryHtml
EOF

# Map the experiment's exit code to a human-readable message.
if [ $retval -eq 0 ]; then
  echo "Experiment completed successfully"
elif [ $retval -eq 1 ]; then
  echo "An invalid input was provided while attempting to run the experiment"
elif [ $retval -eq 2 ]; then
  echo "One of the builds that is part of the experiment failed"
elif [ $retval -eq 3 ]; then
  echo "The build was not fully cacheable for the given task graph"
else
  # BUG FIX: this branch previously re-tested `-eq 3`, which made the
  # "unclassified fatal error" message unreachable.
  echo "An unclassified, fatal error happened while running the experiment"
fi

exit $retval
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pluggable Back-end for Account Server
"""
from uuid import uuid4
import sqlite3
import six
from swift.common.utils import Timestamp, RESERVED_BYTE
from swift.common.db import DatabaseBroker, utf8encode, zero_like
DATADIR = 'accounts'
POLICY_STAT_TRIGGER_SCRIPT = """
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
BEGIN
INSERT OR IGNORE INTO policy_stat
(storage_policy_index, container_count, object_count, bytes_used)
VALUES (new.storage_policy_index, 0, 0, 0);
UPDATE policy_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used
WHERE storage_policy_index = new.storage_policy_index;
END;
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
BEGIN
UPDATE policy_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used
WHERE storage_policy_index = old.storage_policy_index;
END;
"""
class AccountBroker(DatabaseBroker):
    """Encapsulates working with an account database."""
    # DatabaseBroker hooks: what kind of DB this is, what record type it
    # holds, and which timestamp column drives row reclamation.
    db_type = 'account'
    db_contains_type = 'container'
    db_reclaim_timestamp = 'delete_timestamp'

    def _initialize(self, conn, put_timestamp, **kwargs):
        """
        Create a brand new account database (tables, indices, triggers, etc.)

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        :raises ValueError: if self.account has not been set
        """
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        self.create_container_table(conn)
        self.create_account_stat_table(conn, put_timestamp)
        self.create_policy_stat_table(conn)

    def create_container_table(self, conn):
        """
        Create container table which is specific to the account DB.

        The triggers keep the rollup columns (and the incremental ``hash``,
        via the ``chexor`` SQL function registered by the base broker) in
        account_stat consistent; UPDATEs are forbidden so every change goes
        through DELETE + INSERT and fires the triggers.

        :param conn: DB connection object
        """
        conn.executescript("""
            CREATE TABLE container (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                put_timestamp TEXT,
                delete_timestamp TEXT,
                object_count INTEGER,
                bytes_used INTEGER,
                deleted INTEGER DEFAULT 0,
                storage_policy_index INTEGER DEFAULT 0
            );
            CREATE INDEX ix_container_deleted_name ON
                container (deleted, name);
            CREATE TRIGGER container_insert AFTER INSERT ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count + (1 - new.deleted),
                    object_count = object_count + new.object_count,
                    bytes_used = bytes_used + new.bytes_used,
                    hash = chexor(hash, new.name,
                                  new.put_timestamp || '-' ||
                                    new.delete_timestamp || '-' ||
                                    new.object_count || '-' || new.bytes_used);
            END;
            CREATE TRIGGER container_update BEFORE UPDATE ON container
            BEGIN
                SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
            END;
            CREATE TRIGGER container_delete AFTER DELETE ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count - (1 - old.deleted),
                    object_count = object_count - old.object_count,
                    bytes_used = bytes_used - old.bytes_used,
                    hash = chexor(hash, old.name,
                                  old.put_timestamp || '-' ||
                                    old.delete_timestamp || '-' ||
                                    old.object_count || '-' || old.bytes_used);
            END;
        """ + POLICY_STAT_TRIGGER_SCRIPT)

    def create_account_stat_table(self, conn, put_timestamp):
        """
        Create account_stat table which is specific to the account DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        conn.executescript("""
            CREATE TABLE account_stat (
                account TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                container_count INTEGER,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );
            INSERT INTO account_stat (container_count) VALUES (0);
        """)
        # Fill in the single row created above with this account's identity
        # and timestamps.
        conn.execute('''
            UPDATE account_stat SET account = ?, created_at = ?, id = ?,
                put_timestamp = ?, status_changed_at = ?
            ''', (self.account, Timestamp.now().internal, str(uuid4()),
                  put_timestamp, put_timestamp))

    def create_policy_stat_table(self, conn):
        """
        Create policy_stat table which is specific to the account DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        The INSERT seeds policy index 0 from the legacy account-wide totals
        so that upgraded databases keep their existing stats.

        :param conn: DB connection object
        """
        conn.executescript("""
            CREATE TABLE policy_stat (
                storage_policy_index INTEGER PRIMARY KEY,
                container_count INTEGER DEFAULT 0,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0
            );
            INSERT OR IGNORE INTO policy_stat (
                storage_policy_index, container_count, object_count,
                bytes_used
            )
            SELECT 0, container_count, object_count, bytes_used
            FROM account_stat
            WHERE container_count > 0;
        """)

    def get_db_version(self, conn):
        # Lazily detect the schema version and cache it on self:
        # version 1 databases have the ix_container_deleted_name index.
        if self._db_version == -1:
            self._db_version = 0
            for row in conn.execute('''
                    SELECT name FROM sqlite_master
                    WHERE name = 'ix_container_deleted_name' '''):
                self._db_version = 1
        return self._db_version

    def _commit_puts_load(self, item_list, entry):
        """See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
        # check to see if the update includes policy_index or not
        (name, put_timestamp, delete_timestamp, object_count, bytes_used,
         deleted) = entry[:6]
        if len(entry) > 6:
            storage_policy_index = entry[6]
        else:
            # legacy support during upgrade until first non legacy storage
            # policy is defined
            storage_policy_index = 0
        item_list.append(
            {'name': name,
             'put_timestamp': put_timestamp,
             'delete_timestamp': delete_timestamp,
             'object_count': object_count,
             'bytes_used': bytes_used,
             'deleted': deleted,
             'storage_policy_index': storage_policy_index})

    def empty(self):
        """
        Check if the account DB is empty.

        :returns: True if the database has no active containers.
        """
        self._commit_puts_stale_ok()
        with self.get() as conn:
            row = conn.execute(
                'SELECT container_count from account_stat').fetchone()
            return zero_like(row[0])

    def make_tuple_for_pickle(self, record):
        # Field order must match what _commit_puts_load() unpacks.
        return (record['name'], record['put_timestamp'],
                record['delete_timestamp'], record['object_count'],
                record['bytes_used'], record['deleted'],
                record['storage_policy_index'])

    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used, storage_policy_index):
        """
        Create a container with the given attributes.

        :param name: name of the container to create (a native string)
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        :param storage_policy_index:  the storage policy for this container
        """
        # A container is considered deleted once its delete timestamp is
        # newer than its put timestamp and it holds no objects.
        if Timestamp(delete_timestamp) > Timestamp(put_timestamp) and \
                zero_like(object_count):
            deleted = 1
        else:
            deleted = 0
        record = {'name': name, 'put_timestamp': put_timestamp,
                  'delete_timestamp': delete_timestamp,
                  'object_count': object_count,
                  'bytes_used': bytes_used,
                  'deleted': deleted,
                  'storage_policy_index': storage_policy_index}
        self.put_record(record)

    def _is_deleted_info(self, status, container_count, delete_timestamp,
                         put_timestamp):
        """
        Apply delete logic to database info.

        :returns: True if the DB is considered to be deleted, False otherwise
        """
        # Deleted means either an explicit DELETED status, or no containers
        # left and a delete timestamp newer than the put timestamp.
        return status == 'DELETED' or zero_like(container_count) and (
            Timestamp(delete_timestamp) > Timestamp(put_timestamp))

    def _is_deleted(self, conn):
        """
        Check account_stat table and evaluate info.

        :param conn: database conn
        :returns: True if the DB is considered to be deleted, False otherwise
        """
        info = conn.execute('''
            SELECT put_timestamp, delete_timestamp, container_count, status
            FROM account_stat''').fetchone()
        return self._is_deleted_info(**info)

    def is_status_deleted(self):
        """
        Return True if the status field is set to DELETED, or if the
        delete_timestamp is newer than the put_timestamp.
        """
        # NOTE(review): unlike _is_deleted(), this check ignores
        # container_count entirely.
        with self.get() as conn:
            row = conn.execute('''
                SELECT put_timestamp, delete_timestamp, status
                FROM account_stat''').fetchone()
            return row['status'] == "DELETED" or (
                row['delete_timestamp'] > row['put_timestamp'])

    def get_policy_stats(self, do_migrations=False):
        """
        Get global policy stats for the account.

        :param do_migrations: boolean, if True the policy stat dicts will
                              always include the 'container_count' key;
                              otherwise it may be omitted on legacy databases
                              until they are migrated.

        :returns: dict of policy stats where the key is the policy index and
                  the value is a dictionary like {'object_count': M,
                  'bytes_used': N, 'container_count': L}
        """
        columns = [
            'storage_policy_index',
            'container_count',
            'object_count',
            'bytes_used',
        ]

        def run_query():
            return (conn.execute('''
                SELECT %s
                FROM policy_stat
                ''' % ', '.join(columns)).fetchall())

        self._commit_puts_stale_ok()
        info = []
        with self.get() as conn:
            try:
                info = run_query()
            except sqlite3.OperationalError as err:
                # Legacy schemas may lack the container_count column, or
                # the policy_stat table altogether; migrate or degrade
                # gracefully depending on do_migrations.
                if "no such column: container_count" in str(err):
                    if do_migrations:
                        self._migrate_add_container_count(conn)
                    else:
                        columns.remove('container_count')
                    info = run_query()
                elif "no such table: policy_stat" in str(err):
                    if do_migrations:
                        self.create_policy_stat_table(conn)
                        info = run_query()
                    # else, pass and let the results be empty
                else:
                    raise

        policy_stats = {}
        for row in info:
            stats = dict(row)
            key = stats.pop('storage_policy_index')
            policy_stats[key] = stats
        return policy_stats

    def get_info(self):
        """
        Get global data for the account.

        :returns: dict with keys: account, created_at, put_timestamp,
                  delete_timestamp, status_changed_at, container_count,
                  object_count, bytes_used, hash, id
        """
        self._commit_puts_stale_ok()
        with self.get() as conn:
            return dict(conn.execute('''
                SELECT account, created_at, put_timestamp, delete_timestamp,
                       status_changed_at, container_count, object_count,
                       bytes_used, hash, id
                FROM account_stat
            ''').fetchone())

    def list_containers_iter(self, limit, marker, end_marker, prefix,
                             delimiter, reverse=False, allow_reserved=False):
        """
        Get a list of containers sorted by name starting at marker onward, up
        to limit entries. Entries will begin with the prefix and will not have
        the delimiter after the prefix.

        :param limit: maximum number of entries to get
        :param marker: marker query
        :param end_marker: end marker query
        :param prefix: prefix query
        :param delimiter: delimiter for query
        :param reverse: reverse the result order.
        :param allow_reserved: exclude names with reserved-byte by default

        :returns: list of tuples of (name, object_count, bytes_used,
                  put_timestamp, 0)
        """
        delim_force_gte = False
        if six.PY2:
            (marker, end_marker, prefix, delimiter) = utf8encode(
                marker, end_marker, prefix, delimiter)
        if reverse:
            # Reverse the markers if we are reversing the listing.
            marker, end_marker = end_marker, marker
        self._commit_puts_stale_ok()
        if delimiter and not prefix:
            prefix = ''
        if prefix:
            # smallest name that no longer shares the prefix
            end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
        orig_marker = marker
        with self.get() as conn:
            results = []
            # Each pass of this loop issues one SQL query; when a delimiter
            # rolls a whole "subdirectory" into one entry, the marker is
            # advanced past it and the loop queries again.
            while len(results) < limit:
                query = """
                    SELECT name, object_count, bytes_used, put_timestamp, 0
                    FROM container
                    WHERE """
                query_args = []
                if end_marker and (not prefix or end_marker < end_prefix):
                    query += ' name < ? AND'
                    query_args.append(end_marker)
                elif prefix:
                    query += ' name < ? AND'
                    query_args.append(end_prefix)

                if delim_force_gte:
                    # include the name equal to the rolled-up marker itself
                    query += ' name >= ? AND'
                    query_args.append(marker)
                    # Always set back to False
                    delim_force_gte = False
                elif marker and (not prefix or marker >= prefix):
                    query += ' name > ? AND'
                    query_args.append(marker)
                elif prefix:
                    query += ' name >= ? AND'
                    query_args.append(prefix)
                if not allow_reserved:
                    # filter out names beginning with the reserved byte
                    query += ' name >= ? AND'
                    query_args.append(chr(ord(RESERVED_BYTE) + 1))
                if self.get_db_version(conn) < 1:
                    # the leading '+' stops SQLite from using an index on
                    # deleted in pre-v1 schemas
                    query += ' +deleted = 0'
                else:
                    query += ' deleted = 0'
                query += ' ORDER BY name %s LIMIT ?' % \
                         ('DESC' if reverse else '')
                query_args.append(limit - len(results))
                curs = conn.execute(query, query_args)
                curs.row_factory = None

                # Delimiters without a prefix is ignored, further if there
                # is no delimiter then we can simply return the result as
                # prefixes are now handled in the SQL statement.
                if prefix is None or not delimiter:
                    return [r for r in curs]

                # We have a delimiter and a prefix (possibly empty string) to
                # handle
                rowcount = 0
                for row in curs:
                    rowcount += 1
                    name = row[0]
                    if reverse:
                        end_marker = name
                    else:
                        marker = name

                    if len(results) >= limit:
                        curs.close()
                        return results
                    end = name.find(delimiter, len(prefix))
                    if end >= 0:
                        # delimiter found past the prefix: emit a single
                        # "directory" entry and skip the rest of its names
                        if reverse:
                            end_marker = name[:end + len(delimiter)]
                        else:
                            marker = ''.join([
                                name[:end],
                                delimiter[:-1],
                                chr(ord(delimiter[-1:]) + 1),
                            ])
                            # we want result to be inclusive of delim+1
                            delim_force_gte = True
                        dir_name = name[:end + len(delimiter)]
                        if dir_name != orig_marker:
                            results.append([dir_name, 0, 0, '0', 1])
                        curs.close()
                        break
                    results.append(row)
                if not rowcount:
                    break
            return results

    def merge_items(self, item_list, source=None):
        """
        Merge items into the container table.

        :param item_list: list of dictionaries of {'name', 'put_timestamp',
                          'delete_timestamp', 'object_count', 'bytes_used',
                          'deleted', 'storage_policy_index'}
        :param source: if defined, update incoming_sync with the source
        """
        def _really_merge_items(conn):
            max_rowid = -1
            curs = conn.cursor()
            for rec in item_list:
                rec.setdefault('storage_policy_index', 0)  # legacy
                record = [rec['name'], rec['put_timestamp'],
                          rec['delete_timestamp'], rec['object_count'],
                          rec['bytes_used'], rec['deleted'],
                          rec['storage_policy_index']]
                query = '''
                    SELECT name, put_timestamp, delete_timestamp,
                           object_count, bytes_used, deleted,
                           storage_policy_index
                    FROM container WHERE name = ?
                '''
                if self.get_db_version(conn) >= 1:
                    query += ' AND deleted IN (0, 1)'
                curs_row = curs.execute(query, (rec['name'],))
                curs_row.row_factory = None
                row = curs_row.fetchone()
                if row:
                    row = list(row)
                    # keep existing values for any fields the incoming
                    # record leaves unset
                    for i in range(5):
                        if record[i] is None and row[i] is not None:
                            record[i] = row[i]
                    if Timestamp(row[1]) > \
                            Timestamp(record[1]):  # Keep newest put_timestamp
                        record[1] = row[1]
                    if Timestamp(row[2]) > \
                            Timestamp(record[2]):  # Keep newest delete_timestamp
                        record[2] = row[2]
                # If deleted, mark as such
                if Timestamp(record[2]) > Timestamp(record[1]) and \
                        zero_like(record[3]):
                    record[5] = 1
                else:
                    record[5] = 0
                # DELETE + INSERT instead of UPDATE: updates are blocked by
                # a trigger so the account_stat rollups stay consistent.
                curs.execute('''
                    DELETE FROM container WHERE name = ? AND
                                                deleted IN (0, 1)
                ''', (record[0],))
                curs.execute('''
                    INSERT INTO container (name, put_timestamp,
                        delete_timestamp, object_count, bytes_used,
                        deleted, storage_policy_index)
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                ''', record)
                if source:
                    max_rowid = max(max_rowid, rec['ROWID'])
            if source:
                # remember how far we have synced from this remote
                try:
                    curs.execute('''
                        INSERT INTO incoming_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''', (max_rowid, source))
                except sqlite3.IntegrityError:
                    curs.execute('''
                        UPDATE incoming_sync
                        SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''', (max_rowid, source))
            conn.commit()

        with self.get() as conn:
            # create the policy stat table if needed and add spi to container
            try:
                _really_merge_items(conn)
            except sqlite3.OperationalError as err:
                if 'no such column: storage_policy_index' not in str(err):
                    raise
                self._migrate_add_storage_policy_index(conn)
                _really_merge_items(conn)

    def _migrate_add_container_count(self, conn):
        """
        Add the container_count column to the 'policy_stat' table and
        update it

        :param conn: DB connection object
        """
        # add the container_count column
        curs = conn.cursor()
        curs.executescript('''
            DROP TRIGGER container_delete_ps;
            DROP TRIGGER container_insert_ps;
            ALTER TABLE policy_stat
            ADD COLUMN container_count INTEGER DEFAULT 0;
        ''' + POLICY_STAT_TRIGGER_SCRIPT)

        # keep the simple case simple, if there's only one entry in the
        # policy_stat table we just copy the total container count from the
        # account_stat table

        # if that triggers an update then the where changes <> 0 *would* exist
        # and the insert or replace from the count subqueries won't execute

        curs.executescript("""
            UPDATE policy_stat
            SET container_count = (
                SELECT container_count
                FROM account_stat)
            WHERE (
                SELECT COUNT(storage_policy_index)
                FROM policy_stat
            ) <= 1;

            INSERT OR REPLACE INTO policy_stat (
                storage_policy_index,
                container_count,
                object_count,
                bytes_used
            )
            SELECT p.storage_policy_index,
                   c.count,
                   p.object_count,
                   p.bytes_used
            FROM (
                SELECT storage_policy_index,
                       COUNT(*) as count
                FROM container
                WHERE deleted = 0
                GROUP BY storage_policy_index
            ) c
            JOIN policy_stat p
            ON p.storage_policy_index = c.storage_policy_index
            WHERE NOT EXISTS(
                SELECT changes() as change
                FROM policy_stat
                WHERE change <> 0
            );
        """)
        conn.commit()

    def _migrate_add_storage_policy_index(self, conn):
        """
        Add the storage_policy_index column to the 'container' table and
        set up triggers, creating the policy_stat table if needed.

        :param conn: DB connection object
        """
        try:
            self.create_policy_stat_table(conn)
        except sqlite3.OperationalError as err:
            # policy_stat may already exist from a partial earlier migration
            if 'table policy_stat already exists' not in str(err):
                raise
        conn.executescript('''
            ALTER TABLE container
            ADD COLUMN storage_policy_index INTEGER DEFAULT 0;
        ''' + POLICY_STAT_TRIGGER_SCRIPT)
// NOTE(review): this appears to be a compiler error-test fixture (the
// surrounding residue names it "error.reassignment-to-global-indirect").
// The assignments below are *intentionally* invalid: the arrow function
// writes to undeclared globals so the tool under test can report the
// reassignment. Do not "fix" these statements.
function Component() {
  const foo = () => {
    // Cannot assign to globals
    someUnknownGlobal = true;
    moduleLocal = true;
  };
  foo();
}
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributor: Raph Levien
import os
import sys
sys.path.append(
os.path.join(os.path.dirname(__file__), os.pardir, 'spiro', 'curves'))
from fontTools import ttLib
from fontTools.ttLib.tables import _g_l_y_f
import fromcubic
import tocubic
import pcorn
import math
import md5
def lerppt(t, p0, p1):
    """Linearly interpolate between 2-D points p0 and p1 at parameter t."""
    x0, y0 = p0
    x1, y1 = p1
    return (x0 + t * (x1 - x0), y0 + t * (y1 - y0))
def glyph_to_bzs(g):
    """Convert a TrueType glyph's contours into lists of line/quad Beziers.

    Each contour becomes a list of segments: 2-tuples for straight lines
    and 3-tuples (start, control, end) for quadratics.
    """
    bzs = []
    for i in range(g.numberOfContours):
        # endPtsOfContours holds the inclusive last point index per contour
        beg = 0 if i == 0 else g.endPtsOfContours[i - 1] + 1
        end = g.endPtsOfContours[i] + 1
        n = end - beg
        pts = g.coordinates[beg:end]
        flags = g.flags[beg:end]
        bz = []
        for j in range(n):
            x1, y1 = pts[(j+1) % n]
            # truthy flag = on-curve point (TrueType convention — confirm
            # against fontTools _g_l_y_f docs)
            if flags[j] and flags[(j+1) % n]:
                # two consecutive on-curve points: a straight line
                bz.append((pts[j], (x1, y1)))
            elif not flags[j]:
                # pts[j] is a control point; recover its on-curve neighbors,
                # synthesizing implied midpoints between adjacent off-curve
                # points where needed
                if flags[j - 1]:
                    x0, y0 = pts[j - 1]
                else:
                    x0, y0 = lerppt(0.5, pts[j - 1], pts[j])
                if not flags[(j+1) % n]:
                    x1, y1 = lerppt(0.5, (x1, y1), pts[j])
                if pts[j] == (x0, y0) or pts[j] == (x1, y1):
                    # degenerate quad, treat as line
                    bz.append(((x0, y0), (x1, y1)))
                else:
                    bz.append(((x0, y0), pts[j], (x1, y1)))
        bzs.append(bz)
    return bzs
# convert all quadratics to cubics
def raise_to_cubic(bzs):
    """Degree-elevate every quadratic Bezier to a cubic; pass lines through."""
    def elevate(bz):
        # a quadratic (p0, c, p2) equals the cubic whose inner control
        # points sit 2/3 of the way from each endpoint toward c
        if len(bz) == 3:
            return (bz[0], lerppt(2./3, bz[0], bz[1]),
                    lerppt(2./3, bz[2], bz[1]), bz[2])
        return bz
    return [[elevate(bz) for bz in sp] for sp in bzs]
def plot(bzs):
    # Debug helper: emit a PostScript rendering of the cubic splines on
    # stdout, with point decorations ("fancy") enabled.
    tocubic.plot_prolog()
    print '/ss 1.5 def'
    print '/circle { ss 0 moveto currentpoint exch ss sub exch ss 0 360 arc } bind def'
    fromcubic.plot_bzs(bzs, (100, 100), 0.25, fancy = True)
    print 'showpage'
def getbreaks(curve):
    """Collect sorted arclength positions (extrema, corners, both ends)
    where the curve should be split, merging near-duplicates."""
    extrema = curve.find_extrema()
    extrema.extend(curve.find_breaks())
    extrema.append(0)
    extrema.append(curve.arclen)
    extrema.sort()
    result = []
    for i in range(len(extrema)):
        # collapse breakpoints closer than 0.1 arclength units
        if i == 0 or extrema[i] > extrema[i-1] + 0.1:
            result.append(extrema[i])
    print result
    return result
class Pt:
    """A sample on a curve: arclength position s, grid-rounded point xy,
    and tangent angle th."""
    def __init__(self, curve, s):
        xy = curve.xy(s)
        self.s = s
        self.xy = (round(xy[0]), round(xy[1]))
        self.th = curve.th(s)
class MiniState:
    """A partial solution: accumulated error score plus the spline so far."""
    def __init__(self, score, sp):
        self.score = score
        self.sp = sp
    def combine(self, score, bz):
        """Return a new state extended with segment bz at the given cost."""
        total = self.score + score + penalty * (len(bz) - 1)
        # a quadratic whose start point is implied by the previous quad's
        # control points saves one output point, so refund one penalty
        if len(bz) == 3 and self.sp:
            prev = self.sp[-1]
            if len(prev) == 3 and lerppt(0.5, prev[1], bz[1]) == bz[0]:
                total -= penalty
        return MiniState(total, self.sp + [bz])
class State:
    """Pairs a base MiniState with an (initially empty) lookup map."""
    def __init__(self, base):
        self.map = {}
        self.base = base  # a MiniState
penalty = 0.05
def measure_bz(curve, s0, s1, bz):
    """Score how well Bezier bz approximates curve over arclength [s0, s1];
    a degenerate (zero-length) candidate scores a huge penalty."""
    length = tocubic.bz_arclength_rk4(bz)
    if length == 0:
        return 1e9
    scale = (s1 - s0) / length
    return tocubic.measure_bz_rk4(
        bz, length, lambda s: curve.th(s0 + scale * s, s == 0))
def measure_line(curve, st, pt0, pt1):
    """Extend state st with the straight segment pt0 -> pt1."""
    seg = (pt0.xy, pt1.xy)
    score = measure_bz(curve, pt0.s, pt1.s, seg)
    return st.combine(score, seg)
def intersect(xy0, th0, xy1, th1):
    """Intersect two lines given as (point, angle); None when near-parallel."""
    dx0, dy0 = math.cos(th0), math.sin(th0)
    dx1, dy1 = math.cos(th1), math.sin(th1)
    det = dx0 * dy1 - dy0 * dx1
    if abs(det) < 1e-6:
        return None
    inv = 1 / det
    x0, y0 = xy0
    x1, y1 = xy1
    # signed distances of each line from the origin (2-D cross products)
    a = y0 * dx0 - x0 * dy0
    b = y1 * dx1 - x1 * dy1
    return ((a * dx1 - b * dx0) * inv, (a * dy1 - b * dy0) * inv)
def measure_quad(curve, st, pt0, pt1):
    """Extend st with a quadratic whose control point is the intersection
    of the tangents at pt0 and pt1; None when the tangents are parallel."""
    apex = intersect(pt0.xy, pt0.th, pt1.xy, pt1.th)
    if apex is None:
        return None
    ctrl = (round(apex[0]), round(apex[1]))
    quad = (pt0.xy, ctrl, pt1.xy)
    return st.combine(measure_bz(curve, pt0.s, pt1.s, quad), quad)
class Thcache:
    # Cache of tangent angles sampled along [s0, s1], answered by linear
    # interpolation, so the optimizer avoids repeated curve.th() calls.
    mult = 1  # sample density: buckets per unit of arclength
    def __init__(self, curve, s0, s1):
        self.s0 = s0
        self.s1 = s1
        self.ths1 = curve.th(s1, False)
        self.vals = []
        scale = 1.0 / self.mult
        # +2 so interpolation at the far end always has a right neighbor
        for i in range(int(self.mult * (s1 - s0)) + 2):
            s = min(s1, s0 + i * scale)
            self.vals.append(curve.th(s, i == 0))
    def th(self, s, ds):
        # `ds` is unused; it exists so this matches the (s, flag) call
        # signature measure_bz uses for real curve objects
        if s > self.s1: return self.ths1
        s = self.mult * (s - self.s0)
        bucket = int(s)
        v0 = self.vals[bucket]
        v1 = self.vals[bucket + 1]
        return v0 + (s - bucket) * (v1 - v0)
# produce an optimized sequence of quadratics from s0 to s1 of the curve
def optimize_run(curve, s0, s1):
    # Dynamic program over ~1 sample point per arclength unit: states[i] is
    # the best (lowest-score) spline covering pts[0..i]. Two fast paths try
    # a single segment, then two quads, before the full O(n^2) scan.
    print s0, s1
    n = int(round(1 * (s1 - s0)))
    pts = []
    for i in range(n + 1):
        pts.append(Pt(curve, s0 + (s1 - s0) * i / n))
    cache = Thcache(curve, s0, s1)
    states = [MiniState(0, [])]
    # fast path 1: a single line or single quad over the whole run
    newst = measure_line(cache, states[0], pts[0], pts[n])
    bestst = newst
    newst = measure_quad(cache, states[0], pts[0], pts[n])
    if newst and newst.score < bestst.score:
        bestst = newst
    if bestst.score <= 3 * penalty:
        return bestst.sp
    # Quick scan for two-quad sections
    # Note, could do line+quad and quad+line too, but less likely to win
    for i in range(1, n):
        st1 = measure_quad(cache, states[0], pts[0], pts[i])
        if st1:
            st2 = measure_quad(cache, st1, pts[i], pts[n])
            if st2 and st2.score < bestst.score:
                bestst = st2
    if bestst.score <= 4 * penalty:
        return bestst.sp
    # full DP: for each endpoint i, try every start point j, bailing out
    # after 20 consecutive clearly-bad candidates
    for i in range(1, n + 1):
        best = 1e9
        badcount = 0
        for j in range(i - 1, -1, -1):
            newst = measure_line(cache, states[j], pts[j], pts[i])
            if newst and newst.score < best:
                best, bestst = newst.score, newst
            newst = measure_quad(cache, states[j], pts[j], pts[i])
            if newst and newst.score < best:
                best, bestst = newst.score, newst
            if newst is None or newst.score - states[j].score > 10 * penalty:
                badcount += 1
                if badcount == 20:
                    break
            else:
                badcount = 0
        states.append(bestst)
    return states[n].sp
def optimize(bzs):
    """Re-approximate every subpath with optimized quadratic sequences,
    one run per (breakpoint, next breakpoint) interval."""
    out = []
    for subpath in fromcubic.bzs_to_pcorn(bzs):
        curve = pcorn.Curve(subpath)
        breaks = getbreaks(curve)
        pieces = []
        for lo, hi in zip(breaks, breaks[1:]):
            pieces.extend(optimize_run(curve, lo, hi))
        out.append(pieces)
    return out
def plot_tt_raw(bzs, fancy = True):
    # Render quadratic splines (raised to cubics for plotting). With fancy,
    # mark explicit on-curve points (filled) and control points (hollow).
    x0 = 100
    y0 = 100
    scale = 0.25
    fromcubic.plot_bzs(raise_to_cubic(bzs), (x0, y0), scale)
    if fancy:
        for sp in bzs:
            for i in range(len(sp)):
                lastbz = sp[i - 1]
                bz = sp[i]
                # skip the on-curve marker when the point is implied by the
                # midpoint of two consecutive control points
                if len(bz) != 3 or len(lastbz) != 3 or lerppt(0.5, lastbz[1], bz[1]) != bz[0]:
                    x, y = bz[0]
                    print 'gsave %f %f translate circle fill grestore' % (x * scale + x0, y * scale + y0)
                if len(bz) == 3:
                    x, y = bz[1]
                    print 'gsave %f %f translate circle stroke grestore' % (x * scale + x0, y * scale + y0)
def plot_tt(bzs, orig = None, style = 'redcyan'):
    # Plot bzs (and optionally the original outlines for comparison) as a
    # PostScript page; 'redcyan' overprints red-on-cyan, 'redblack' draws
    # the original in black.
    tocubic.plot_prolog()
    print '/ss 2 def'
    print '/circle { ss 0 moveto currentpoint exch ss sub exch ss 0 360 arc } bind def'
    if style == 'redcyan':
        print 'true setoverprint true setoverprintmode'
    x0 = 100
    y0 = 100
    scale = 0.25
    if orig:
        print '0 1 1 0 setcmykcolor'
        fancy = (style == 'redcyan')
        plot_tt_raw(orig, fancy)
    if style == 'redcyan':
        print '1 0 0 0 setcmykcolor'
    elif style == 'redblack':
        print '0 0 0 1 setcmykcolor'
    plot_tt_raw(bzs)
    print 'showpage'
def segment_sp(sp):
    """Return sorted segment indices where the closed subpath sp should
    be split: where x/y travel direction reverses, where a segment
    degenerates to a point, and at corners bending more than ~0.02 rad."""
    breaks = set()
    # pass 1: monotonicity. Track the sign of x/y travel; going around the
    # contour twice warms the running signs up before each test.
    xsign = ysign = 0
    for k in range(2 * len(sp)):
        j = k % len(sp)
        dx = sp[j][-1][0] - sp[j][0][0]
        dy = sp[j][-1][1] - sp[j][0][1]
        if xsign * dx < 0 or ysign * dy < 0:
            breaks.add(j)
            xsign = dx
            ysign = dy
        else:
            if xsign == 0: xsign = dx
            if ysign == 0: ysign = dy
    # pass 2: corners. Compare the outgoing tangent of segment j-1 with the
    # incoming tangent of segment j.
    for j in range(len(sp)):
        ax = sp[j-1][-1][0] - sp[j-1][-2][0]
        ay = sp[j-1][-1][1] - sp[j-1][-2][1]
        bx = sp[j][1][0] - sp[j][0][0]
        by = sp[j][1][1] - sp[j][0][1]
        cross = bx * ay - ax * by
        if (ax == 0 and ay == 0) or (bx == 0 and by == 0):
            breaks.add(j)
        else:
            cross = cross / (math.hypot(ax, ay) * math.hypot(bx, by))
            # for small angles, cross is in units of radians
            if abs(cross) > 0.02:
                breaks.add(j)
    return sorted(breaks)
def seg_to_string(sp, bk0, bk1):
    """Serialize segments [bk0, bk1) of subpath sp, one quad per line."""
    if bk1 < bk0:
        bk1 += len(sp)  # the run wraps around the closed contour
    lines = []
    for idx in range(bk0, bk1):
        bz = sp[idx % len(sp)]
        if len(bz) == 2:
            # just represent lines as quads
            bz = (bz[0], lerppt(0.5, bz[0], bz[1]), bz[1])
        coords = [coord for point in bz for coord in point]
        lines.append(' '.join('%g' % c for c in coords) + '\n')
    return ''.join(lines)
USE_SUBDIRS = True
# get filename, ensuring directory exists
def seg_fn(segstr):
    """Return the .bez path for a segment string, keyed by content hash.

    The name is the first 16 hex digits of the MD5 of segstr; with
    USE_SUBDIRS the first two digits become a subdirectory, created
    on demand.
    """
    # hashlib replaces the md5 module (deprecated since Python 2.5); the
    # digest, and therefore every generated filename, is unchanged
    import hashlib
    fn = hashlib.md5(segstr).hexdigest()[:16]
    if USE_SUBDIRS:
        dirname = fn[:2]
        if not os.path.exists(dirname):
            os.mkdir(dirname)
        fn = dirname + '/' + fn[2:]
    fn += '.bez'
    return fn
def gen_segs(glyph):
    """Write every non-trivial segment of glyph out as a hashed .bez file."""
    bzs = glyph_to_bzs(glyph)
    for sp in bzs:
        bks = segment_sp(sp)
        for i in range(len(bks)):
            bk0, bk1 = bks[i], bks[(i + 1) % len(bks)]
            # runs that are a single straight line carry no curve to optimize
            if bk1 != (bk0 + 1) % len(sp) or len(sp[bk0]) != 2:
                segstr = seg_to_string(sp, bk0, bk1)
                # use a context manager instead of the unclosed file()
                # idiom so the handle is released deterministically
                with open(seg_fn(segstr), 'w') as f:
                    f.write(segstr)
def generate(fn):
    # Pass 1 entry point: dump every glyph's curve segments to .bez files
    # for later offline optimization.
    f = ttLib.TTFont(fn)
    glyf = f['glyf']
    for name in glyf.keys():
        g = glyf[name]
        print 'generating', name
        gen_segs(g)
def read_bzs(fn):
    """Read an optimized segment file back into a list of quads/lines.

    Quads whose control point is exactly the midpoint of their endpoints
    are collapsed back to plain lines (the inverse of seg_to_string).
    """
    result = []
    # context manager replaces the unclosed file() idiom; also rename the
    # loop variable so it no longer shadows the coordinate list `z`
    with open(fn) as f:
        for l in f:
            z = [float(v) for v in l.split()]
            bz = ((z[0], z[1]), (z[2], z[3]), (z[4], z[5]))
            if bz[1] == lerppt(0.5, bz[0], bz[2]):
                bz = (bz[0], bz[2])
            result.append(bz)
    return result
def pt_to_int(pt):
    # todo: should investigate non-int points
    x, y = pt
    return (int(round(x)), int(round(y)))
def bzs_to_glyph(bzs, glyph):
    """Write line/quad splines back into a fontTools glyph in place.

    On-curve points implied by the midpoint of two consecutive control
    points are omitted, matching the compact TrueType representation.
    """
    coordinates = []
    flags = []
    endPtsOfContours = []
    for sp in bzs:
        for i in range(len(sp)):
            lastbz = sp[i - 1]
            bz = sp[i]
            if len(bz) != 3 or len(lastbz) != 3 or lerppt(0.5, lastbz[1], bz[1]) != bz[0]:
                coordinates.append(pt_to_int(bz[0]))
                flags.append(1)  # on-curve point
            if len(bz) == 3:
                coordinates.append(pt_to_int(bz[1]))
                flags.append(0)  # off-curve control point
        endPtsOfContours.append(len(coordinates) - 1)
    glyph.coordinates = _g_l_y_f.GlyphCoordinates(coordinates)
    glyph.flags = flags
    glyph.endPtsOfContours = endPtsOfContours
def repack_glyph(glyph):
    """Replace the glyph's outlines with optimized segments read from disk."""
    original = glyph_to_bzs(glyph)
    rebuilt = []
    for sp in original:
        bks = segment_sp(sp)
        count = len(bks)
        newsp = []
        for i, bk0 in enumerate(bks):
            bk1 = bks[(i + 1) % count]
            if bk1 == (bk0 + 1) % len(sp) and len(sp[bk0]) == 2:
                # single straight segment: kept verbatim, never optimized
                newsp.append(sp[bk0])
            else:
                # look up the pre-optimized replacement by content hash
                segstr = seg_to_string(sp, bk0, bk1)
                newsp.extend(read_bzs(seg_fn(segstr) + 'opt'))
        rebuilt.append(newsp)
    bzs_to_glyph(rebuilt, glyph)
    plot_tt(rebuilt, original, style = 'redblack')
def repack(fn, newfn):
    """Load font fn, repack every simple glyph, optionally save to newfn."""
    font = ttLib.TTFont(fn)
    glyf = font['glyf']
    for name in glyf.keys():
        glyph = glyf[name]
        # composite glyphs reference other glyphs and carry no outlines
        if not glyph.isComposite():
            repack_glyph(glyph)
    if newfn:
        font.save(newfn)
def main(argv):
    """Command-line driver: `gen <font>` or `pack <font> [<out-font>]`."""
    if argv[1] == 'gen':
        generate(argv[2])
    elif argv[1] == 'pack':
        # bug fix: argv[3] only exists when len(argv) > 3; the old guard
        # (>= 3) indexed out of range when no output font was given.
        # Also use the argv parameter consistently instead of sys.argv.
        repack(argv[2], argv[3] if len(argv) > 3 else None)
main(sys.argv)
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_TRANSFORMS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_TRANSFORMS_H_
#include "mlir/Pass/PassManager.h"  // from @llvm-project
namespace mlir {
namespace odml {
// Adds all the necessary passes to lower a TF module to StableHLO.
// `skip_resize` enables or disables skipping conversion of tf.ResizeBilinear
// and tf.ResizeNearestNeighbor ops.
// `smuggle_disallowed_ops` enables or disables converting disallowed ops
// like tf.ResizeBilinear or tf.ResizeNearestNeighbor to mhlo.custom_call ops.
void AddTFToStablehloPasses(OpPassManager& pm, bool skip_resize,
                            bool smuggle_disallowed_ops);
// This function is a common entry point for all graph optimizations that are
// not specific to any hardware. It legalizes SHLO->MHLO, does MHLO->MHLO
// optimizations by calling `AddMhloOptimizationPasses` internally, and
// legalizes MHLO->SHLO.
void AddStablehloOptimizationPasses(OpPassManager& pm);
// Adds all the backend-agnostic StableHLO optimization passes.
void AddMhloOptimizationPasses(OpPassManager& pm, bool add_fold_broadcast_pass);
}  // namespace odml
}  // namespace mlir
#endif  // TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_TRANSFORMS_H_
import fs from 'node:fs/promises'
import { dirname, resolve } from 'node:path'
import { type Plugin, type Root } from 'postcss'
import { parseImportParams } from '../../../../tailwindcss/src/at-import'
import { segment } from '../../../../tailwindcss/src/utils/segment'
import * as ValueParser from '../../../../tailwindcss/src/value-parser'
export function migrateImport(): Plugin {
  // Rewrites `@import` params so that resolvable stylesheet imports become
  // explicit relative './name.css' paths, leaving package/URL imports alone.
  async function migrate(root: Root) {
    let file = root.source?.input.file
    if (!file) return
    // walkAtRules is synchronous, so fs.stat promises are collected here
    // and awaited together after the walk completes
    let promises: Promise<void>[] = []
    root.walkAtRules('import', (rule) => {
      try {
        let [firstParam, ...rest] = segment(rule.params, ' ')
        let params = parseImportParams(ValueParser.parse(firstParam))
        if (!params) return
        let isRelative = params.uri[0] === '.'
        let hasCssExtension = params.uri.endsWith('.css')
        // already in the canonical './...css' form — nothing to do
        if (isRelative && hasCssExtension) {
          return
        }
        let fullPath = resolve(dirname(file), params.uri)
        if (!hasCssExtension) fullPath += '.css'
        // only rewrite when the target file actually exists on disk
        promises.push(
          fs.stat(fullPath).then(() => {
            let ext = hasCssExtension ? '' : '.css'
            let path = isRelative ? params.uri : `./${params.uri}`
            rule.params = [`'${path}${ext}'`, ...rest].join(' ')
          }),
        )
      } catch {
        // When an error occurs while parsing the `@import` statement, we skip
        // the import. This will happen in cases where you import an external
        // URL.
      }
    })
    await Promise.allSettled(promises)
  }
  return {
    postcssPlugin: '@tailwindcss/upgrade/migrate-import',
    OnceExit: migrate,
  }
}
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: eos_banner
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage multiline banners on Arista EOS devices
description:
- This will configure both login and motd banners on remote devices
running Arista EOS. It allows playbooks to add or remote
banner text from the active running configuration.
extends_documentation_fragment: eos
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
default: null
    choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
eos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
eos_banner:
banner: motd
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
- EOF
session_name:
description: The EOS config session name used to load the configuration
returned: if changes
type: str
sample: ansible_1479315771
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.eos import load_config, run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
def map_obj_to_commands(updates, module):
    """Build the device command list that turns `have` into `want`."""
    want, have = updates
    banner = module.params['banner']
    state = module.params['state']
    commands = []
    if state == 'absent' and 'text' in have.keys() and have['text']:
        commands.append('no banner %s' % banner)
    elif state == 'present':
        if want['text'] and (want['text'] != have.get('text')):
            if module.params['transport'] == 'cli':
                commands.append('banner %s' % banner)
                commands.extend(want['text'].strip().split('\n'))
                commands.append('EOF')
            else:
                # eAPI expects the multiline banner as one cmd/input pair
                commands.append({'cmd': 'banner %s' % banner,
                                 'input': want['text'].strip('\n')})
    return commands
def map_config_to_obj(module):
    """Read the banner currently configured on the remote device."""
    banner = module.params['banner']
    output = run_commands(module, ['show banner %s' % banner])
    obj = {'banner': banner, 'state': 'absent'}
    if output:
        if module.params['transport'] == 'cli':
            obj['text'] = output[0]
        else:
            # eAPI wraps the text in a response dict under a per-banner key
            key = 'loginBanner' if banner == 'login' else 'motd'
            if isinstance(output[0], dict) and key in output[0].keys():
                obj['text'] = output[0][key].strip('\n')
        obj['state'] = 'present'
    return obj
def map_params_to_obj(module):
    """Collect the desired banner state from the module parameters."""
    text = module.params['text']
    if text:
        text = str(text).strip()
    return {'banner': module.params['banner'],
            'text': text,
            'state': module.params['state']}
def main():
    """ main entry point for module execution
    """
    argument_spec = dict(
        banner=dict(required=True, choices=['login', 'motd']),
        text=dict(),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(eos_argument_spec)
    # text is mandatory only when the banner is being configured
    required_if = [('state', 'present', ('text',))]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # compute desired vs current state, then the commands bridging them
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # in check mode the config session is prepared but never committed
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from neutron.models import Word
from .models import AlternateData
class AlternateDataForm(forms.ModelForm):
    """ModelForm that lets the related Word be typed as plain text and
    resolves (or creates) the matching Word row in clean()."""
    # free-text field replacing the default FK widget for `word`
    word = forms.CharField()
    class Meta:
        model = AlternateData
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance', None)
        initial = kwargs.pop('initial', {})
        if instance:
            # pre-fill with the related Word's text instead of its pk
            initial.update({'word': instance.word.word })
        super(AlternateDataForm, self).__init__(initial=initial, *args, **kwargs)
    def clean(self):
        cleaned_data = self.cleaned_data
        # swap the typed string for a Word instance, creating it on demand
        cleaned_data['word'], _ = Word.objects.get_or_create(word=cleaned_data['word'])
        return cleaned_data
class WordCoarseDataForm(forms.ModelForm):
    """Word-as-text ModelForm mirroring AlternateDataForm.

    NOTE(review): Meta.model is AlternateData although the class name
    suggests a word-coarse model — possibly a copy-paste; confirm against
    the app's models.
    """
    # free-text field replacing the default FK widget for `word`
    word = forms.CharField()
    class Meta:
        model = AlternateData
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance', None)
        initial = kwargs.pop('initial', {})
        if instance:
            # pre-fill with the related Word's text instead of its pk
            initial.update({'word': instance.word.word })
        super(WordCoarseDataForm, self).__init__(initial=initial, *args, **kwargs)
    def clean(self):
        cleaned_data = self.cleaned_data
        # swap the typed string for a Word instance, creating it on demand
        cleaned_data['word'], _ = Word.objects.get_or_create(word=cleaned_data['word'])
        return cleaned_data
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fe10.test.cases.generated.cases.components.javaInteroperabilityComponent;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fe10.test.configurator.AnalysisApiFe10TestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.javaInteroperabilityComponent.AbstractExpressionTypeAsPsiTypeTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
// NOTE: each @Test below maps 1:1 to the .kt fixture named in its
// @TestMetadata; re-run the generator above to refresh this class.
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression")
@TestDataPath("$PROJECT_ROOT")
public class Fe10IdeNormalAnalysisSourceModuleExpressionTypeAsPsiTypeTestGenerated extends AbstractExpressionTypeAsPsiTypeTest {
  @NotNull
  @Override
  public AnalysisApiTestConfigurator getConfigurator() {
    return AnalysisApiFe10TestConfiguratorFactory.INSTANCE.createConfigurator(
      new AnalysisApiTestConfiguratorFactoryData(
        FrontendKind.Fe10,
        TestModuleKind.Source,
        AnalysisSessionMode.Normal,
        AnalysisApiMode.Ide
      )
    );
  }
  @Test
  public void testAllFilesPresentInForExpression() {
    KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression"), Pattern.compile("^(.+)\\.kt$"), null, true);
  }
  @Test
  @TestMetadata("capturedBoundType.kt")
  public void testCapturedBoundType() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/capturedBoundType.kt");
  }
  @Test
  @TestMetadata("class_object_call.kt")
  public void testClass_object_call() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/class_object_call.kt");
  }
  @Test
  @TestMetadata("class_object_constructor.kt")
  public void testClass_object_constructor() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/class_object_constructor.kt");
  }
  @Test
  @TestMetadata("errorType.kt")
  public void testErrorType() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/errorType.kt");
  }
  @Test
  @TestMetadata("flexibleTypeWithArgumentUpperBound.kt")
  public void testFlexibleTypeWithArgumentUpperBound() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/flexibleTypeWithArgumentUpperBound.kt");
  }
  @Test
  @TestMetadata("inlineClassWithArguments.kt")
  public void testInlineClassWithArguments() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/inlineClassWithArguments.kt");
  }
  @Test
  @TestMetadata("inlineClassWithoutArguments.kt")
  public void testInlineClassWithoutArguments() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/inlineClassWithoutArguments.kt");
  }
  @Test
  @TestMetadata("KTIJ25461.kt")
  public void testKTIJ25461() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/KTIJ25461.kt");
  }
  @Test
  @TestMetadata("localClassWithUnresolvedSuperType.kt")
  public void testLocalClassWithUnresolvedSuperType() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/localClassWithUnresolvedSuperType.kt");
  }
  @Test
  @TestMetadata("recursiveTypeParameter_localSimple.kt")
  public void testRecursiveTypeParameter_localSimple() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/recursiveTypeParameter_localSimple.kt");
  }
  @Test
  @TestMetadata("recursiveTypeParameter_localWithTypeParameter.kt")
  public void testRecursiveTypeParameter_localWithTypeParameter() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/recursiveTypeParameter_localWithTypeParameter.kt");
  }
  @Test
  @TestMetadata("typeParamFlexibleUpperBound.kt")
  public void testTypeParamFlexibleUpperBound() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/typeParamFlexibleUpperBound.kt");
  }
  @Test
  @TestMetadata("unitType.kt")
  public void testUnitType() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/unitType.kt");
  }
  @Test
  @TestMetadata("unitTypeNullable.kt")
  public void testUnitTypeNullable() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/unitTypeNullable.kt");
  }
  @Test
  @TestMetadata("unitTypeTypealias.kt")
  public void testUnitTypeTypealias() {
    runTest("analysis/analysis-api/testData/components/javaInteroperabilityComponent/asPsiType/forExpression/unitTypeTypealias.kt");
  }
}
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
from utilities import *
print "Registering sp-Magic Circle Outward Fixed"
# args: (0-4)
# 0 - spell_id
# 1 - duration
# 2 - Type Flag (1-Good, 2- Evil, 3- Law, 4- Chaos)
# 3 - aoe id
# 4 - spare
#List of spells protection from evil grants immunity for (should really suppress).
#Includes charm/dominate enchantments (not sleep or confusion) and magic jar (if someone ever adds it).
# NOTE(review): not referenced anywhere in this portion of the script —
# presumably consumed by code further down; verify before removing.
immunity_list = [
    spell_mass_charm_monster, spell_charm_monster, spell_charm_person, spell_charm_person_or_animal, spell_magic_jar, spell_dominate_animal,
    spell_dominate_monster, spell_dominate_person, spell_mass_suggestion, spell_suggestion
]
#Checks if a character's alignment matches the spell type
def CheckAlignment(character, flag):
    """Return nonzero when the character's alignment matches the circle
    type flag (1-Good, 2-Evil, 3-Law, 4-Chaos)."""
    align = character.stat_level_get(stat_alignment)
    masks = {1: ALIGNMENT_GOOD, 2: ALIGNMENT_EVIL,
             3: ALIGNMENT_LAWFUL, 4: ALIGNMENT_CHAOTIC}
    return align & masks.get(flag, 0)
#Check if a summoned creature can still attack someone protected
def SummonCanAttack(character, flag):
align = character.stat_level_get(stat_alignment)
if flag == 1:
return align & ALIGNMENT_EVIL
elif flag == 2:
return align & ALIGNMENT_GOOD
elif flag == 4:
return align & ALIGNMENT_CHAOTIC
elif flag == 3:
return align & ALIGNMENT_LAWFUL
return 0
#Gets the text name for the type of spell
def GetCircleSpellType(type):
    """Look up the display name of the circle spell for the given type flag
    (1-Good, 2-Evil, 3-Law, 4-Chaos); empty string for anything else."""
    mesline = {1: 284, 2: 283, 3: 285, 4: 282}
    if type in mesline:
        return game.get_spell_mesline(mesline[type])
    return ""
#Gets the text name for the type of effect
def GetCircleEffectType(type):
    """Look up the display name of the protection effect for the given type
    flag (1-Good, 2-Evil, 3-Law, 4-Chaos); empty string for anything else."""
    mesline = {1: 371, 2: 370, 3: 372, 4: 368}
    if type in mesline:
        return game.get_spell_mesline(mesline[type])
    return ""
def MagicCircleOutwardBegin(attachee, args, evt_obj):
    # Register a 10 ft radius critter object event so the aura hears about
    # entries/exits; the event id is stored in arg 3 for later matching.
    radius_feet = 10.0
    obj_evt_id = attachee.object_event_append(OLC_CRITTERS, radius_feet)
    args.set_arg(3, obj_evt_id)
    print "Magic Circle Outward: New Object Event ID: " + str(obj_evt_id)
    return 0
def MagicCircleOutwardAoEEntered(attachee, args, evt_obj):
    # Attach the aura's effect condition to any critter entering the area.
    obj_evt_id = args.get_arg(3)
    # ignore events from other auras sharing this handler
    if obj_evt_id != evt_obj.evt_id:
        print "Magic Circle Outward Aura Entered: ID mismatch " + str(evt_obj.evt_id) + ", stored was: " + str(obj_evt_id)
        return 0
    print "Magic Circle Outward Aura Entered, event ID: " + str(obj_evt_id)
    tgt = evt_obj.target
    if tgt == OBJ_HANDLE_NULL:
        return 0
    if attachee == OBJ_HANDLE_NULL:
        return 0
    #All get the effect even the character the spell is on
    type = args.get_arg(2)
    spell_id = args.get_arg(0)
    tgt.condition_add_with_args("Magic Circle Outward Aura", obj_evt_id, type, spell_id)
    return 0
def MagicCircleOutwardHasSpellActive(attachee, args, evt_obj):
    """Answer the Critter_Has_Spell_Active query for the circle spells.

    Bug fix: the circle type was never read from the condition args, so
    `type` resolved to the `type` builtin and every comparison below was
    always False — the query never reported the spell as active.
    """
    type = args.get_arg(2)  # 1-Good, 2-Evil, 3-Law, 4-Chaos
    if type == 1:
        if evt_obj.data1 == 284:
            evt_obj.return_val = 1
    elif type == 2:
        if evt_obj.data1 == 283:
            evt_obj.return_val = 1
    elif type == 3:
        if evt_obj.data1 == 285:
            evt_obj.return_val = 1
    elif type == 4:
        if evt_obj.data1 == 282:
            evt_obj.return_val = 1
    return 0
def MagicCircleOutwardKilled(attachee, args, evt_obj):
    """On death, tear down the spell and then its condition modifier."""
    for cleanup in (args.remove_spell, args.remove_spell_mod):
        cleanup()
    return 0
def MagicCircleOutwardSpellEnd(attachee, args, evt_obj):
    """Play the matching end-of-spell particle effect when our spell ends."""
    spell_id = args.get_arg(0)
    circle_type = args.get_arg(2)
    if evt_obj.data1 == spell_id:
        effects = {1: 'sp-Magic Circle against Good-END',
                   2: 'sp-Magic Circle against Evil-END',
                   3: 'sp-Magic Circle against Law-END',
                   4: 'sp-Magic Circle against Chaos-END'}
        if circle_type in effects:
            game.particles( effects[circle_type], attachee)
    return 0
def HasMagicCircleQuery(attachee, args, evt_obj):
    """Report a match when the queried circle type equals ours."""
    if args.get_arg(2) == evt_obj.data1:
        evt_obj.return_val = 1
    return 0
def MagicCircleOutwardTooltip(attachee, args, evt_obj):
    """Append the circle spell's name to the hover tooltip."""
    evt_obj.append(GetCircleSpellType(args.get_arg(2)))
    return 0
def MagicCircleOutwardEffectTooltip(attachee, args, evt_obj):
    """Append the named status entry for the aura to the effect tooltip."""
    name = GetCircleSpellType(args.get_arg(2))
    evt_obj.append(tpdp.hash("MAGIC_CIRCLE_OUTWARD_FIXED"), -2, name + "")
    return 0
# Condition registration for the aura carried by the caster. Args are
# (spell_id, duration, type flag, object event id, spare).
magicCircleOutward = PythonModifier("sp-Magic Circle Outward Fixed", 5, 0) #Note: Allows duplicates (for the other versions of the spell)
magicCircleOutward.AddHook(ET_OnConditionAdd, EK_NONE, MagicCircleOutwardBegin, ())
magicCircleOutward.AddHook(ET_OnD20Signal, EK_S_Teleport_Reconnect, MagicCircleOutwardBegin, ())
magicCircleOutward.AddHook(ET_OnObjectEvent, EK_OnEnterAoE, MagicCircleOutwardAoEEntered, ())
magicCircleOutward.AddHook(ET_OnD20Query, EK_Q_Critter_Has_Spell_Active, MagicCircleOutwardHasSpellActive, ())
magicCircleOutward.AddHook(ET_OnD20Signal, EK_S_Killed, MagicCircleOutwardKilled, ())
magicCircleOutward.AddHook(ET_OnD20Signal, EK_S_Spell_End, MagicCircleOutwardSpellEnd, ())
magicCircleOutward.AddHook(ET_OnD20PythonQuery, "Has Magic Circle Spell", HasMagicCircleQuery, ())
magicCircleOutward.AddHook(ET_OnGetTooltip, EK_NONE, MagicCircleOutwardTooltip, ())
magicCircleOutward.AddHook(ET_OnGetEffectTooltip, EK_NONE, MagicCircleOutwardEffectTooltip, ())
# standard dispel/teleport/countdown plumbing shared by spell conditions
magicCircleOutward.AddSpellDispelCheckStandard()
magicCircleOutward.AddSpellTeleportPrepareStandard()
magicCircleOutward.AddSpellTeleportReconnectStandard()
magicCircleOutward.AddSpellCountdownStandardHook()
def MagicCircleOutwardEffAoEExited(attachee, args, evt_obj):
    """Remove the ward from a critter that walks out of the circle.

    The object-event id stored in condition arg 0 identifies the AoE this
    aura belongs to; exit events for any other AoE are ignored.

    Fix: use the function form of print so the script also parses under
    Python 3 (output is identical under Python 2 for a single argument).
    """
    obj_evt_id = args.get_arg(0)
    if obj_evt_id != evt_obj.evt_id:
        print("Magic Circle Outward Aura: ID mismatch " + str(evt_obj.evt_id) + ", stored was: " + str(obj_evt_id))
        return 0
    print("Magic Circle Outward Aura (ID " + str(obj_evt_id) + ") Exited, critter: " + attachee.description + " ")
    args.condition_remove()
    return 0
def MagicCircleOutwardEffTooltip(attachee, args, evt_obj):
    """Append the active ward's name to the hover tooltip (type in arg 1)."""
    evt_obj.append(GetCircleEffectType(args.get_arg(1)))
    return 0
def MagicCircleOutwardEffEffectTooltip(attachee, args, evt_obj):
    """Append the ward's name to the buff-bar effect tooltip (type in arg 1)."""
    effect_name = GetCircleEffectType(args.get_arg(1))
    evt_obj.append(tpdp.hash("MAGIC_CIRCLE_OUTWARD_FIXED_EFFECT"), -2, effect_name + "")
    return 0
def MagicCircleOutwardEffRemove(attachee, args, evt_obj):
    """Drop the ward condition (hooked on rest and on teleport-prepare).

    Fix: use the function form of print so the script also parses under
    Python 3 (output is identical under Python 2 for a single argument).
    """
    print("Removing Magic Circle Effect Condition " + attachee.description)
    args.condition_remove()
    return 0
def MagicCircleOutwardEffACBonus(attachee, args, evt_obj):
    """Grant a +2 deflection AC bonus versus attackers of the warded alignment.

    The alignment flag is stored in condition arg 1; whether the attacker
    matches is decided by CheckAlignment.
    """
    foe = evt_obj.attack_packet.attacker
    if foe != OBJ_HANDLE_NULL and CheckAlignment(foe, args.get_arg(1)):
        evt_obj.bonus_list.add(2, 11, 207)  # +2 deflection bonus
    return 0
def MagicCircleOutwardEffSavingThrow(attachee, args, evt_obj):
    """Grant a +2 resistance bonus on saves against warded-alignment sources.

    evt_obj.obj is the originator of the effect being saved against; the
    warded alignment flag is stored in condition arg 1.
    """
    source = evt_obj.obj
    if CheckAlignment(source, args.get_arg(1)):
        evt_obj.bonus_list.add(2, 15, 207)  # +2 resistance bonus
    return 0
def MagicCircleOutwardEffDamageResistance(attachee, args, evt_obj):
    """Negate damage from summoned creatures barred by the circle.

    If the attacker is summoned and SummonCanAttack (using the alignment
    flag in arg 1) says it may not attack through the circle, its
    natural-weapon damage is zeroed out — unless it used a manufactured
    weapon, or it beats the circle's spell resistance check.
    """
    attacker = evt_obj.attack_packet.attacker
    if attacker == OBJ_HANDLE_NULL:
        return 0
    if attacker.d20_query_has_condition("sp-Summoned"):
        type = args.get_arg(1)
        canAttack = SummonCanAttack(attacker, type)
        if not canAttack:
            # Only attacks with natural weapons are barred; let damage from
            # a manufactured weapon through unchanged.
            wpn = evt_obj.attack_packet.get_weapon_used()
            if wpn != OBJ_HANDLE_NULL:
                return 0
            # The attacker gets a spell resistance check against the circle
            # (spell id stored in arg 2) before the protection applies.
            spell_id = args.get_arg(2)
            spell_packet = tpdp.SpellPacket(spell_id)
            resisted = spell_packet.check_spell_resistance_force(attacker)
            if resisted:
                return 0
            evt_obj.damage_packet.add_mod_factor(0.0, D20DT_UNSPECIFIED, 104) #Do no damage at all
    return 0
def MagicCircleOutwardEffSpellImmunity(attachee, args, evt_obj):
    """Flag immunity for spells listed in the module-level immunity_list.

    Strictly the effect should only be suppressed while inside the circle;
    outright immunity is the approximation used here (see original note).
    """
    spell_enum = evt_obj.spell_packet.spell_enum
    if spell_enum != 0 and spell_enum in immunity_list:
        evt_obj.return_val = 1
    return 0
def MagicCircleOutwardEffPreAdd(attachee, args, evt_obj):
    """Reject a duplicate aura of the same alignment type.

    Other alignment variants of the aura may stack; only an incoming
    "Magic Circle Outward Aura" whose type flag (evt_obj.arg2) matches the
    one stored in our arg 1 is refused.
    """
    if not evt_obj.is_modifier("Magic Circle Outward Aura"):
        return 0
    if args.get_arg(1) == evt_obj.arg2:
        evt_obj.return_val = 0
    return 0
# Note: Condition allows duplicates for the other versions of the spell
# Child condition placed on each critter standing inside the circle.
magicCircleOutwardEffect = PythonModifier("Magic Circle Outward Aura", 4, 0) #id, alignment flag (1-Good, 2- Evil, 3- Law, 4- Chaos), spell id, spare
# Removal paths: leaving the AoE, resting, or preparing to teleport.
magicCircleOutwardEffect.AddHook(ET_OnObjectEvent, EK_OnLeaveAoE, MagicCircleOutwardEffAoEExited, ())
magicCircleOutwardEffect.AddHook(ET_OnNewDay, EK_NEWDAY_REST, MagicCircleOutwardEffRemove, ())
magicCircleOutwardEffect.AddHook(ET_OnGetTooltip, EK_NONE, MagicCircleOutwardEffTooltip, ())
magicCircleOutwardEffect.AddHook(ET_OnGetEffectTooltip, EK_NONE, MagicCircleOutwardEffEffectTooltip, ())
magicCircleOutwardEffect.AddHook(ET_OnD20Signal, EK_S_Teleport_Prepare, MagicCircleOutwardEffRemove, ())
# The actual protections: AC bonus, save bonus, summoned-attack block,
# and immunity to the listed spells.
magicCircleOutwardEffect.AddHook(ET_OnGetAC, EK_NONE, MagicCircleOutwardEffACBonus, ())
magicCircleOutwardEffect.AddHook(ET_OnSaveThrowLevel, EK_NONE, MagicCircleOutwardEffSavingThrow, () )
magicCircleOutwardEffect.AddHook(ET_OnTakingDamage2, EK_NONE, MagicCircleOutwardEffDamageResistance, ())
magicCircleOutwardEffect.AddHook(ET_OnSpellImmunityCheck, EK_NONE, MagicCircleOutwardEffSpellImmunity, ())
magicCircleOutwardEffect.AddHook(ET_OnConditionAddPre, EK_NONE, MagicCircleOutwardEffPreAdd, ()) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: bridges CRM opportunities to sales quotations.
{
    'name': 'Opportunity to Quotation',
    'version': '1.0',
    'category': 'Hidden',
    'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sale order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    'images': ['images/crm_statistics_dashboard.jpeg', 'images/opportunity_to_quote.jpeg'],
    'depends': ['sale', 'crm', 'web_kanban_gauge'],
    'data': [
        'wizard/crm_make_sale_view.xml',
        'sale_crm_view.xml',
        'security/sale_crm_security.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'test': ['test/sale_crm.yml'],
    'installable': True,
    # auto_install: module activates automatically once all of its
    # dependencies ('sale', 'crm', ...) are installed.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.resource}.
"""
from twisted.trial.unittest import TestCase
from twisted.web.error import UnsupportedMethod
from twisted.web.resource import (
NOT_FOUND, FORBIDDEN, Resource, ErrorPage, NoResource, ForbiddenResource,
getChildForRequest)
from twisted.web.test.requesthelper import DummyRequest
class ErrorPageTests(TestCase):
    """
    Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}.
    """

    # Class-level hooks so subclasses can run the same suite against other
    # (e.g. deprecated alias) implementations of these resources.
    errorPage = ErrorPage
    noResource = NoResource
    forbiddenResource = ForbiddenResource

    def test_getChild(self):
        """
        The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is
        called on.
        """
        page = self.errorPage(321, "foo", "bar")
        self.assertIdentical(page.getChild(b"name", object()), page)

    def _pageRenderingTest(self, page, code, brief, detail):
        """
        Render C{page} into a dummy request and verify the response body,
        response code, and C{content-type} header against the canonical
        error-page HTML template.
        """
        request = DummyRequest([b''])
        template = (
            u"\n"
            u"<html>\n"
            u"  <head><title>%s - %s</title></head>\n"
            u"  <body>\n"
            u"    <h1>%s</h1>\n"
            u"    <p>%s</p>\n"
            u"  </body>\n"
            u"</html>\n")
        expected = template % (code, brief, brief, detail)
        self.assertEqual(
            page.render(request), expected.encode('utf-8'))
        self.assertEqual(request.responseCode, code)
        self.assertEqual(
            request.outgoingHeaders,
            {b'content-type': b'text/html; charset=utf-8'})

    def test_errorPageRendering(self):
        """
        L{ErrorPage.render} returns a C{bytes} describing the error defined by
        the response code and message passed to L{ErrorPage.__init__}. It also
        uses that response code to set the response code on the L{Request}
        passed in.
        """
        code = 321
        brief = "brief description text"
        detail = "much longer text might go here"
        page = self.errorPage(code, brief, detail)
        self._pageRenderingTest(page, code, brief, detail)

    def test_noResourceRendering(self):
        """
        L{NoResource} sets the HTTP I{NOT FOUND} code.
        """
        detail = "long message"
        page = self.noResource(detail)
        self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail)

    def test_forbiddenResourceRendering(self):
        """
        L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code.
        """
        detail = "longer message"
        page = self.forbiddenResource(detail)
        self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail)
class DynamicChild(Resource):
    """
    A L{Resource} to be created on the fly by L{DynamicChildren}.

    Records the C{path} and C{request} it was created for so tests can
    assert on them.
    """
    def __init__(self, path, request):
        Resource.__init__(self)
        self.path = path
        self.request = request
class DynamicChildren(Resource):
    """
    A L{Resource} with dynamic children.
    """
    def getChild(self, path, request):
        # No static children registered: every lookup builds a fresh
        # DynamicChild capturing the requested path and request.
        return DynamicChild(path, request)
class BytesReturnedRenderable(Resource):
    """
    A L{Resource} with minimal capabilities to render a response.
    """
    def __init__(self, response):
        """
        @param response: A C{bytes} object giving the value to return from
            C{render_GET}.
        """
        Resource.__init__(self)
        self._response = response

    def render_GET(self, request):
        """
        Render a response to a I{GET} request by returning a short byte string
        to be written by the server.
        """
        return self._response
class ImplicitAllowedMethods(Resource):
    """
    A L{Resource} which implicitly defines its allowed methods by defining
    renderers to handle them.
    """
    def render_GET(self, request):
        # Body intentionally empty: only the method's existence matters to
        # the allowed-methods computation under test.
        pass

    def render_PUT(self, request):
        pass
class ResourceTests(TestCase):
    """
    Tests for L{Resource}.
    """
    def test_staticChildren(self):
        """
        L{Resource.putChild} adds a I{static} child to the resource. That child
        is returned from any call to L{Resource.getChildWithDefault} for the
        child's path.
        """
        resource = Resource()
        child = Resource()
        # A second registered child ensures lookup picks the right one,
        # not merely the only one.
        sibling = Resource()
        resource.putChild(b"foo", child)
        resource.putChild(b"bar", sibling)
        self.assertIdentical(
            child, resource.getChildWithDefault(b"foo", DummyRequest([])))

    def test_dynamicChildren(self):
        """
        L{Resource.getChildWithDefault} delegates to L{Resource.getChild} when
        the requested path is not associated with any static child.
        """
        path = b"foo"
        request = DummyRequest([])
        resource = DynamicChildren()
        child = resource.getChildWithDefault(path, request)
        self.assertIsInstance(child, DynamicChild)
        self.assertEqual(child.path, path)
        self.assertIdentical(child.request, request)

    def test_defaultHEAD(self):
        """
        When not otherwise overridden, L{Resource.render} treats a I{HEAD}
        request as if it were a I{GET} request.
        """
        expected = b"insert response here"
        request = DummyRequest([])
        request.method = b'HEAD'
        resource = BytesReturnedRenderable(expected)
        self.assertEqual(expected, resource.render(request))

    def test_explicitAllowedMethods(self):
        """
        The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
        request method has a C{allowedMethods} attribute set to the value of the
        C{allowedMethods} attribute of the L{Resource}, if it has one.
        """
        expected = [b'GET', b'HEAD', b'PUT']
        resource = Resource()
        resource.allowedMethods = expected
        request = DummyRequest([])
        request.method = b'FICTIONAL'
        exc = self.assertRaises(UnsupportedMethod, resource.render, request)
        self.assertEqual(set(expected), set(exc.allowedMethods))

    def test_implicitAllowedMethods(self):
        """
        The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
        request method has a C{allowedMethods} attribute set to a list of the
        methods supported by the L{Resource}, as determined by the
        I{render_}-prefixed methods which it defines, if C{allowedMethods} is
        not explicitly defined by the L{Resource}.
        """
        expected = set([b'GET', b'HEAD', b'PUT'])
        resource = ImplicitAllowedMethods()
        request = DummyRequest([])
        request.method = b'FICTIONAL'
        exc = self.assertRaises(UnsupportedMethod, resource.render, request)
        self.assertEqual(expected, set(exc.allowedMethods))
class GetChildForRequestTests(TestCase):
"""
Tests for L{getChildForRequest}.
"""
def test_exhaustedPostPath(self):
"""
L{getChildForRequest} returns whatever resource has been reached by the
time the request's C{postpath} is empty.
"""
request = DummyRequest([])
resource = Resource()
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_leafResource(self):
"""
L{getChildForRequest} returns the first resource it encounters with a
C{isLeaf} attribute set to C{True}.
"""
request = DummyRequest([b"foo", b"bar"])
resource = Resource()
resource.isLeaf = True
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_postPathToPrePath(self):
"""
As path segments from the request are traversed, they are taken from
C{postpath} and put into C{prepath}.
"""
request = DummyRequest([b"foo", b"bar"])
root = Resource()
child = Resource()
child.isLeaf = True
root.putChild(b"foo", child)
self.assertIdentical(child, getChildForRequest(root, request))
self.assertEqual(request.prepath, [b"foo"])
self.assertEqual(request.postpath, [b"bar"]) | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class Canalc2IE(InfoExtractor):
IE_NAME = 'canalc2.tv'
_VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
_TEST = {
'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
'md5': '060158428b650f896c542dfbb3d6487f',
'info_dict': {
'id': '12163',
'ext': 'mp4',
'title': 'Terrasses du Numérique'
}
}
def _real_extract(self, url):
video_id = re.match(self._VALID_URL, url).group('id')
# We need to set the voir field for getting the file name
url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
webpage = self._download_webpage(url, video_id)
file_name = self._search_regex(
r"so\.addVariable\('file','(.*?)'\);",
webpage, 'file name')
video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
title = self._html_search_regex(
r'class="evenement8">(.*?)</a>', webpage, 'title')
return {
'id': video_id,
'ext': 'mp4',
'url': video_url,
'title': title,
} | unknown | codeparrot/codeparrot-clean | ||
from __future__ import annotations
import gzip
import io
import tarfile
from typing import (
TYPE_CHECKING,
Any,
)
import zipfile
from pandas.compat._optional import import_optional_dependency
import pandas as pd
if TYPE_CHECKING:
from collections.abc import Callable
from pathlib import Path
from pandas import (
DataFrame,
Series,
)
# ------------------------------------------------------------------
# File-IO
def round_trip_pickle(obj: Any, tmp_path: Path) -> DataFrame | Series:
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : any object
        The object to pickle and then re-read.
    tmp_path : Path
        The path where the pickled object is written and then read back.

    Returns
    -------
    pandas object
        The original object that was pickled and then re-read.
    """
    pd.to_pickle(obj, tmp_path)
    return pd.read_pickle(tmp_path)
def round_trip_pathlib(writer, reader, tmp_path: Path):
    """
    Write an object to a file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    tmp_path : Path
        The path where the object is written and then read back.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    writer(tmp_path)
    obj = reader(tmp_path)
    return obj
def write_to_compressed(compression, path: str, data, dest: str = "test") -> None:
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "tar":
compress_method = tarfile.TarFile
mode = "w"
file = tarfile.TarInfo(name=dest)
bytes = io.BytesIO(data)
file.size = len(data)
args = (file, bytes)
method = "addfile"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "zstd":
compress_method = import_optional_dependency("zstandard").open
elif compression == "xz":
import lzma
compress_method = lzma.LZMAFile
else:
raise ValueError(f"Unrecognized compression type: {compression}")
# error: No overload variant of "ZipFile" matches argument types "str", "str"
# error: No overload variant of "BZ2File" matches argument types "str", "str"
# error: Argument "mode" to "TarFile" has incompatible type "str";
# expected "Literal['r', 'a', 'w', 'x']
with compress_method(path, mode=mode) as f: # type: ignore[call-overload, arg-type]
getattr(f, method)(*args) | python | github | https://github.com/pandas-dev/pandas | pandas/_testing/_io.py |
# -*- coding: utf-8 -*-
from dateutil.relativedelta import relativedelta
from openerp import fields
from openerp.tests import common
class TestEventCommon(common.TransactionCase):
def setUp(self):
super(TestEventCommon, self).setUp()
# Usefull models
self.Users = self.env['res.users']
self.Event = self.env['event.event']
self.Registration = self.env['event.registration']
self.EventMail = self.env['event.mail']
# User groups
self.group_employee_id = self.env['ir.model.data'].xmlid_to_res_id('base.group_user')
self.group_event_user_id = self.env['ir.model.data'].xmlid_to_res_id('event.group_event_user')
self.group_event_manager_id = self.env['ir.model.data'].xmlid_to_res_id('event.group_event_manager')
# Test users to use through the various tests
self.user_eventuser = self.Users.with_context({'no_reset_password': True}).create({
'name': 'Armande EventUser',
'login': 'Armande',
'alias_name': 'armande',
'email': 'armande.eventuser@example.com',
'groups_id': [(6, 0, [self.group_employee_id, self.group_event_user_id])]
})
self.user_eventmanager = self.Users.with_context({'no_reset_password': True}).create({
'name': 'Bastien EventManager',
'login': 'bastien',
'alias_name': 'bastien',
'email': 'bastien.eventmanager@example.com',
'groups_id': [(6, 0, [self.group_employee_id, self.group_event_manager_id])]
}) | unknown | codeparrot/codeparrot-clean | ||
# -- encoding: UTF-8 --
import re
import sys, json, time
import argparse
def read_messages(filenames):
    """Load Slack export JSON files and return (message set, merged user map).

    Every message becomes a (timestamp, author, text) tuple; the author is
    the "username" field when present, otherwise "user".  Files are read in
    sorted name order, so later files win on user-map key clashes.
    """
    messages = set()
    user_map = {}
    for filename in sorted(filenames):
        with open(filename, "r") as handle:
            data = json.load(handle)
        for record in data["messages"]:
            author = record.get("username") or record.get("user")
            messages.add((float(record["ts"]), author, record["text"]))
        user_map.update(data.get("userMap") or {})
    return (messages, user_map)
def format_messages(messages, user_map):
    """Yield chronologically sorted, IRC-style lines for the message set.

    The user map values are either dicts with a "name" key or plain values;
    both collapse to a display-name lookup table before formatting.
    """
    names = {}
    for uid, info in user_map.items():
        names[uid] = info["name"] if isinstance(info, dict) else str(info)
    for ts_value, author, text in sorted(messages):
        stamp = time.strftime("%Y-%m-%d %H:%M.%S", time.gmtime(ts_value))
        display = names.get(author, author)
        yield "%s <%s> %s" % (stamp, display, expand_message(text, names))
def expand_message(message, user_name_map):
    """Resolve <@UXXX> mentions to @name, unescape '&gt;', flatten newlines.

    Unknown user ids are left untouched; newlines are replaced with " | "
    so every message fits on a single output line.
    """
    def _mention(match):
        uid = match.group(1)
        if uid in user_name_map:
            return "@%s" % user_name_map[uid]
        return match.group(0)

    expanded = re.sub(r"<@([A-Z0-9]+)>", _mention, message)
    return expanded.replace("&gt;", ">").replace("\n", " | ")
def cmdline():
    """Command-line entry point: merge Slack export files and print the log.

    Positional arguments are export JSON files; --usermap-file may supply
    an extra user map (either a bare id->user mapping or an export object
    containing a "userMap" key) whose entries override ids collected from
    the message files.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--usermap-file")
    ap.add_argument("file", nargs="*")
    args = ap.parse_args()
    messages, user_map = read_messages(args.file)
    if args.usermap_file:
        with open(args.usermap_file, "r") as infp:
            data = json.load(infp)
        if "userMap" in data:
            data = data["userMap"]
        user_map.update(dict(data))
    for message in format_messages(messages, user_map):
        print(message)
if __name__ == "__main__":
cmdline() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import (QDialog, QPixmap, QUrl, QScrollArea, QLabel, QSizePolicy,
QDialogButtonBox, QVBoxLayout, QPalette, QApplication, QSize, QIcon,
Qt, QTransform)
from calibre.gui2 import choose_save_file, gprefs
class ImageView(QDialog):
    """Popup dialog showing a single image with zoom/rotate/save controls.

    Geometry is persisted in gprefs under ``geom_name`` so the window
    reopens at its previous size and position.
    """

    def __init__(self, parent, current_img, current_url, geom_name='viewer_image_popup_geometry'):
        QDialog.__init__(self)
        dw = QApplication.instance().desktop()
        self.avail_geom = dw.availableGeometry(parent)
        self.current_img = current_img
        self.current_url = current_url
        self.factor = 1.0  # current zoom factor
        self.geom_name = geom_name
        # The image is shown via a scaled QLabel inside a scroll area.
        self.label = l = QLabel()
        l.setBackgroundRole(QPalette.Base)
        l.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        l.setScaledContents(True)
        self.scrollarea = sa = QScrollArea()
        sa.setBackgroundRole(QPalette.Dark)
        sa.setWidget(l)
        self.bb = bb = QDialogButtonBox(QDialogButtonBox.Close)
        bb.accepted.connect(self.accept)
        bb.rejected.connect(self.reject)
        self.zi_button = zi = bb.addButton(_('Zoom &in'), bb.ActionRole)
        self.zo_button = zo = bb.addButton(_('Zoom &out'), bb.ActionRole)
        self.save_button = so = bb.addButton(_('&Save as'), bb.ActionRole)
        self.rotate_button = ro = bb.addButton(_('&Rotate'), bb.ActionRole)
        zi.setIcon(QIcon(I('plus.png')))
        zo.setIcon(QIcon(I('minus.png')))
        so.setIcon(QIcon(I('save.png')))
        ro.setIcon(QIcon(I('rotate-right.png')))
        zi.clicked.connect(self.zoom_in)
        zo.clicked.connect(self.zoom_out)
        so.clicked.connect(self.save_image)
        ro.clicked.connect(self.rotate_image)
        # NOTE: 'l' is rebound here from the QLabel to the layout.
        self.l = l = QVBoxLayout()
        self.setLayout(l)
        l.addWidget(sa)
        l.addWidget(bb)

    def zoom_in(self):
        """Increase the zoom factor by 25% and rescale the view."""
        self.factor *= 1.25
        self.adjust_image(1.25)

    def zoom_out(self):
        """Decrease the zoom factor by 20% and rescale the view."""
        self.factor *= 0.8
        self.adjust_image(0.8)

    def save_image(self):
        """Ask for a destination path and save the current image there."""
        filters=[('Images', ['png', 'jpeg', 'jpg'])]
        f = choose_save_file(self, 'viewer image view save dialog',
                _('Choose a file to save to'), filters=filters,
                all_files=False)
        if f:
            self.current_img.save(f)

    def adjust_image(self, factor):
        """Resize the label to the current zoom and keep buttons/scrollbars sane."""
        self.label.resize(self.factor * self.current_img.size())
        # Clamp zoom controls to roughly 1/3x .. 3x.
        self.zi_button.setEnabled(self.factor <= 3)
        self.zo_button.setEnabled(self.factor >= 0.3333)
        self.adjust_scrollbars(factor)

    def adjust_scrollbars(self, factor):
        """Rescale scrollbar positions so the visible center stays put."""
        for sb in (self.scrollarea.horizontalScrollBar(),
                self.scrollarea.verticalScrollBar()):
            sb.setValue(int(factor*sb.value()) + ((factor - 1) * sb.pageStep()/2))

    def rotate_image(self):
        """Rotate the displayed pixmap 90 degrees clockwise."""
        pm = self.label.pixmap()
        t = QTransform()
        t.rotate(90)
        pm = pm.transformed(t)
        self.label.setPixmap(pm)
        self.label.adjustSize()

    def __call__(self, use_exec=False):
        """Show the dialog (modally when use_exec is True)."""
        geom = self.avail_geom
        self.label.setPixmap(self.current_img)
        self.label.adjustSize()
        self.resize(QSize(int(geom.width()/2.5), geom.height()-50))
        # Restore previously saved window geometry, if any.
        geom = gprefs.get(self.geom_name, None)
        if geom is not None:
            self.restoreGeometry(geom)
        # Window title uses the last URL path segment; falls back to the raw
        # value when current_url is not a QUrl (AttributeError path).
        try:
            self.current_image_name = unicode(self.current_url.toString()).rpartition('/')[-1]
        except AttributeError:
            self.current_image_name = self.current_url
        title = _('View Image: %s')%self.current_image_name
        self.setWindowTitle(title)
        if use_exec:
            self.exec_()
        else:
            self.show()

    def done(self, e):
        """Persist window geometry before closing."""
        gprefs[self.geom_name] = bytearray(self.saveGeometry())
        return QDialog.done(self, e)

    def wheelEvent(self, event):
        """Zoom with the mouse wheel, ignoring very small scroll deltas."""
        if event.delta() < -14:
            self.zoom_out()
            event.accept()
        elif event.delta() > 14:
            event.accept()
            self.zoom_in()
class ImagePopup(object):
    """Launcher that opens an ImageView for the current image/URL pair.

    Holds references to open dialogs so they are not garbage collected
    while still visible.
    """

    def __init__(self, parent):
        self.current_img = QPixmap()
        self.current_url = QUrl()
        self.parent = parent
        self.dialogs = []

    def __call__(self):
        # Nothing to show if no image has been loaded yet.
        if self.current_img.isNull():
            return
        d = ImageView(self.parent, self.current_img, self.current_url)
        self.dialogs.append(d)
        d.finished.connect(self.cleanup, type=Qt.QueuedConnection)
        d()

    def cleanup(self):
        # Drop references to dialogs that have been closed; iterate over a
        # copy since the list is mutated during iteration.
        for d in tuple(self.dialogs):
            if not d.isVisible():
                self.dialogs.remove(d)
if __name__ == '__main__':
import sys
app = QApplication([])
p = QPixmap()
p.load(sys.argv[-1])
u = QUrl.fromLocalFile(sys.argv[-1])
d = ImageView(None, p, u)
d()
app.exec_() | unknown | codeparrot/codeparrot-clean | ||
# TODO: Test robust skewness
# TODO: Test robust kurtosis
import numpy as np
import pandas as pd
from numpy.testing import (assert_almost_equal, assert_raises, TestCase)
from statsmodels.stats.stattools import (omni_normtest, jarque_bera,
durbin_watson, _medcouple_1d, medcouple,
robust_kurtosis, robust_skewness)
from statsmodels.stats.adnorm import normal_ad
#a random array, rounded to 4 decimals
# Fixed draw shared by all benchmark tests below, so results stay
# reproducible against the hard-coded R reference values.
x = np.array([-0.1184, -1.3403, 0.0063, -0.612, -0.3869, -0.2313, -2.8485,
              -0.2167, 0.4153, 1.8492, -0.3706, 0.9726, -0.1501, -0.0337,
              -1.4423, 1.2489, 0.9182, -0.2331, -0.6182, 0.183])
def test_durbin_watson():
    """durbin_watson matches benchmark values from R's car::durbinWatsonTest."""
    #benchmark values from R car::durbinWatsonTest(x)
    #library("car")
    #> durbinWatsonTest(x)
    #[1] 1.95298958377419
    #> durbinWatsonTest(x**2)
    #[1] 1.848802400319998
    #> durbinWatsonTest(x[2:20]+0.5*x[1:19])
    #[1] 1.09897993228779
    #> durbinWatsonTest(x[2:20]+0.8*x[1:19])
    #[1] 0.937241876707273
    #> durbinWatsonTest(x[2:20]+0.9*x[1:19])
    #[1] 0.921488912587806
    st_R = 1.95298958377419
    assert_almost_equal(durbin_watson(x), st_R, 14)

    st_R = 1.848802400319998
    assert_almost_equal(durbin_watson(x**2), st_R, 14)

    st_R = 1.09897993228779
    assert_almost_equal(durbin_watson(x[1:] + 0.5 * x[:-1]), st_R, 14)

    st_R = 0.937241876707273
    assert_almost_equal(durbin_watson(x[1:] + 0.8 * x[:-1]), st_R, 14)

    st_R = 0.921488912587806
    assert_almost_equal(durbin_watson(x[1:] + 0.9 * x[:-1]), st_R, 14)
def test_omni_normtest():
    """Omnibus, skewness and kurtosis tests match R fBasics benchmark values."""
    #tests against R fBasics
    from scipy import stats

    st_pv_R = np.array(
        [[3.994138321207883, -1.129304302161460, 1.648881473704978],
         [0.1357325110375005, 0.2587694866795507, 0.0991719192710234]])

    nt = omni_normtest(x)
    assert_almost_equal(nt, st_pv_R[:, 0], 14)

    st = stats.skewtest(x)
    assert_almost_equal(st, st_pv_R[:, 1], 14)

    kt = stats.kurtosistest(x)
    assert_almost_equal(kt, st_pv_R[:, 2], 11)

    st_pv_R = np.array(
        [[34.523210399523926, 4.429509162503833, 3.860396220444025],
         [3.186985686465249e-08, 9.444780064482572e-06, 1.132033129378485e-04]])

    x2 = x**2
    #TODO: fix precision in these test with relative tolerance
    nt = omni_normtest(x2)
    assert_almost_equal(nt, st_pv_R[:, 0], 12)

    st = stats.skewtest(x2)
    assert_almost_equal(st, st_pv_R[:, 1], 12)

    kt = stats.kurtosistest(x2)
    assert_almost_equal(kt, st_pv_R[:, 2], 12)
def test_omni_normtest_axis():
    """omni_normtest gives the same result for axis=0 and transposed axis=1."""
    #test axis of omni_normtest
    x = np.random.randn(25, 3)
    nt1 = omni_normtest(x)
    nt2 = omni_normtest(x, axis=0)
    nt3 = omni_normtest(x.T, axis=1)
    assert_almost_equal(nt2, nt1, decimal=13)
    assert_almost_equal(nt3, nt1, decimal=13)
def test_jarque_bera():
    """jarque_bera statistic and p-value match R fBasics benchmark values."""
    #tests against R fBasics
    st_pv_R = np.array([1.9662677226861689, 0.3741367669648314])
    jb = jarque_bera(x)[:2]
    assert_almost_equal(jb, st_pv_R, 14)

    st_pv_R = np.array([78.329987305556, 0.000000000000])
    jb = jarque_bera(x**2)[:2]
    assert_almost_equal(jb, st_pv_R, 13)

    st_pv_R = np.array([5.7135750796706670, 0.0574530296971343])
    jb = jarque_bera(np.log(x**2))[:2]
    assert_almost_equal(jb, st_pv_R, 14)

    st_pv_R = np.array([2.6489315748495761, 0.2659449923067881])
    jb = jarque_bera(np.exp(-x**2))[:2]
    assert_almost_equal(jb, st_pv_R, 14)
def test_shapiro():
    """scipy.stats.shapiro agrees with R fBasics benchmark values."""
    #tests against R fBasics
    #testing scipy.stats
    from scipy.stats import shapiro

    st_pv_R = np.array([0.939984787255526, 0.239621898000460])
    sh = shapiro(x)
    assert_almost_equal(sh, st_pv_R, 4)

    #st is ok -7.15e-06, pval agrees at -3.05e-10
    st_pv_R = np.array([5.799574255943298e-01, 1.838456834681376e-06 * 1e4])
    # p-value scaled by 1e4 so both entries are comparable at 5 decimals
    sh = shapiro(x**2) * np.array([1, 1e4])
    assert_almost_equal(sh, st_pv_R, 5)

    st_pv_R = np.array([0.91730442643165588, 0.08793704167882448])
    sh = shapiro(np.log(x**2))
    assert_almost_equal(sh, st_pv_R, 5)

    #diff is [ 9.38773155e-07, 5.48221246e-08]
    st_pv_R = np.array([0.818361863493919373, 0.001644620895206969])
    sh = shapiro(np.exp(-x**2))
    assert_almost_equal(sh, st_pv_R, 5)
def test_adnorm():
    """Anderson-Darling normality test agrees with R fBasics benchmark values."""
    #tests against R fBasics
    st_pv = []
    st_pv_R = np.array([0.5867235358882148, 0.1115380760041617])
    ad = normal_ad(x)
    assert_almost_equal(ad, st_pv_R, 12)
    st_pv.append(st_pv_R)

    st_pv_R = np.array([2.976266267594575e+00, 8.753003709960645e-08])
    ad = normal_ad(x**2)
    assert_almost_equal(ad, st_pv_R, 11)
    st_pv.append(st_pv_R)

    st_pv_R = np.array([0.4892557856308528, 0.1968040759316307])
    ad = normal_ad(np.log(x**2))
    assert_almost_equal(ad, st_pv_R, 12)
    st_pv.append(st_pv_R)

    st_pv_R = np.array([1.4599014654282669312, 0.0006380009232897535])
    ad = normal_ad(np.exp(-x**2))
    assert_almost_equal(ad, st_pv_R, 12)
    st_pv.append(st_pv_R)

    # The vectorized call along axis=1 must reproduce the four individual
    # results collected above.
    ad = normal_ad(np.column_stack((x, x**2, np.log(x**2), np.exp(-x**2))).T,
                   axis=1)
    assert_almost_equal(ad, np.column_stack(st_pv), 11)
def test_durbin_watson_pandas():
    """durbin_watson accepts a pandas Series and matches the ndarray result."""
    x = np.random.randn(50)
    x_series = pd.Series(x)
    assert_almost_equal(durbin_watson(x), durbin_watson(x_series), decimal=13)
class TestStattools(TestCase):
@classmethod
def setup_class(cls):
x = np.random.standard_normal(1000)
e1, e2, e3, e4, e5, e6, e7 = np.percentile(x, (12.5, 25.0, 37.5, 50.0, 62.5, 75.0, 87.5))
c05, c50, c95 = np.percentile(x, (5.0, 50.0, 95.0))
f025, f25, f75, f975 = np.percentile(x, (2.5, 25.0, 75.0, 97.5))
mean = np.mean
kr1 = mean(((x - mean(x)) / np.std(x))**4.0) - 3.0
kr2 = ((e7 - e5) + (e3 - e1)) / (e6 - e2) - 1.2330951154852172
kr3 = (mean(x[x > c95]) - mean(x[x < c05])) / (mean(x[x > c50]) - mean(x[x < c50])) - 2.5852271228708048
kr4 = (f975 - f025) / (f75 - f25) - 2.9058469516701639
cls.kurtosis_x = x
cls.expected_kurtosis = np.array([kr1, kr2, kr3, kr4])
cls.kurtosis_constants = np.array([3.0,1.2330951154852172,2.5852271228708048,2.9058469516701639])
def test_medcouple_no_axis(self):
x = np.reshape(np.arange(100.0), (50, 2))
mc = medcouple(x, axis=None)
assert_almost_equal(mc, medcouple(x.ravel()))
def test_medcouple_1d(self):
x = np.reshape(np.arange(100.0),(50,2))
assert_raises(ValueError, _medcouple_1d, x)
def test_medcouple_symmetric(self):
mc = medcouple(np.arange(5.0))
assert_almost_equal(mc, 0)
def test_medcouple_nonzero(self):
mc = medcouple(np.array([1, 2, 7, 9, 10.0]))
assert_almost_equal(mc, -0.3333333)
def test_medcouple_symmetry(self):
x = np.random.standard_normal(100)
mcp = medcouple(x)
mcn = medcouple(-x)
assert_almost_equal(mcp + mcn, 0)
def test_durbin_watson(self):
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
assert_almost_equal(dw, durbin_watson(x))
def test_durbin_watson_2d(self):
shape = (1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[:, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x))
def test_durbin_watson_3d(self):
shape = (10, 1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[None, :, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x, axis=1))
def test_robust_skewness_1d(self):
x = np.arange(21.0)
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_1d_2d(self):
x = np.random.randn(21)
y = x[:, None]
sk_x = robust_skewness(x)
sk_y = robust_skewness(y, axis=None)
assert_almost_equal(np.array(sk_x), np.array(sk_y))
def test_robust_skewness_symmetric(self):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_3d(self):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
x = np.tile(x, (10, 10, 1))
sk_3d = robust_skewness(x, axis=2)
result = np.zeros((10, 10))
for sk in sk_3d:
assert_almost_equal(sk, result)
def test_robust_kurtosis_1d_2d(self):
x = np.random.randn(100)
y = x[:, None]
kr_x = np.array(robust_kurtosis(x))
kr_y = np.array(robust_kurtosis(y, axis=None))
assert_almost_equal(kr_x, kr_y)
def test_robust_kurtosis(self):
x = self.kurtosis_x
assert_almost_equal(np.array(robust_kurtosis(x)), self.expected_kurtosis)
def test_robust_kurtosis_3d(self):
x = np.tile(self.kurtosis_x, (10, 10, 1))
kurtosis = np.array(robust_kurtosis(x, axis=2))
for i, r in enumerate(self.expected_kurtosis):
assert_almost_equal(r * np.ones((10, 10)), kurtosis[i])
def test_robust_kurtosis_excess_false(self):
x = self.kurtosis_x
expected = self.expected_kurtosis + self.kurtosis_constants
kurtosis = np.array(robust_kurtosis(x, excess=False))
assert_almost_equal(expected, kurtosis)
def test_robust_kurtosis_ab(self):
    """Test custom alpha, beta in kr3"""
    data = self.kurtosis_x
    alpha, beta = 10.0, 45.0
    estimates = robust_kurtosis(data, ab=(alpha, beta), excess=False)
    # kr3 is a ratio of trimmed-tail means; recompute it directly.
    numer = (np.mean(data[data > np.percentile(data, 100.0 - alpha)]) -
             np.mean(data[data < np.percentile(data, alpha)]))
    denom = (np.mean(data[data > np.percentile(data, 100.0 - beta)]) -
             np.mean(data[data < np.percentile(data, beta)]))
    assert_almost_equal(estimates[2], numer / denom)
def test_robust_kurtosis_dg(self):
    """Test custom delta, gamma in kr4"""
    data = self.kurtosis_x
    delta, gamma = 10.0, 45.0
    estimates = robust_kurtosis(data, dg=(delta, gamma), excess=False)
    # kr4 is a ratio of inter-quantile ranges; recompute it directly.
    quantiles = np.percentile(data, [delta, 100.0 - delta, gamma, 100.0 - gamma])
    assert_almost_equal(estimates[3],
                        (quantiles[1] - quantiles[0]) / (quantiles[3] - quantiles[2]))
# Allow running this test module directly from the command line via nose.
if __name__ == "__main__":
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)  #, '--pdb'
    # run_module_suite()
    #nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
    #               exit=False)
# Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras=None):
    """Return absolute paths of every *filename* found on PATH.

    Directories in ``extras`` (if given) are searched after the PATH
    entries.  Results are de-duplicated while preserving search order.
    Previously the PATH scan and the extras scan were two copy-pasted
    loops; they are merged into a single pass over the combined list.
    """
    search_dirs = os.environ["PATH"].split(os.pathsep)
    if extras:
        search_dirs = search_dirs + list(extras)
    ret = []
    for directory in search_dirs:
        fname = os.path.abspath(os.path.join(directory, filename))
        if os.path.isfile(fname) and fname not in ret:
            ret.append(fname)
    return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
    """Return the first Perl from *perls* whose Win32 package loads, or None.

    Cygwin perl does *not* work (no Win32 package); ActivePerl does, so we
    probe each candidate with ``use Win32;``.  Prints diagnostics when no
    suitable interpreter exists.
    """
    for candidate in perls:
        pipe = os.popen(candidate + ' -e "use Win32;"')
        pipe.read()
        # popen().close() returns None when the child exited successfully.
        if pipe.close() is None:
            return candidate
    print("Can not find a suitable PERL:")
    if perls:
        print(" the following perl interpreters were found:")
        for p in perls:
            print(" ", p)
        print(" None of these versions appear suitable for building OpenSSL")
    else:
        print(" NO perl interpreters were found on this machine at all!")
        print(" Please install ActivePerl and ensure it appears on your path")
    return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
    """Locate the newest ``openssl-*`` source directory under *sources*.

    Candidates whose basename splits into four or more dash/dot parts
    (e.g. ``openssl-0.9.7-beta1``) are ignored, as before.

    Bugfix: the original compared the version parts as *strings*, so a
    lexicographic comparison ranked e.g. ``0.9.9`` above ``0.9.10``.
    Parts are now compared numerically where possible.
    """
    candidates = []
    for s in sources:
        try:
            # note: do not abspath s; the build will fail if any
            # higher up directory name has spaces in it.
            fnames = os.listdir(s)
        except OSError:
            fnames = []
        for fname in fnames:
            fqn = os.path.join(s, fname)
            if os.path.isdir(fqn) and fname.startswith("openssl-"):
                candidates.append(fqn)

    def version_key(candidate):
        # Split e.g. "openssl-0.9.8k" into ['0', '9', '8k'] and turn each
        # part into a (numeric-prefix, suffix) pair so parts compare
        # numerically first, then by any trailing letters.
        parts = re.split("[.-]", os.path.basename(candidate))[1:]
        key = []
        for part in parts:
            num, suffix = re.match(r"(\d*)(.*)", part).groups()
            key.append((int(num) if num else -1, suffix))
        return key

    # Now we have all the candidates, locate the best.
    best_key = []
    best_name = None
    for c in candidates:
        parts = re.split("[.-]", os.path.basename(c))[1:]
        # eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
        if len(parts) >= 4:
            continue
        key = version_key(c)
        if key > best_key:
            best_key = key
            best_name = c
    if best_name is not None:
        print("Found an SSL directory at '%s'" % (best_name,))
    else:
        print("Could not find an SSL directory in '%s'" % (sources,))
    sys.stdout.flush()
    return best_name
def create_makefile64(makefile, m32):
    """Derive a 64-bit makefile from the 32-bit one, then delete the 32-bit file.

    Rewrites the 32-bit directory names to their 64-bit counterparts, forces
    the linker tools to target X64, and drops bufferoverflowu.lib (not
    linked on 64-bit systems).  Does nothing if *m32* does not exist.
    """
    if not os.path.isfile(m32):
        return
    substitutions = (
        ("=tmp32", "=tmp64"),
        ("=out32", "=out64"),
        ("=inc32", "=inc64"),
        # force 64 bit machine
        ("MKLIB=lib", "MKLIB=lib /MACHINE:X64"),
        ("LFLAGS=", "LFLAGS=/MACHINE:X64 "),
        # don't link against the lib on 64bit systems
        ("bufferoverflowu.lib", ""),
    )
    with open(m32) as fin, open(makefile, "w") as fout:
        for line in fin:
            for old, new in substitutions:
                line = line.replace(old, new)
            fout.write(line)
    os.unlink(m32)
def fix_makefile(makefile):
    """Patch an OpenSSL makefile in place so it builds without Perl.

    Drops the PERL= definition, substitutes Windows-native copy/mkdir
    commands, and appends -DOPENSSL_NO_{RC5,MDC2,IDEA} to CFLAG when
    missing.  Does nothing if *makefile* does not exist.
    """
    if not os.path.isfile(makefile):
        return
    with open(makefile) as fin:
        original = fin.readlines()
    patched = []
    for line in original:
        if line.startswith("PERL="):
            continue  # remove the Perl dependency entirely
        if line.startswith("CP="):
            line = "CP=copy\n"
        elif line.startswith("MKDIR="):
            line = "MKDIR=mkdir\n"
        elif line.startswith("CFLAG="):
            flags = line.strip()
            for algo in ("RC5", "MDC2", "IDEA"):
                noalgo = " -DOPENSSL_NO_%s" % algo
                if noalgo not in flags:
                    flags = flags + noalgo
            line = flags + '\n'
        patched.append(line)
    with open(makefile, 'w') as fout:
        fout.writelines(patched)
def run_configure(configure, do_script):
    """Run OpenSSL's Perl Configure script, then the ms\\do_* helper script.

    Both commands are echoed before execution so build logs show exactly
    what was run.
    """
    configure_cmd = "perl Configure " + configure
    print(configure_cmd)
    os.system(configure_cmd)
    print(do_script)
    os.system(do_script)
def main():
    """Configure and build OpenSSL for the requested configuration/platform.

    Expects ``sys.argv[1]`` to be "Release" or "Debug" and ``sys.argv[2]``
    to be "Win32" or "x64"; anything else raises ValueError.  Exits the
    process with the nmake result, or 1 if no OpenSSL source tree is found.

    Bugfix: "Found a working perl at '...'" used to be printed
    unconditionally, producing "Found a working perl at 'None'" when no
    Perl was available; it is now only printed when a Perl was found.
    """
    build_all = "-a" in sys.argv
    if sys.argv[1] == "Release":
        debug = False
    elif sys.argv[1] == "Debug":
        debug = True
    else:
        raise ValueError(str(sys.argv))

    if sys.argv[2] == "Win32":
        arch = "x86"
        configure = "VC-WIN32"
        do_script = "ms\\do_nasm"
        makefile = "ms\\nt.mak"
        m32 = makefile
    elif sys.argv[2] == "x64":
        arch = "amd64"
        configure = "VC-WIN64A"
        do_script = "ms\\do_win64a"
        makefile = "ms\\nt64.mak"
        m32 = makefile.replace('64', '')
    else:
        raise ValueError(str(sys.argv))

    # NOTE(review): make_flags is computed but never passed to nmake below,
    # so "-a" currently has no effect on the build -- confirm intent.
    make_flags = ""
    if build_all:
        make_flags = "-a"

    # perl should be on the path, but we also look in "\perl" and "c:\\perl"
    # as "well known" locations
    perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
    perl = find_working_perl(perls)
    if perl is None:
        print("No Perl installation was found. Existing Makefiles are used.")
    else:
        print("Found a working perl at '%s'" % (perl,))
    sys.stdout.flush()

    # Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
    ssl_dir = find_best_ssl_dir(("..\\..\\..",))
    if ssl_dir is None:
        sys.exit(1)

    old_cd = os.getcwd()
    rc = 0  # defensive default; every exit path below assigns it anyway
    try:
        os.chdir(ssl_dir)
        # rebuild makefile when we do the role over from 32 to 64 build
        if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
            os.unlink(m32)
        # If the ssl makefiles do not exist, we invoke Perl to generate them.
        # Due to a bug in this script, the makefile sometimes ended up empty
        # Force a regeneration if it is.
        if not os.path.isfile(makefile) or os.path.getsize(makefile) == 0:
            if perl is None:
                print("Perl is required to build the makefiles!")
                sys.exit(1)
            print("Creating the makefiles...")
            sys.stdout.flush()
            # Put our working Perl at the front of our path
            os.environ["PATH"] = os.path.dirname(perl) + \
                                 os.pathsep + \
                                 os.environ["PATH"]
            run_configure(configure, do_script)
            if debug:
                print("OpenSSL debug builds aren't supported.")
            if arch == "amd64":
                create_makefile64(makefile, m32)
            fix_makefile(makefile)
            shutil.copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
            shutil.copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)

        # Now run make.
        if arch == "amd64":
            # The 64-bit build needs the hand-assembled uptable object first.
            rc = os.system(r"ml64 -c -Foms\uptable.obj ms\uptable.asm")
            if rc:
                print("ml64 assembler has failed.")
                sys.exit(rc)

        # Restore the per-arch generated headers before invoking nmake.
        shutil.copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
        shutil.copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
        makeCommand = "nmake /nologo -f \"%s\"" % makefile
        print("Executing ssl makefiles:", makeCommand)
        sys.stdout.flush()
        rc = os.system(makeCommand)
        if rc:
            print("Executing " + makefile + " failed")
            print(rc)
            sys.exit(rc)
    finally:
        os.chdir(old_cd)
    sys.exit(rc)
# Script entry point: build OpenSSL per the command-line arguments.
if __name__=='__main__':
    main()
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.db.models import Extent3D, Union
from django.contrib.gis.db.models.functions import (
AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate,
)
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from .models import (
City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D,
MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D,
)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
# Paths to the shapefile / VRT fixtures used by the LayerMapping tests.
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')

# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)

# Reference mapping of city name to its full coordinate triple; tests read
# the altitude as city_dict[name][2].
city_dict = {name: coords for name, coords in city_data}

# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
# Each entry is (name, 3D LINESTRING WKT, expected tuple of Z values).
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
     '-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
     '-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
     '-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
     '-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
     '-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
     '-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
     '-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
     '-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
     '-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
     '-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
      15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
      15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
      15.435),
     ),
)

# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
    'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
    '942051.75 4208366.38,941527.97 4225693.20))',
    (21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DLoadingHelper(object):
    """Mixin that seeds the 3D GIS test models from the module fixtures."""

    def _load_interstate_data(self):
        # Interstate (2D / 3D and Geographic / Projected variants).
        for name, wkt, exp_z in interstate_data:
            geom_3d = GEOSGeometry(wkt, srid=4269)
            geom_2d = LineString([coord[:2] for coord in geom_3d.coords], srid=4269)
            # Creating a geographic and projected version of the
            # interstate in both 2D and 3D.
            for model, geom in ((Interstate3D, geom_3d),
                                (InterstateProj3D, geom_3d),
                                (Interstate2D, geom_2d),
                                (InterstateProj2D, geom_2d)):
                model.objects.create(name=name, line=geom)

    def _load_city_data(self):
        for name, coords in city_data:
            City3D.objects.create(name=name, point=Point(*coords, srid=4326))

    def _load_polygon_data(self):
        wkt, z_values = bbox_data
        flat_poly = GEOSGeometry(wkt, srid=32140)
        # Graft the reference Z values onto the 2D exterior ring.
        ring_3d = tuple((x, y, z) for (x, y), z in zip(flat_poly[0].coords, z_values))
        Polygon2D.objects.create(name='2D BBox', poly=flat_poly)
        Polygon3D.objects.create(name='3D BBox', poly=Polygon(ring_3d, srid=32140))
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_storage")
class Geo3DTest(Geo3DLoadingHelper, TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:
    http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions

    Several tests exercise deprecated GeoQuerySet methods (kml, geojson,
    perimeter, length, scale, translate) under ignore_warnings; the
    function-based equivalents live in Geo3DFunctionsTests.
    """

    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))
        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            z = pnt_data[2]
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)

    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)

    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}
        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())
        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)
        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())
        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_kml(self):
        """
        Test GeoQuerySet.kml() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_geojson(self):
        """
        Test GeoQuerySet.geojson() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    @skipUnlessDBFeature("supports_3d_functions")
    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = (
            'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
            '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
            '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        )
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        # Ordering of points in the resulting geometry may vary between implementations
        self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})

    @skipUnlessDBFeature("supports_3d_functions")
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()

        def check_extent3d(extent3d, tol=6):
            # Compare component-wise against the reference to `tol` places.
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)

        for e3d in [extent1, extent2]:
            check_extent3d(e3d)
        # Empty querysets must yield None rather than a degenerate extent.
        self.assertIsNone(City3D.objects.none().extent3d())
        self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_perimeter(self):
        """
        Testing GeoQuerySet.perimeter() on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_length(self):
        """
        Testing GeoQuerySet.length() on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        # FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_scale(self):
        """
        Testing GeoQuerySet.scale() on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_translate(self):
        """
        Testing GeoQuerySet.translate() on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase):
    """
    Mirrors the deprecated-GeoQuerySet cases in Geo3DTest using the
    database function expressions (AsKML, AsGeoJSON, Perimeter, Length,
    Scale, Translate) via annotate().
    """

    def test_kml(self):
        """
        Test KML() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    def test_geojson(self):
        """
        Test GeoJSON() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    def test_perimeter(self):
        """
        Testing Perimeter() function on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox')
        self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
        poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox')
        self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)

    def test_length(self):
        """
        Testing Length() function on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        # FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)

    def test_scale(self):
        """
        Testing Scale() function on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    def test_translate(self):
        """
        Testing Translate() function on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
# Ansible-doc metadata parsed by the sanity tooling; the string contents
# are significant and must not be altered.
DOCUMENTATION = """
module: _not_deprecated
short_description: This module is not deprecated
description: Its name has a leading underscore, but it is not deprecated.
author:
- Ansible Core Team
"""

EXAMPLES = """#"""

RETURN = """"""

from ansible.module_utils.basic import AnsibleModule

# Accept no arguments and report success immediately; judging by its path
# this module appears to exist only as a validate-modules sanity-test
# fixture for leading-underscore names -- confirm against the test target.
if __name__ == '__main__':
    module = AnsibleModule(argument_spec=dict())
    module.exit_json()
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to ASCII markers when stdout's encoding can't round-trip
    # the unicode symbols.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "

if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    # each pair is (reset-sequence, set-sequence)
    BOLD = ('\033[0m', '\033[1m')
    BLUE = ('\033[0m', '\033[0;34m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')

# Exit statuses the individual test scripts use to signal pass / skip.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77

# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
# Registry of functional test scripts.  Entries may carry command-line
# options (e.g. "rpc_bind.py --ipv4"); the same file can appear more than
# once with different options.
BASE_SCRIPTS = [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'wallet_hd.py',
    'wallet_backup.py',
    # vv Tests less than 5m vv
    'feature_block.py',
    'rpc_fundrawtransaction.py',
    'p2p_compactblocks.py',
    'feature_segwit.py',
    # vv Tests less than 2m vv
    'wallet_basic.py',
    'wallet_labels.py',
    'p2p_segwit.py',
    'wallet_dump.py',
    'wallet_listtransactions.py',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_zapwallettxes.py',
    'wallet_importmulti.py',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py',
    'wallet_abandonconflict.py',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py',
    'wallet_address_types.py',
    'feature_reindex.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py',
    'interface_zmq.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py',
    'wallet_multiwallet.py --usecli',
    'interface_http.py',
    'rpc_users.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py',
    'rpc_net.py',
    'wallet_keypool.py',
    'p2p_mempool.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_block.py',
    'p2p_invalid_tx.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py',
    'rpc_signmessage.py',
    'feature_nulldummy.py',
    'mempool_accept.py',
    'wallet_import_rescan.py',
    'rpc_bind.py --ipv4',
    'rpc_bind.py --ipv6',
    'rpc_bind.py --nonloopback',
    'mining_basic.py',
    'wallet_bumpfee.py',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py',
    'p2p_leak.py',
    'wallet_encryption.py',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py',
    'wallet_fallbackfee.py',
    'feature_minchainwork.py',
    'rpc_getblockstats.py',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'p2p_unrequested_blocks.py',
    'feature_includeconf.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_blocksdir.py',
    'feature_config_args.py',
    'feature_help.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]

EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    # vv Tests less than 20m vv
    'feature_fee_estimation.py',
    # vv Tests less than 5m vv
    'feature_maxuploadtarget.py',
    'mempool_packages.py',
    'feature_dbcrash.py',
    # vv Tests less than 2m vv
    'feature_bip68_sequence.py',
    'mining_getblocktemplate_longpoll.py',
    'p2p_timeouts.py',
    # vv Tests less than 60s vv
    'p2p_feefilter.py',
    # vv Tests less than 30s vv
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_invalidateblock.py',
    'feature_rbf.py',
]

# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Entry point: parse arguments, assemble the test list and run the tests.

    Unrecognised ``--`` arguments are forwarded to every test script; bare
    positional arguments select individual tests from ALL_SCRIPTS.
    """
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile))

    # Every test script needs to know where the config file lives.
    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub("\.py$", "", test) + ".py" for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [re.sub("\.py$", "", test) + ".py" for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    # Sanity-check the test lists against the scripts on disk before running.
    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list,
        config["environment"]["SRCDIR"],
        config["environment"]["BUILDDIR"],
        tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
    )
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
    """Run every script in test_list with up to `jobs` parallel workers.

    Prints per-test progress/failure output, a final summary table, and exits
    the process with a non-zero status if any test failed.
    """
    args = args or []

    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # pidof missing or no process found - nothing to warn about.
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    start_time = time.time()
    test_results = []

    # Width of the name column in the summary table.
    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print the formatted summary table of all test results to stdout."""
    test_results.sort(key=TestResult.sort_key)

    # Header row first; each result then renders itself via __repr__.
    parts = ["\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS  ", "DURATION") + BOLD[0]]

    passed_all = True
    total_time = 0
    for result in test_results:
        passed_all = passed_all and result.was_successful
        total_time += result.time
        result.padding = max_len_name
        parts.append(str(result))

    overall = TICK + "Passed" if passed_all else CROSS + "Failed"
    if not passed_all:
        parts.append(RED[1])
    parts.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), overall.ljust(9), total_time) + BOLD[0])
    if not passed_all:
        parts.append(RED[0])
    parts.append("Runtime: %s s\n" % (runtime))
    print("".join(parts))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.

    Spawns up to num_jobs test scripts as subprocesses and hands back one
    finished result at a time via get_next().
    """

    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie bitcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each entry: (name, start_time, Popen, testdir, stdout_file, stderr_file).
        self.jobs = []

    def get_next(self):
        """Top up the running jobs, then block until one finishes.

        Returns a (TestResult, testdir, stdout, stderr) tuple for the first
        job that completes.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            # NOTE(review): the "." in ".py$" is an unescaped regex
            # metacharacter - harmless for these names, but confirm intent.
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
                    # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any output on stderr demotes a passing exit code to a failure.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            print('.', end='', flush=True)

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()
class TestResult():
    """Outcome of a single test script, renderable as one summary-table row."""

    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        # Name-column width used by __repr__; print_results sets this before rendering.
        self.padding = 0

    def sort_key(self):
        # Display order: passed first, then skipped, failures last,
        # alphabetical within each group.
        if self.status == "Passed":
            return 0, self.name.lower()
        if self.status == "Skipped":
            return 1, self.name.lower()
        if self.status == "Failed":
            return 2, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color, glyph = BLUE, TICK
        elif self.status == "Failed":
            color, glyph = RED, CROSS
        elif self.status == "Skipped":
            color, glyph = GREY, CIRCLE
        row = "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time)
        return color[1] + row + color[0]

    @property
    def was_successful(self):
        # A skipped test still counts as success for the overall exit code.
        return self.status != "Failed"
def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    allowed_prefix = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    offenders = [script for script in ALL_SCRIPTS if allowed_prefix.match(script) is None]
    if not offenders:
        return
    print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(offenders)))
    print("  %s" % ("\n  ".join(sorted(offenders))))
    raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
    """Warn about scripts in test/functional/ missing from the test lists.

    On Travis this warning is escalated to a hard failure so incomplete
    commits cannot be merged.
    """
    script_dir = src_dir + '/test/functional/'
    python_files = set(name for name in os.listdir(script_dir) if name.endswith(".py"))
    known_files = set(entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS)
    missed_tests = list(python_files - known_files)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if os.getenv('TRAVIS') == 'true':
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Each test subprocess writes coverage files into a shared directory; those
    files list the RPC commands invoked during testing. A reference file
    (`rpc_interface.txt`) lists every RPC command per `bitcoin-cli help`.
    After the run, the two are diffed to find RPC commands never exercised.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        # Command-line flag handed to every test script.
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
            return
        print("Uncovered RPC commands:")
        print("".join(("  - %s\n" % command) for command in sorted(uncovered)))

    def cleanup(self):
        """Delete the coverage directory and everything in it."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as reference_file:
            all_cmds = set(line.strip() for line in reference_file.readlines())

        covered_cmds = set()
        for root, dirs, files in os.walk(self.dir):
            for filename in files:
                if not filename.startswith(coverage_file_prefix):
                    continue
                with open(os.path.join(root, filename), 'r') as coverage_file:
                    covered_cmds.update(line.strip() for line in coverage_file.readlines())

        return all_cmds - covered_cmds
# Script entry point: run the functional test runner when executed directly.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
A simple countdown timer.
This is a very basic countdown timer. You can change the timer length as well
as pausing, restarting and resetting it. Currently this is more of a demo of a
composite.
Each part of the timer can be changed independently hours, minutes, seconds using
mouse buttons 4 and 5 (scroll wheel).
Button 1 starts/pauses the countdown.
Button 2 resets timer.
Configuration parameters:
format: display format for this module (default 'Timer {timer}')
sound: play sound file path when the timer ends (default None)
time: number of seconds to start countdown with (default 60)
Format placeholders:
{timer} display hours:minutes:seconds
@author tobes
SAMPLE OUTPUT
{'full_text': 'Timer 0:01:00'}
running
[
{'full_text': 'Timer '},
{'color': '#00FF00', 'full_text': '0'},
{'full_text': ':'},
{'color': '#00FF00', 'full_text': '00'},
{'full_text': ':'},
{'color': '#00FF00', 'full_text': '54'},
]
paused
[
{'full_text': 'Timer '},
{'color': '#FFFF00', 'full_text': '0'},
{'full_text': ':'},
{'color': '#FFFF00', 'full_text': '00'},
{'full_text': ':'},
{'color': '#FFFF00', 'full_text': '54'},
]
"""
from time import time
from threading import Timer
class Py3status:
    """
    Countdown timer module.

    State is driven by py3status callbacks: timer() renders the bar output,
    on_click() handles mouse interaction, and a threading.Timer fires
    _time_up() when the countdown reaches zero.
    """
    # available configuration parameters
    format = 'Timer {timer}'
    sound = None
    time = 60

    def post_config_hook(self):
        """Initialize runtime state (distinct from the user configuration above)."""
        self.running = False        # countdown currently ticking
        self.end_time = None        # wall-clock time the countdown ends
        self.time_left = None       # remaining seconds while paused
        self.color = None           # current display color
        self.alarm_timer = None     # threading.Timer that fires _time_up
        self.alarm = False          # a sound alarm is currently playing
        self.done = False           # countdown has reached zero

    def _time_up(self):
        """
        Called when the timer expires
        """
        self.running = False
        self.color = self.py3.COLOR_BAD
        self.time_left = 0
        self.done = True
        if self.sound:
            self.py3.play_sound(self.sound)
            self.alarm = True
        self.timer()

    def timer(self):
        """Render the countdown as a composite of clickable H/M/S sections."""
        if self.running or self.done:
            # Derive remaining time from the wall clock, clamped at zero.
            t = int(self.end_time - time())
            if t <= 0:
                t = 0
        else:
            # Paused (or never started): show the stored remaining/initial time.
            if self.time_left:
                t = self.time_left
            else:
                t = self.time
        hours, t = divmod(t, 3600)
        minutes, t = divmod(t, 60)
        seconds = t
        if self.running:
            # Refresh once per second, aligned to the countdown's sub-second phase.
            cached_until = self.py3.time_in(0, offset=self.cache_offset)
        else:
            cached_until = self.py3.CACHE_FOREVER
        # Each time section carries an 'index' so on_click can tell which
        # part of the display was scrolled.
        composites = [
            {
                'full_text': str(hours),
                'color': self.color,
                'index': 'hours',
            },
            {
                'full_text': ':',
            },
            {
                'full_text': format(minutes, '02d'),
                'color': self.color,
                'index': 'minutes',
            },
            {
                'full_text': ':',
            },
            {
                'full_text': format(seconds, '02d'),
                'color': self.color,
                'index': 'seconds',
            },
        ]
        # NOTE: local name shadows the method name; kept for the placeholder.
        timer = self.py3.composite_create(composites)
        response = {
            'cached_until': cached_until,
            'full_text': self.py3.safe_format(self.format, {'timer': timer})
        }
        if self.done:
            response['urgent'] = True
        return response

    def on_click(self, event):
        """Handle mouse events: button 1 start/pause, 2 reset, 4/5 adjust."""
        # Seconds added/removed per scroll step, keyed by clicked section.
        deltas = {
            'hours': 3600,
            'minutes': 60,
            'seconds': 1
        }
        index = event['index']
        button = event['button']
        # If played an alarm sound, then cancel the sound and urgent on any
        # button press... otherwise, we only cancel an urgent
        if self.done:
            self.done = False
            if self.alarm:
                self.py3.stop_sound()
                self.alarm = False
            return
        if button == 1:
            if self.running:
                # pause timer
                self.running = False
                self.time_left = int(self.end_time - time())
                self.color = self.py3.COLOR_DEGRADED
                if self.alarm_timer:
                    self.alarm_timer.cancel()
            else:
                # start/restart timer
                self.running = True
                if self.time_left:
                    self.end_time = time() + self.time_left
                else:
                    self.end_time = time() + self.time
                # Sub-second phase so display refreshes land on tick boundaries.
                self.cache_offset = self.end_time % 1
                self.color = self.py3.COLOR_GOOD
                if self.alarm_timer:
                    self.alarm_timer.cancel()
                self.done = False
                self.alarm_timer = Timer(self.time_left or self.time, self._time_up)
                self.alarm_timer.start()
        if button == 2:
            # Reset to the configured time and stop.
            self.running = False
            self.time_left = None
            self.color = None
            self.done = False
            if self.alarm_timer:
                self.alarm_timer.cancel()
        if not self.running:
            self.done = False
            # change timer section HH:MM:SS
            if self.time_left:
                t = self.time_left
            else:
                t = self.time
            if button == 4:
                t += deltas.get(index, 0)
            if button == 5:
                t -= deltas.get(index, 0)
                if t < 0:
                    t = 0
            if self.time_left:
                self.time_left = t
            else:
                self.time = t

    def kill(self):
        # remove any timer
        if self.alarm_timer:
            self.alarm_timer.cancel()
# Entry point for manual testing via py3status's module test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io"
"github.com/spf13/cobra"
apimachineryversion "k8s.io/apimachinery/pkg/version"
"k8s.io/component-base/version"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
)
// Version is the serializable wrapper around the kubeadm client version
// information; it is what the "yaml" and "json" output formats marshal.
type Version struct {
	ClientVersion *apimachineryversion.Info `json:"clientVersion"`
}
// newCmdVersion returns the cobra command implementing `kubeadm version`.
// The command writes the client version to out in the format selected via
// the --output/-o flag (default Go-syntax dump, "short", "yaml" or "json").
func newCmdVersion(out io.Writer) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "version",
		Short: "Print the version of kubeadm",
		RunE: func(cmd *cobra.Command, args []string) error {
			return RunVersion(out, cmd)
		},
		Args: cobra.NoArgs,
	}
	cmd.Flags().StringP("output", "o", "", "Output format; available options are 'yaml', 'json' and 'short'")
	return cmd
}
// RunVersion provides the version information of kubeadm in format depending on arguments
// specified in cobra.Command.
func RunVersion(out io.Writer, cmd *cobra.Command) error {
klog.V(1).Infoln("[version] retrieving version info")
clientVersion := version.Get()
v := Version{
ClientVersion: &clientVersion,
}
const flag = "output"
of, err := cmd.Flags().GetString(flag)
if err != nil {
return errors.Wrapf(err, "error accessing flag %s for command %s", flag, cmd.Name())
}
switch of {
case "":
fmt.Fprintf(out, "kubeadm version: %#v\n", v.ClientVersion)
case "short":
fmt.Fprintf(out, "%s\n", v.ClientVersion.GitVersion)
case "yaml":
y, err := yaml.Marshal(&v)
if err != nil {
return err
}
fmt.Fprintln(out, string(y))
case "json":
y, err := json.MarshalIndent(&v, "", " ")
if err != nil {
return err
}
fmt.Fprintln(out, string(y))
default:
return errors.Errorf("invalid output format: %s", of)
}
return nil
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kubeadm/app/cmd/version.go |
---
title: RouterProvider
---
# RouterProvider
<!--
⚠️ ⚠️ IMPORTANT ⚠️ ⚠️
Thank you for helping improve our documentation!
This file is auto-generated from the JSDoc comments in the source
code, so please edit the JSDoc comments in the file below and this
file will be re-generated once those changes are merged.
https://github.com/remix-run/react-router/blob/main/packages/react-router/lib/components.tsx
-->
[MODES: data]
## Summary
[Reference Documentation ↗](https://api.reactrouter.com/v7/functions/react-router.RouterProvider.html)
Render the UI for the given [`DataRouter`](https://api.reactrouter.com/v7/interfaces/react-router.DataRouter.html). This component should
typically be at the top of an app's element tree.
```tsx
import { createBrowserRouter } from "react-router";
import { RouterProvider } from "react-router/dom";
import { createRoot } from "react-dom/client";
const router = createBrowserRouter(routes);
createRoot(document.getElementById("root")).render(
<RouterProvider router={router} />
);
```
<docs-info>Please note that this component is exported both from
`react-router` and `react-router/dom` with the only difference being that the
latter automatically wires up `react-dom`'s [`flushSync`](https://react.dev/reference/react-dom/flushSync)
implementation. You _almost always_ want to use the version from
`react-router/dom` unless you're running in a non-DOM environment.</docs-info>
## Signature
```tsx
function RouterProvider({
router,
flushSync: reactDomFlushSyncImpl,
onError,
unstable_useTransitions,
}: RouterProviderProps): React.ReactElement
```
## Props
### flushSync
The [`ReactDOM.flushSync`](https://react.dev/reference/react-dom/flushSync)
implementation to use for flushing updates.
You usually don't have to worry about this:
- The `RouterProvider` exported from `react-router/dom` handles this internally for you
- If you are rendering in a non-DOM environment, you can import
`RouterProvider` from `react-router` and ignore this prop
### onError
An error handler function that will be called for any middleware, loader, action,
or render errors that are encountered in your application. This is useful for
logging or reporting errors instead of in the `ErrorBoundary` because it's not
subject to re-rendering and will only run one time per error.
The `errorInfo` parameter is passed along from
[`componentDidCatch`](https://react.dev/reference/react/Component#componentdidcatch)
and is only present for render errors.
```tsx
<RouterProvider onError={(error, info) => {
let { location, params, unstable_pattern, errorInfo } = info;
console.error(error, location, errorInfo);
reportToErrorService(error, location, errorInfo);
}} />
```
### router
The [`DataRouter`](https://api.reactrouter.com/v7/interfaces/react-router.DataRouter.html) instance to use for navigation and data fetching.
### unstable_useTransitions
Control whether router state updates are internally wrapped in
[`React.startTransition`](https://react.dev/reference/react/startTransition).
- When left `undefined`, all state updates are wrapped in
`React.startTransition`
- This can lead to buggy behaviors if you are wrapping your own
navigations/fetchers in `startTransition`.
- When set to `true`, [`Link`](../components/Link) and [`Form`](../components/Form) navigations will be wrapped
in `React.startTransition` and router state changes will be wrapped in
`React.startTransition` and also sent through
[`useOptimistic`](https://react.dev/reference/react/useOptimistic) to
surface mid-navigation router state changes to the UI.
- When set to `false`, the router will not leverage `React.startTransition` or
`React.useOptimistic` on any navigations or state changes.
For more information, please see the [docs](https://reactrouter.com/explanation/react-transitions). | unknown | github | https://github.com/remix-run/react-router | docs/api/data-routers/RouterProvider.md |
module.exports = {
redirects() {
return [
{
source: "/",
destination: "/en",
permanent: true,
},
];
},
}; | javascript | github | https://github.com/vercel/next.js | examples/with-i18n-rosetta/next.config.js |
'''Just two examples for using rpy
These examples are mainly for developers.
# example 1: OLS using LM
# example 2: GLM with binomial family
The second result isn't exactly correct since it assumes that each
observation has the same number of trials; see datasets/longley for an R script
with the correct syntax.
See rmodelwrap.py in the tests folder for a convenience wrapper
to make rpy more like statsmodels. Note, however, that rmodelwrap
was created in a very ad hoc manner and due to the idiosyncracies in R
it does not work for all types of R models.
There are also R scripts included with most of the datasets to run
some basic models for comparisons of results to statsmodels.
'''
from rpy import r
import numpy as np
import statsmodels.api as sm
# Which examples to run; remove an entry to skip that example.
# NOTE: this is Python 2 code (print statements) using the legacy rpy bridge.
examples = [1, 2]

if 1 in examples:
    # Example 1: OLS on the Longley data via R's lm().
    data = sm.datasets.longley.load()
    y,x = data.endog, sm.add_constant(data.exog, prepend=False)
    # R formula of the form y ~ x.1 + ... + x.k - 1 (constant already in x).
    des_cols = ['x.%d' % (i+1) for i in range(x.shape[1])]
    formula = r('y~%s-1' % '+'.join(des_cols))
    frame = r.data_frame(y=y, x=x)
    results = r.lm(formula, data=frame)
    print results.keys()
    print results['coefficients']

if 2 in examples:
    # Example 2: binomial GLM on the Star98 data via R's glm().
    data2 = sm.datasets.star98.load()
    y2,x2 = data2.endog, sm.add_constant(data2.exog, prepend=False)
    import rpy
    # Convert the (successes, failures) counts into a success proportion.
    y2 = y2[:,0]/y2.sum(axis=1)
    des_cols2 = ['x.%d' % (i+1) for i in range(x2.shape[1])]
    formula2 = r('y~%s-1' % '+'.join(des_cols2))
    frame2 = r.data_frame(y=y2, x=x2)
    results2 = r.glm(formula2, data=frame2, family='binomial')
    # Coefficients come back as an R named list; sort keys for stable order.
    params_est = [results2['coefficients'][k] for k
                  in sorted(results2['coefficients'])]
    print params_est
    print ', '.join(['%13.10f']*21) % tuple(params_est)
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ConnectionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Create the API wrapper.

        :param api_client: shared ApiClient instance; a default client is
            created when None is given.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def connections_connection_id_delete(self, connection_id, **kwargs): # noqa: E501
"""Remove a connection # noqa: E501
Removes a connection from our system and revokes the consent. All the associated accounts and transactions to that connection will be destroyed as well. Salt Edge will send a [destroy](#destroy) callback to your web application. Make sure to specify the `Destroy URL` in your client account by accessing <a href='https://www.saltedge.com/clients/callbacks/edit' target=\"_blank\">callbacks page</a>. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connections_connection_id_delete(connection_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str connection_id: (required)
:return: RemovedConnectionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connections_connection_id_delete_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.connections_connection_id_delete_with_http_info(connection_id, **kwargs) # noqa: E501
return data
    def connections_connection_id_delete_with_http_info(self, connection_id, **kwargs):  # noqa: E501
        """Remove a connection  # noqa: E501

        Removes a connection from our system and revokes the consent. All the associated accounts and transactions to that connection will be destroyed as well. Salt Edge will send a [destroy](#destroy) callback to your web application. Make sure to specify the `Destroy URL` in your client account by accessing <a href='https://www.saltedge.com/clients/callbacks/edit' target=\"_blank\">callbacks page</a>.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connections_connection_id_delete_with_http_info(connection_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str connection_id: (required)
        :return: RemovedConnectionResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this generated method, including the
        # generic transport controls shared by every endpoint.
        all_params = ['connection_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Flatten **kwargs into the locals() dict, rejecting unknown names.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connections_connection_id_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'connection_id' is set
        if ('connection_id' not in params or
                params['connection_id'] is None):
            raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_delete`")  # noqa: E501

        collection_formats = {}

        # Path parameters substituted into the URL template below.
        path_params = {}
        if 'connection_id' in params:
            path_params['connection_id'] = params['connection_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['app_id', 'secret']  # noqa: E501

        # Delegate the actual HTTP call (and response deserialization) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/connections/{connection_id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RemovedConnectionResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def connections_connection_id_get(self, connection_id, **kwargs):  # noqa: E501
    """Show a connection.  # noqa: E501

    Fetch a single connection object. Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead of the data.

    >>> thread = api.connections_connection_id_get(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_connection_id_get_with_http_info(connection_id, **kwargs)  # noqa: E501
def connections_connection_id_get_with_http_info(self, connection_id, **kwargs):  # noqa: E501
    """Show a connection  # noqa: E501

    Returns a single connection object.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_connection_id_get_with_http_info(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['connection_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_connection_id_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'connection_id' is set
    if ('connection_id' not in params or
            params['connection_id'] is None):
        raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_get`")  # noqa: E501

    collection_formats = {}

    # connection_id is interpolated into the URL path template below.
    path_params = {}
    if 'connection_id' in params:
        path_params['connection_id'] = params['connection_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections/{connection_id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_connection_id_interactive_put(self, connection_id, **kwargs):  # noqa: E501
    """Interactive step.  # noqa: E501

    Send the user's interactive credentials for a connection that is
    waiting on them, so fetching can continue. Synchronous by default;
    pass ``async_req=True`` to receive the request thread instead.

    >>> thread = api.connections_connection_id_interactive_put(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param InteractiveConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_connection_id_interactive_put_with_http_info(connection_id, **kwargs)  # noqa: E501
def connections_connection_id_interactive_put_with_http_info(self, connection_id, **kwargs):  # noqa: E501
    """Interactive step  # noqa: E501

    If the currently fetching connection requires any interactive credentials for fetching, Salt Edge will send the [Interactive callback](#interactive). Make sure to specify the `Interactive URL` in your client account by accessing <a href='https://www.saltedge.com/clients/callbacks/edit' target=\"_blank\">callbacks page</a>. Upon receiving the interactive callback, your app should ask the user for the interactive credentials and send them to the `/interactive` route for the connection. After that, the fetching process will continue as usual.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_connection_id_interactive_put_with_http_info(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param InteractiveConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['connection_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_connection_id_interactive_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'connection_id' is set
    if ('connection_id' not in params or
            params['connection_id'] is None):
        raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_interactive_put`")  # noqa: E501

    collection_formats = {}

    # connection_id is interpolated into the URL path template below.
    path_params = {}
    if 'connection_id' in params:
        path_params['connection_id'] = params['connection_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional request body (interactive credentials) is sent as JSON.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections/{connection_id}/interactive', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_connection_id_put(self, connection_id, **kwargs):  # noqa: E501
    """Update connection.  # noqa: E501

    Update `status`, `store_credentials` or `daily_refresh` of a
    connection. Synchronous by default; pass ``async_req=True`` to
    receive the request thread instead.

    >>> thread = api.connections_connection_id_put(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param UpdateConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_connection_id_put_with_http_info(connection_id, **kwargs)  # noqa: E501
def connections_connection_id_put_with_http_info(self, connection_id, **kwargs):  # noqa: E501
    """Update connection  # noqa: E501

    Update `status`, `store_credentials` or `daily_refresh` of a connection.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_connection_id_put_with_http_info(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param UpdateConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['connection_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_connection_id_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'connection_id' is set
    if ('connection_id' not in params or
            params['connection_id'] is None):
        raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_put`")  # noqa: E501

    collection_formats = {}

    # connection_id is interpolated into the URL path template below.
    path_params = {}
    if 'connection_id' in params:
        path_params['connection_id'] = params['connection_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional request body (fields to update) is sent as JSON.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections/{connection_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_connection_id_reconnect_put(self, connection_id, **kwargs):  # noqa: E501
    """Reconnect a connection.  # noqa: E501

    Reconnect an existing connection by sending the credentials object,
    the connection's `id`, and the consent and/or attempt objects.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead.

    >>> thread = api.connections_connection_id_reconnect_put(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param ReconnectConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_connection_id_reconnect_put_with_http_info(connection_id, **kwargs)  # noqa: E501
def connections_connection_id_reconnect_put_with_http_info(self, connection_id, **kwargs):  # noqa: E501
    """Reconnect a connection  # noqa: E501

    In order to [reconnect](#connections-reconnect) a connection, your app needs to send the credentials object, connection's `id`, [consent object](#consents-object) and/or [attempt object](#attempts-object). This means that the consent confirmation should be handled on the client's side, and the 'access terms' the customer agreed on should be passed.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_connection_id_reconnect_put_with_http_info(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param ReconnectConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['connection_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_connection_id_reconnect_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'connection_id' is set
    if ('connection_id' not in params or
            params['connection_id'] is None):
        raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_reconnect_put`")  # noqa: E501

    collection_formats = {}

    # connection_id is interpolated into the URL path template below.
    path_params = {}
    if 'connection_id' in params:
        path_params['connection_id'] = params['connection_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional request body (credentials/consent/attempt) is sent as JSON.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections/{connection_id}/reconnect', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_connection_id_refresh_put(self, connection_id, **kwargs):  # noqa: E501
    """Refresh a connection.  # noqa: E501

    Trigger a refetch of the data associated with a specific connection.
    Requires an active consent. Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead.

    >>> thread = api.connections_connection_id_refresh_put(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param RefreshConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_connection_id_refresh_put_with_http_info(connection_id, **kwargs)  # noqa: E501
def connections_connection_id_refresh_put_with_http_info(self, connection_id, **kwargs):  # noqa: E501
    """Refresh a connection  # noqa: E501

    Allows you to trigger a refetch of the data associated with a specific connection. Note that you can refresh a connection only if it has an active [consent](#consents). If the response is successful, it will contain the `next_refresh_possible_at` value, and you can expect the [usual callbacks](#callbacks) of the fetching workflow.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_connection_id_refresh_put_with_http_info(connection_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str connection_id: (required)
    :param RefreshConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['connection_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_connection_id_refresh_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'connection_id' is set
    if ('connection_id' not in params or
            params['connection_id'] is None):
        raise ValueError("Missing the required parameter `connection_id` when calling `connections_connection_id_refresh_put`")  # noqa: E501

    collection_formats = {}

    # connection_id is interpolated into the URL path template below.
    path_params = {}
    if 'connection_id' in params:
        path_params['connection_id'] = params['connection_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional request body (refresh options) is sent as JSON.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections/{connection_id}/refresh', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_get(self, customer_id, **kwargs):  # noqa: E501
    """List of connections.  # noqa: E501

    Return all connections accessible to your application for a given
    customer, sorted in ascending order of their `id`. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead.

    >>> thread = api.connections_get(customer_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str customer_id: (required)
    :param str from_id:
    :return: ConnectionsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_get_with_http_info(customer_id, **kwargs)  # noqa: E501
def connections_get_with_http_info(self, customer_id, **kwargs):  # noqa: E501
    """List of connections  # noqa: E501

    Returns all the connections accessible to your application for a certain customer. The connections are sorted in ascending order of their `id`, so the newest connections will come last. We recommend you fetch the whole list of connections to check whether any of the properties have changed.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_get_with_http_info(customer_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str customer_id: (required)
    :param str from_id:
    :return: ConnectionsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['customer_id', 'from_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'customer_id' is set
    if ('customer_id' not in params or
            params['customer_id'] is None):
        raise ValueError("Missing the required parameter `customer_id` when calling `connections_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # customer_id (required) and from_id (pagination cursor) are sent as
    # query-string parameters.
    query_params = []
    if 'customer_id' in params:
        query_params.append(('customer_id', params['customer_id']))  # noqa: E501
    if 'from_id' in params:
        query_params.append(('from_id', params['from_id']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionsResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def connections_post(self, **kwargs):  # noqa: E501
    """Create a connection.  # noqa: E501

    Create a connection by passing the user's values for the provider's
    fields within the payload (when not using Salt Edge Connect).
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead.

    >>> thread = api.connections_post(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Async and sync paths reduce to the same delegate call: the async
    # branch yields the thread, the sync branch yields its data directly.
    return self.connections_post_with_http_info(**kwargs)  # noqa: E501
def connections_post_with_http_info(self, **kwargs):  # noqa: E501
    """Create a connection  # noqa: E501

    When not using [Salt Edge Connect](#salt_edge_connect), your app will have to pass the user's values of provider's [fields](#providers-fields) within the payload. The credentials object should be modeled after the provider's fields.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connections_post_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateConnectionRequestBody body:
    :return: ConnectionResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters this endpoint accepts, plus the client-level options every
    # generated method supports.
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then fold validated keyword arguments
    # into it; unknown kwargs are rejected eagerly. No required
    # parameters for this endpoint.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connections_post" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional request body (provider credentials) is sent as JSON.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['app_id', 'secret']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/connections', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConnectionResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
import unittest2
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2actions.container.service import RunnerContainerService
from st2tests import config as tests_config
class RunnerContainerServiceTest(unittest2.TestCase):
    """Unit tests for RunnerContainerService path-resolution helpers.

    Each test temporarily overrides ``content.system_packs_base_path``.
    The original code restored the option only on the success path, so a
    failing assertion leaked the fake path into subsequent tests; every
    override is now undone in a ``finally`` block.
    """

    @classmethod
    def setUpClass(cls):
        # Register st2 config options before any test touches cfg.CONF.
        tests_config.parse_args()

    def _patch_base_path(self, value):
        """Override the packs base path; return the previous value so the
        caller can restore it in a ``finally`` block."""
        orig = cfg.CONF.content.system_packs_base_path
        cfg.CONF.content.system_packs_base_path = value
        return orig

    def test_get_pack_base_path(self):
        orig_path = self._patch_base_path('/tests/packs')
        try:
            names = [
                'test_pack_1',
                'test_pack_2',
                'ma_pack'
            ]
            for name in names:
                actual = RunnerContainerService().get_pack_base_path(pack_name=name)
                expected = os.path.join(cfg.CONF.content.system_packs_base_path,
                                        name)
                self.assertEqual(actual, expected)
        finally:
            cfg.CONF.content.system_packs_base_path = orig_path

    def test_get_entry_point_absolute_path(self):
        # An absolute entry point must be returned unchanged.
        service = RunnerContainerService()
        orig_path = self._patch_base_path('/tests/packs')
        try:
            actual_path = service.get_entry_point_abs_path(
                pack='foo', entry_point='/tests/packs/foo/bar.py')
            self.assertEqual(actual_path, '/tests/packs/foo/bar.py',
                             'Entry point path doesn\'t match.')
        finally:
            cfg.CONF.content.system_packs_base_path = orig_path

    def test_get_entry_point_absolute_path_empty(self):
        # Both None and '' entry points must resolve to None.
        service = RunnerContainerService()
        orig_path = self._patch_base_path('/tests/packs')
        try:
            actual_path = service.get_entry_point_abs_path(pack='foo', entry_point=None)
            self.assertEqual(actual_path, None, 'Entry point path doesn\'t match.')
            actual_path = service.get_entry_point_abs_path(pack='foo', entry_point='')
            self.assertEqual(actual_path, None, 'Entry point path doesn\'t match.')
        finally:
            cfg.CONF.content.system_packs_base_path = orig_path

    def test_get_entry_point_relative_path(self):
        # A relative entry point is resolved under <base>/<pack>/actions/.
        service = RunnerContainerService()
        orig_path = self._patch_base_path('/tests/packs')
        try:
            actual_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
            expected_path = os.path.join(cfg.CONF.content.system_packs_base_path,
                                         'foo', 'actions', 'foo/bar.py')
            self.assertEqual(actual_path, expected_path, 'Entry point path doesn\'t match.')
        finally:
            cfg.CONF.content.system_packs_base_path = orig_path

    def test_get_action_libs_abs_path(self):
        service = RunnerContainerService()
        orig_path = self._patch_base_path('/tests/packs')
        try:
            # entry point relative.
            actual_path = service.get_action_libs_abs_path(pack='foo', entry_point='foo/bar.py')
            expected_path = os.path.join(cfg.CONF.content.system_packs_base_path,
                                         'foo', 'actions',
                                         os.path.join('foo', ACTION_LIBS_DIR))
            self.assertEqual(actual_path, expected_path, 'Action libs path doesn\'t match.')
            # entry point absolute.
            actual_path = service.get_action_libs_abs_path(
                pack='foo', entry_point='/tests/packs/foo/tmp/foo.py')
            expected_path = os.path.join('/tests/packs/foo/tmp', ACTION_LIBS_DIR)
            self.assertEqual(actual_path, expected_path, 'Action libs path doesn\'t match.')
        finally:
            cfg.CONF.content.system_packs_base_path = orig_path
# settings file for builds.
# if you want to have custom builds, copy this file to "localbuildsettings.py" and make changes there.
# possible fields:
# resourceBaseUrl - optional - the URL base for external resources (all resources embedded in standard IITC)
# distUrlBase - optional - the base URL to use for update checks
# buildMobile - optional - if set, mobile builds are built with 'ant'. requires the Android SDK and appropriate mobile/local.properties file configured
# preBuild - optional - an array of strings to run as commands, via os.system, before building the scripts
# postBuild - optional - an array of strings to run as commands, via os.system, after all builds are complete
# Mapping of build name -> build options consumed by build.py.
# Recognized option keys:
#   resourceUrlBase - URL base for external resources (None = all embedded)
#   distUrlBase     - base URL used for update checks (None = no checks)
#   buildMobile     - ant build target; presence triggers the mobile .apk build
buildSettings = {

    # local: use this build if you're not modifying external resources
    # no external resources allowed - they're not needed any more
    'local': {
        'resourceUrlBase': None,
        'distUrlBase': None,
    },

    # local8000: if you need to modify external resources, this build will load them from
    # the web server at http://0.0.0.0:8000/dist
    # (This shouldn't be required any more - all resources are embedded. but, it remains just in case some new feature
    # needs external resources)
    'local8000': {
        'resourceUrlBase': 'http://0.0.0.0:8000/dist',
        'distUrlBase': None,
    },

    # mobile: default entry that also builds the mobile .apk
    # you will need to have the android-sdk installed, and the file mobile/local.properties created as required
    'mobile': {
        'resourceUrlBase': None,
        'distUrlBase': None,
        'buildMobile': 'debug',
    },

    # if you want to publish your own fork of the project, and host it on your own web site
    # create a localbuildsettings.py file containing something similar to this
    # note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on regular "http" URLs
    #'example': {
    #    'resourceBaseUrl': 'http://www.example.com/iitc/dist',
    #    'distUrlBase': 'https://secure.example.com/iitc/dist',
    #},
}
# defaultBuild - the name of the default build to use if none is specified on the build.py command line
# (in here as an example - it only works in localbuildsettings.py)
#defaultBuild = 'local' | unknown | codeparrot/codeparrot-clean | ||
'''
This file contains all the necessary functions for encrypting the message
'''
#import the necessary files
import sys, pickle, ReadFromFile
DEFAULT = 128    # default block size, in bytes
BYTE_SIZE = 256  # one byte has 256 different values according to ASCII


def GetBlocksFromText(message, blockSize=DEFAULT):
    """Convert *message* into a list of large-integer blocks.

    The message is ASCII-encoded and split into chunks of up to
    ``blockSize`` bytes. Each chunk is packed little-endian in base 256:
    the byte at offset ``k`` within its chunk contributes
    ``byte * BYTE_SIZE ** k`` to that chunk's integer.

    :param message: text to pack; must be ASCII-encodable
    :param blockSize: maximum number of bytes per block
    :return: list of non-negative block integers (empty for '')
    """
    data = message.encode('ascii')
    blocks = []
    for start in range(0, len(data), blockSize):
        chunk = data[start:start + blockSize]
        # Fold the chunk's bytes into one integer, least-significant first.
        blocks.append(sum(byte * BYTE_SIZE ** offset
                          for offset, byte in enumerate(chunk)))
    return blocks
'''
Encrypt the message block using the formula c = (m ^ e) % n
'''
def EncryptMessage(message, key, blockSize = DEFAULT):
encrBlock = []
n, e = key
for block in GetBlocksFromText(message, blockSize):
encrBlock.append(pow(block, e, n))
return encrBlock
'''
Get the key file and check whether the block size is equal to or less than key size
'''
def Encrypt(message, filename, keyFile, blockSize = DEFAULT):
#imported file
keySize, n, e = ReadFromFile.ReadFromFile(keyFile)
#check for block size
if keySize < blockSize * 8:
sys.exit("ERROR: Block size is %s-bits and key size is %s-bits. RSA cipher requires the block size to be equal to or less than the key size."%(blockSize * 8, keySize))
#encrypted block
encrypted_block = EncryptMessage(message, (n, e), blockSize)
#convert the bytes form of message to string
for i in range (len(encrypted_block)):
encrypted_block[i] = str(encrypted_block[i])
#if the message was greater than one block
encrypted_content = ','.join(encrypted_block)
encrypted_content = '%s_%s_%s'%(len(message), blockSize, encrypted_content)
#write the encrypted message to a file
try:
with open(filename, 'wb') as encrpM:
pickle.dump(encrypted_content, encrpM)
except pickle.PickleError as pk:
print ("File Error: " + str(pk))
return encrypted_content | unknown | codeparrot/codeparrot-clean | ||
import {
Comment,
type Component,
type ComponentInternalInstance,
type DirectiveBinding,
Fragment,
type FunctionalComponent,
Static,
Text,
type VNode,
type VNodeArrayChildren,
type VNodeProps,
mergeProps,
ssrUtils,
warn,
} from 'vue'
import {
NOOP,
ShapeFlags,
escapeHtml,
escapeHtmlComment,
isArray,
isFunction,
isPromise,
isString,
isVoidTag,
} from '@vue/shared'
import { ssrRenderAttrs } from './helpers/ssrRenderAttrs'
import { ssrCompile } from './helpers/ssrCompile'
import { ssrRenderTeleport } from './helpers/ssrRenderTeleport'
const {
createComponentInstance,
setCurrentRenderingInstance,
setupComponent,
renderComponentRoot,
normalizeVNode,
pushWarningContext,
popWarningContext,
} = ssrUtils
export type SSRBuffer = SSRBufferItem[] & { hasAsync?: boolean }
export type SSRBufferItem = string | SSRBuffer | Promise<SSRBuffer>
export type PushFn = (item: SSRBufferItem) => void
export type Props = Record<string, unknown>
export type SSRContext = {
[key: string]: any
teleports?: Record<string, string>
/**
* @internal
*/
__teleportBuffers?: Record<string, SSRBuffer>
/**
* @internal
*/
__watcherHandles?: (() => void)[]
}
// Each component has a buffer array.
// A buffer array can contain one of the following:
// - plain string
// - A resolved buffer (recursive arrays of strings that can be unrolled
// synchronously)
// - An async buffer (a Promise that resolves to a resolved buffer)
export function createBuffer() {
let appendable = false
const buffer: SSRBuffer = []
return {
getBuffer(): SSRBuffer {
// Return static buffer and await on items during unroll stage
return buffer
},
push(item: SSRBufferItem): void {
const isStringItem = isString(item)
if (appendable && isStringItem) {
buffer[buffer.length - 1] += item as string
return
}
buffer.push(item)
appendable = isStringItem
if (isPromise(item) || (isArray(item) && item.hasAsync)) {
// promise, or child buffer with async, mark as async.
// this allows skipping unnecessary await ticks during unroll stage
buffer.hasAsync = true
}
},
}
}
export function renderComponentVNode(
vnode: VNode,
parentComponent: ComponentInternalInstance | null = null,
slotScopeId?: string,
): SSRBuffer | Promise<SSRBuffer> {
const instance = (vnode.component = createComponentInstance(
vnode,
parentComponent,
null,
))
if (__DEV__) pushWarningContext(vnode)
const res = setupComponent(instance, true /* isSSR */)
if (__DEV__) popWarningContext()
const hasAsyncSetup = isPromise(res)
let prefetches = instance.sp /* LifecycleHooks.SERVER_PREFETCH */
if (hasAsyncSetup || prefetches) {
const p: Promise<unknown> = Promise.resolve(res as Promise<void>)
.then(() => {
// instance.sp may be null until an async setup resolves, so evaluate it here
if (hasAsyncSetup) prefetches = instance.sp
if (prefetches) {
return Promise.all(
prefetches.map(prefetch => prefetch.call(instance.proxy)),
)
}
})
// Note: error display is already done by the wrapped lifecycle hook function.
.catch(NOOP)
return p.then(() => renderComponentSubTree(instance, slotScopeId))
} else {
return renderComponentSubTree(instance, slotScopeId)
}
}
function renderComponentSubTree(
instance: ComponentInternalInstance,
slotScopeId?: string,
): SSRBuffer | Promise<SSRBuffer> {
if (__DEV__) pushWarningContext(instance.vnode)
const comp = instance.type as Component
const { getBuffer, push } = createBuffer()
if (isFunction(comp)) {
let root = renderComponentRoot(instance)
// #5817 scope ID attrs not falling through if functional component doesn't
// have props
if (!(comp as FunctionalComponent).props) {
for (const key in instance.attrs) {
if (key.startsWith(`data-v-`)) {
;(root.props || (root.props = {}))[key] = ``
}
}
}
renderVNode(push, (instance.subTree = root), instance, slotScopeId)
} else {
if (
(!instance.render || instance.render === NOOP) &&
!instance.ssrRender &&
!comp.ssrRender &&
isString(comp.template)
) {
comp.ssrRender = ssrCompile(comp.template, instance)
}
const ssrRender = instance.ssrRender || comp.ssrRender
if (ssrRender) {
// optimized
// resolve fallthrough attrs
let attrs = instance.inheritAttrs !== false ? instance.attrs : undefined
let hasCloned = false
let cur = instance
while (true) {
const scopeId = cur.vnode.scopeId
if (scopeId) {
if (!hasCloned) {
attrs = { ...attrs }
hasCloned = true
}
attrs![scopeId] = ''
}
const parent = cur.parent
if (parent && parent.subTree && parent.subTree === cur.vnode) {
// parent is a non-SSR compiled component and is rendering this
// component as root. inherit its scopeId if present.
cur = parent
} else {
break
}
}
if (slotScopeId) {
if (!hasCloned) attrs = { ...attrs }
const slotScopeIdList = slotScopeId.trim().split(' ')
for (let i = 0; i < slotScopeIdList.length; i++) {
attrs![slotScopeIdList[i]] = ''
}
}
// set current rendering instance for asset resolution
const prev = setCurrentRenderingInstance(instance)
try {
ssrRender(
instance.proxy,
push,
instance,
attrs,
// compiler-optimized bindings
instance.props,
instance.setupState,
instance.data,
instance.ctx,
)
} finally {
setCurrentRenderingInstance(prev)
}
} else if (instance.render && instance.render !== NOOP) {
renderVNode(
push,
(instance.subTree = renderComponentRoot(instance)),
instance,
slotScopeId,
)
} else {
const componentName = comp.name || comp.__file || `<Anonymous>`
warn(`Component ${componentName} is missing template or render function.`)
push(`<!---->`)
}
}
if (__DEV__) popWarningContext()
return getBuffer()
}
export function renderVNode(
push: PushFn,
vnode: VNode,
parentComponent: ComponentInternalInstance,
slotScopeId?: string,
): void {
const { type, shapeFlag, children, dirs, props } = vnode
if (dirs) {
vnode.props = applySSRDirectives(vnode, props, dirs)
}
switch (type) {
case Text:
push(escapeHtml(children as string))
break
case Comment:
push(
children
? `<!--${escapeHtmlComment(children as string)}-->`
: `<!---->`,
)
break
case Static:
push(children as string)
break
case Fragment:
if (vnode.slotScopeIds) {
slotScopeId =
(slotScopeId ? slotScopeId + ' ' : '') + vnode.slotScopeIds.join(' ')
}
push(`<!--[-->`) // open
renderVNodeChildren(
push,
children as VNodeArrayChildren,
parentComponent,
slotScopeId,
)
push(`<!--]-->`) // close
break
default:
if (shapeFlag & ShapeFlags.ELEMENT) {
renderElementVNode(push, vnode, parentComponent, slotScopeId)
} else if (shapeFlag & ShapeFlags.COMPONENT) {
push(renderComponentVNode(vnode, parentComponent, slotScopeId))
} else if (shapeFlag & ShapeFlags.TELEPORT) {
renderTeleportVNode(push, vnode, parentComponent, slotScopeId)
} else if (shapeFlag & ShapeFlags.SUSPENSE) {
renderVNode(push, vnode.ssContent!, parentComponent, slotScopeId)
} else {
warn(
'[@vue/server-renderer] Invalid VNode type:',
type,
`(${typeof type})`,
)
}
}
}
export function renderVNodeChildren(
push: PushFn,
children: VNodeArrayChildren,
parentComponent: ComponentInternalInstance,
slotScopeId?: string,
): void {
for (let i = 0; i < children.length; i++) {
renderVNode(push, normalizeVNode(children[i]), parentComponent, slotScopeId)
}
}
function renderElementVNode(
push: PushFn,
vnode: VNode,
parentComponent: ComponentInternalInstance,
slotScopeId?: string,
) {
const tag = vnode.type as string
let { props, children, shapeFlag, scopeId } = vnode
let openTag = `<${tag}`
if (props) {
openTag += ssrRenderAttrs(props, tag)
}
if (scopeId) {
openTag += ` ${scopeId}`
}
// inherit parent chain scope id if this is the root node
let curParent: ComponentInternalInstance | null = parentComponent
let curVnode = vnode
while (curParent && curVnode === curParent.subTree) {
curVnode = curParent.vnode
if (curVnode.scopeId) {
openTag += ` ${curVnode.scopeId}`
}
curParent = curParent.parent
}
if (slotScopeId) {
openTag += ` ${slotScopeId}`
}
push(openTag + `>`)
if (!isVoidTag(tag)) {
let hasChildrenOverride = false
if (props) {
if (props.innerHTML) {
hasChildrenOverride = true
push(props.innerHTML)
} else if (props.textContent) {
hasChildrenOverride = true
push(escapeHtml(props.textContent))
} else if (tag === 'textarea' && props.value) {
hasChildrenOverride = true
push(escapeHtml(props.value))
}
}
if (!hasChildrenOverride) {
if (shapeFlag & ShapeFlags.TEXT_CHILDREN) {
push(escapeHtml(children as string))
} else if (shapeFlag & ShapeFlags.ARRAY_CHILDREN) {
renderVNodeChildren(
push,
children as VNodeArrayChildren,
parentComponent,
slotScopeId,
)
}
}
push(`</${tag}>`)
}
}
function applySSRDirectives(
vnode: VNode,
rawProps: VNodeProps | null,
dirs: DirectiveBinding[],
): VNodeProps {
const toMerge: VNodeProps[] = []
for (let i = 0; i < dirs.length; i++) {
const binding = dirs[i]
const {
dir: { getSSRProps },
} = binding
if (getSSRProps) {
const props = getSSRProps(binding, vnode)
if (props) toMerge.push(props)
}
}
return mergeProps(rawProps || {}, ...toMerge)
}
function renderTeleportVNode(
push: PushFn,
vnode: VNode,
parentComponent: ComponentInternalInstance,
slotScopeId?: string,
) {
const target = vnode.props && vnode.props.to
const disabled = vnode.props && vnode.props.disabled
if (!target) {
if (!disabled) {
warn(`[@vue/server-renderer] Teleport is missing target prop.`)
}
return []
}
if (!isString(target)) {
warn(
`[@vue/server-renderer] Teleport target must be a query selector string.`,
)
return []
}
ssrRenderTeleport(
push,
push => {
renderVNodeChildren(
push,
vnode.children as VNodeArrayChildren,
parentComponent,
slotScopeId,
)
},
target,
disabled || disabled === '',
parentComponent,
)
} | typescript | github | https://github.com/vuejs/core | packages/server-renderer/src/render.ts |
#ifndef INTERNAL_RANDOM_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_RANDOM_H
/**
* @author Ruby developers <ruby-core@ruby-lang.org>
* @copyright This file is a part of the programming language Ruby.
* Permission is hereby granted, to either redistribute and/or
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
* @brief Internal header for Random.
*/
#include <stddef.h> /* for size_t */
/* random.c */
int ruby_fill_random_bytes(void *, size_t, int);
void rb_free_default_rand_key(void);
#endif /* INTERNAL_RANDOM_H */ | c | github | https://github.com/ruby/ruby | internal/random.h |
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0) | unknown | codeparrot/codeparrot-clean | ||
"""Test rate limiter."""
import time
import pytest
from freezegun import freeze_time
from langchain_core.rate_limiters import InMemoryRateLimiter
@pytest.fixture
def rate_limiter() -> InMemoryRateLimiter:
"""Return an instance of InMemoryRateLimiter."""
return InMemoryRateLimiter(
requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=2
)
def test_initial_state(rate_limiter: InMemoryRateLimiter) -> None:
"""Test the initial state of the rate limiter."""
assert rate_limiter.available_tokens == 0.0
def test_sync_wait(rate_limiter: InMemoryRateLimiter) -> None:
with freeze_time("2023-01-01 00:00:00") as frozen_time:
rate_limiter.last = time.time()
assert not rate_limiter.acquire(blocking=False)
frozen_time.tick(0.1) # Increment by 0.1 seconds
assert rate_limiter.available_tokens == 0
assert not rate_limiter.acquire(blocking=False)
frozen_time.tick(0.1) # Increment by 0.1 seconds
assert rate_limiter.available_tokens == 0
assert not rate_limiter.acquire(blocking=False)
frozen_time.tick(1.8)
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 1.0
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 0
frozen_time.tick(2.1)
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 1
frozen_time.tick(0.9)
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 1
# Check max bucket size
frozen_time.tick(100)
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 1
async def test_async_wait(rate_limiter: InMemoryRateLimiter) -> None:
with freeze_time("2023-01-01 00:00:00") as frozen_time:
rate_limiter.last = time.time()
assert not await rate_limiter.aacquire(blocking=False)
frozen_time.tick(0.1) # Increment by 0.1 seconds
assert rate_limiter.available_tokens == 0
assert not await rate_limiter.aacquire(blocking=False)
frozen_time.tick(0.1) # Increment by 0.1 seconds
assert rate_limiter.available_tokens == 0
assert not await rate_limiter.aacquire(blocking=False)
frozen_time.tick(1.8)
assert await rate_limiter.aacquire(blocking=False)
assert rate_limiter.available_tokens == 1.0
assert await rate_limiter.aacquire(blocking=False)
assert rate_limiter.available_tokens == 0
frozen_time.tick(2.1)
assert await rate_limiter.aacquire(blocking=False)
assert rate_limiter.available_tokens == 1
frozen_time.tick(0.9)
assert await rate_limiter.aacquire(blocking=False)
assert rate_limiter.available_tokens == 1
def test_sync_wait_max_bucket_size() -> None:
with freeze_time("2023-01-01 00:00:00") as frozen_time:
rate_limiter = InMemoryRateLimiter(
requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=500
)
rate_limiter.last = time.time()
frozen_time.tick(100) # Increment by 100 seconds
assert rate_limiter.acquire(blocking=False)
# After 100 seconds we manage to refill the bucket with 200 tokens
# After consuming 1 token, we should have 199 tokens left
assert rate_limiter.available_tokens == 199.0
frozen_time.tick(10000)
assert rate_limiter.acquire(blocking=False)
assert rate_limiter.available_tokens == 499.0
# Assert that sync wait can proceed without blocking
# since we have enough tokens
rate_limiter.acquire(blocking=True)
async def test_async_wait_max_bucket_size() -> None:
with freeze_time("2023-01-01 00:00:00") as frozen_time:
rate_limiter = InMemoryRateLimiter(
requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=500
)
rate_limiter.last = time.time()
frozen_time.tick(100) # Increment by 100 seconds
assert await rate_limiter.aacquire(blocking=False)
# After 100 seconds we manage to refill the bucket with 200 tokens
# After consuming 1 token, we should have 199 tokens left
assert rate_limiter.available_tokens == 199.0
frozen_time.tick(10000)
assert await rate_limiter.aacquire(blocking=False)
assert rate_limiter.available_tokens == 499.0
# Assert that sync wait can proceed without blocking
# since we have enough tokens
await rate_limiter.aacquire(blocking=True) | python | github | https://github.com/langchain-ai/langchain | libs/core/tests/unit_tests/rate_limiters/test_in_memory_rate_limiter.py |
"""
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec', *,
          type_comments=False, feature_version=None, optimize=-1, module=None):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    Pass type_comments=True to get back type comments where the syntax allows.

    feature_version may be None (current grammar), a minor-version int, or a
    (3, minor) tuple selecting an older 3.x grammar.  optimize > 0 requests
    an optimized (constant-folded) AST in addition to parsing.
    """
    flags = PyCF_ONLY_AST
    if optimize > 0:
        # Positive optimization levels also ask the compiler to return an
        # optimized AST rather than the raw parse tree.
        flags |= PyCF_OPTIMIZED_AST
    if type_comments:
        flags |= PyCF_TYPE_COMMENTS
    if feature_version is None:
        # -1 tells compile() to use the running interpreter's grammar.
        feature_version = -1
    elif isinstance(feature_version, tuple):
        major, minor = feature_version  # Should be a 2-tuple.
        if major != 3:
            raise ValueError(f"Unsupported major version: {major}")
        feature_version = minor
    # Else it should be an int giving the minor version for 3.x.
    # NOTE(review): `_feature_version` and `module` are private /
    # branch-specific compile() parameters -- confirm the target
    # interpreter's compile() actually accepts them.
    return compile(source, filename, mode, flags,
                   _feature_version=feature_version, optimize=optimize,
                   module=module)
def literal_eval(node_or_string):
    """Evaluate a node or source string containing only Python literals.

    The input may only consist of strings, bytes, numbers, tuples, lists,
    dicts, sets, booleans, and None; anything else raises ValueError.

    Caution: A complex expression can overflow the C stack and cause a crash.
    """
    node = node_or_string
    if isinstance(node, str):
        # Leading indentation would be a SyntaxError in 'eval' mode.
        node = parse(node.lstrip(" \t"), mode='eval').body
    elif isinstance(node, Expression):
        node = node.body
    return _convert_literal(node)
def _convert_literal(node):
    """Recursively turn an AST literal *node* into the value it denotes.

    Helper for `literal_eval`; raises ValueError for any node that is not
    a pure literal construct.
    """
    # Plain constants: numbers, strings, bytes, booleans, None, Ellipsis.
    if isinstance(node, Constant):
        return node.value
    # Container displays: convert each element recursively.
    if isinstance(node, Tuple):
        return tuple(_convert_literal(elt) for elt in node.elts)
    if isinstance(node, List):
        return [_convert_literal(elt) for elt in node.elts]
    if isinstance(node, Set):
        return {_convert_literal(elt) for elt in node.elts}
    if isinstance(node, Dict) and len(node.keys) == len(node.values):
        return {
            _convert_literal(key): _convert_literal(value)
            for key, value in zip(node.keys, node.values)
        }
    # The empty set has no literal syntax; accept a bare ``set()`` call.
    if (isinstance(node, Call) and isinstance(node.func, Name)
            and node.func.id == 'set'
            and node.args == [] and node.keywords == []):
        return set()
    # Unary +/- applied directly to a numeric constant.
    if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
        inner = node.operand
        if isinstance(inner, Constant) and type(inner.value) in (int, float, complex):
            return +inner.value if isinstance(node.op, UAdd) else -inner.value
    # Complex literals such as ``1 + 2j``: real part +/- imaginary part.
    if (isinstance(node, BinOp) and isinstance(node.op, (Add, Sub))
            and isinstance(node.left, (Constant, UnaryOp))
            and isinstance(node.right, Constant)):
        left = _convert_literal(node.left)
        right = _convert_literal(node.right)
        if type(left) in (int, float) and type(right) is complex:
            return left + right if isinstance(node.op, Add) else left - right
    message = "malformed node or string"
    if line := getattr(node, 'lineno', None):
        message += f" on line {line}"
    raise ValueError(f"{message}: {node!r}")
def dump(
    node, annotate_fields=True, include_attributes=False,
    *,
    indent=None, show_empty=False,
):
    """
    Return a formatted dump of the tree in node. This is mainly useful for
    debugging purposes. If annotate_fields is true (by default),
    the returned string will show the names and the values for fields.
    If annotate_fields is false, the result string will be more compact by
    omitting unambiguous field names. Attributes such as line
    numbers and column offsets are not dumped by default. If this is wanted,
    include_attributes can be set to true. If indent is a non-negative
    integer or string, then the tree will be pretty-printed with that indent
    level. None (the default) selects the single line representation.
    If show_empty is False, then empty lists and fields that are None
    will be omitted from the output for better readability.
    """
    # _format returns a pair (text, simple); `simple` means the rendering is
    # flat enough that a parent may keep it on a single line.
    def _format(node, level=0):
        if indent is not None:
            level += 1
            prefix = '\n' + indent * level
            sep = ',\n' + indent * level
        else:
            # Single-line mode: no newlines, plain comma separators.
            prefix = ''
            sep = ', '
        if isinstance(node, AST):
            cls = type(node)
            args = []
            # Positional values omitted by show_empty are parked here; they are
            # re-emitted only if a later positional field needs its slot kept.
            args_buffer = []
            allsimple = True
            keywords = annotate_fields
            for name in node._fields:
                try:
                    value = getattr(node, name)
                except AttributeError:
                    # Field missing on the instance: every following field must
                    # use keyword form so positional order stays unambiguous.
                    keywords = True
                    continue
                if value is None and getattr(cls, name, ...) is None:
                    # Field equals its declared default of None: omit it.
                    keywords = True
                    continue
                if not show_empty:
                    if value == []:
                        field_type = cls._field_types.get(name, object)
                        if getattr(field_type, '__origin__', ...) is list:
                            # Empty list in a list-typed field: defer, may be
                            # needed later only to keep positional slots valid.
                            if not keywords:
                                args_buffer.append(repr(value))
                            continue
                    elif isinstance(value, Load):
                        field_type = cls._field_types.get(name, object)
                        if field_type is expr_context:
                            # Load() is the default expr context: defer likewise.
                            if not keywords:
                                args_buffer.append(repr(value))
                            continue
                if not keywords:
                    # Emitting a later positional value: flush the deferred ones
                    # first so positional argument slots stay aligned.
                    args.extend(args_buffer)
                    args_buffer = []
                value, simple = _format(value, level)
                allsimple = allsimple and simple
                if keywords:
                    args.append('%s=%s' % (name, value))
                else:
                    args.append(value)
            if include_attributes and node._attributes:
                for name in node._attributes:
                    try:
                        value = getattr(node, name)
                    except AttributeError:
                        continue
                    if value is None and getattr(cls, name, ...) is None:
                        continue
                    value, simple = _format(value, level)
                    allsimple = allsimple and simple
                    args.append('%s=%s' % (name, value))
            if allsimple and len(args) <= 3:
                # Short, flat node: render on one line regardless of `indent`.
                return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args
            return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False
        elif isinstance(node, list):
            if not node:
                return '[]', True
            return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False
        return repr(node), True
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    if indent is not None and not isinstance(indent, str):
        # A numeric indent means "that many spaces per level".
        indent = ' ' * indent
    return _format(node)[0]
def copy_location(new_node, old_node):
    """Transfer source positions from *old_node* onto *new_node*.

    The `lineno`, `col_offset`, `end_lineno` and `end_col_offset`
    attributes are copied when both nodes support them; *new_node* is
    returned for convenience.
    """
    location_attrs = ('lineno', 'col_offset', 'end_lineno', 'end_col_offset')
    for attr in location_attrs:
        if attr not in old_node._attributes or attr not in new_node._attributes:
            continue
        value = getattr(old_node, attr, None)
        # The end_* attributes are optional and may legitimately be None, so
        # they are copied whenever they are present on the old node at all.
        if value is not None or (attr.startswith("end_") and hasattr(old_node, attr)):
            setattr(new_node, attr, value)
    return new_node
def fix_missing_locations(node):
    """Recursively fill in missing location attributes below *node*.

    compile() expects lineno/col_offset (and the end_* variants) on every
    node that supports them, which is tedious to set on generated nodes.
    Missing values are inherited from the nearest ancestor that has them,
    starting from line 1, column 0.  Returns *node*.
    """
    start_attrs = ('lineno', 'col_offset')
    end_attrs = ('end_lineno', 'end_col_offset')

    def _fill(current, inherited):
        # Work on a per-node snapshot so sibling subtrees stay independent.
        inherited = dict(inherited)
        for attr in start_attrs:
            if attr in current._attributes:
                if hasattr(current, attr):
                    inherited[attr] = getattr(current, attr)
                else:
                    setattr(current, attr, inherited[attr])
        for attr in end_attrs:
            if attr in current._attributes:
                # end positions are optional and may be present but None;
                # treat None the same as missing.
                if getattr(current, attr, None) is None:
                    setattr(current, attr, inherited[attr])
                else:
                    inherited[attr] = getattr(current, attr)
        for child in iter_child_nodes(current):
            _fill(child, inherited)

    _fill(node, {'lineno': 1, 'col_offset': 0,
                 'end_lineno': 1, 'end_col_offset': 0})
    return node
def increment_lineno(node, n=1):
    """Shift every line number in the tree rooted at *node* down by *n*.

    Both `lineno` and `end_lineno` are adjusted, which makes it possible
    to "move code" to a different location in a file.  Returns *node*.
    """
    for descendant in walk(node):
        if isinstance(descendant, TypeIgnore):
            # TypeIgnore keeps lineno as a regular field rather than as a
            # location attribute, so handle it separately.
            descendant.lineno = getattr(descendant, 'lineno', 0) + n
            continue
        attrs = descendant._attributes
        if 'lineno' in attrs:
            descendant.lineno = getattr(descendant, 'lineno', 0) + n
        if 'end_lineno' in attrs:
            end = getattr(descendant, 'end_lineno', 0)
            # end_lineno is optional and may be None; leave it untouched then.
            if end is not None:
                descendant.end_lineno = end + n
    return node
def iter_fields(node):
    """Yield ``(fieldname, value)`` pairs for every field present on *node*.

    Names listed in ``node._fields`` but absent from the instance are
    silently skipped.
    """
    missing = object()  # sentinel: distinguishes "absent" from a real None
    for name in node._fields:
        value = getattr(node, name, missing)
        if value is not missing:
            yield name, value
def iter_child_nodes(node):
    """
    Yield all direct child nodes of *node*, that is, all fields that are nodes
    and all items of fields that are lists of nodes.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            # Repeated fields hold lists; only AST items are children.
            yield from (item for item in value if isinstance(item, AST))
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found. If the node provided does not have docstrings a TypeError
    will be raised.
    If *clean* is `True`, all tabs are expanded to spaces and any whitespace
    that can be uniformly removed from the second line onwards is removed.
    """
    if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    body = node.body
    # A docstring is a string constant expression in first position.
    if not body or not isinstance(body[0], Expr):
        return None
    first = body[0].value
    if not (isinstance(first, Constant) and isinstance(first.value, str)):
        return None
    text = first.value
    if clean:
        # Imported lazily to keep `import ast` cheap.
        import inspect
        text = inspect.cleandoc(text)
    return text
_line_pattern = None
def _splitlines_no_ff(source, maxlines=None):
"""Split a string into lines ignoring form feed and other chars.
This mimics how the Python parser splits source code.
"""
global _line_pattern
if _line_pattern is None:
# lazily computed to speedup import time of `ast`
import re
_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")
lines = []
for lineno, match in enumerate(_line_pattern.finditer(source), 1):
if maxlines is not None and lineno > maxlines:
break
lines.append(match[0])
return lines
def _pad_whitespace(source):
r"""Replace all chars except '\f\t' in a line with spaces."""
result = ''
for c in source:
if c in '\f\t':
result += c
else:
result += ' '
return result
def get_source_segment(source, node, *, padded=False):
    """Get source code segment of the *source* that generated *node*.
    If some location information (`lineno`, `end_lineno`, `col_offset`,
    or `end_col_offset`) is missing, return None.
    If *padded* is `True`, the first line of a multi-line statement will
    be padded with spaces to match its original position.
    """
    try:
        # end_lineno/end_col_offset may be present but None on synthetic nodes.
        if node.end_lineno is None or node.end_col_offset is None:
            return None
        # Convert 1-based line numbers to 0-based list indices.
        lineno = node.lineno - 1
        end_lineno = node.end_lineno - 1
        col_offset = node.col_offset
        end_col_offset = node.end_col_offset
    except AttributeError:
        return None
    # Only split as many lines as the segment actually reaches.
    lines = _splitlines_no_ff(source, maxlines=end_lineno+1)
    if end_lineno == lineno:
        # Column offsets are byte offsets into the UTF-8 encoding of the
        # line, hence the encode/slice/decode dance.
        return lines[lineno].encode()[col_offset:end_col_offset].decode()
    if padded:
        padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())
    else:
        padding = ''
    # First line: everything from the start column; last line: up to the
    # end column; the lines in between are taken verbatim.
    first = padding + lines[lineno].encode()[col_offset:].decode()
    last = lines[end_lineno].encode()[:end_col_offset].decode()
    lines = lines[lineno+1:end_lineno]
    lines.insert(0, first)
    lines.append(last)
    return ''.join(lines)
def walk(node):
    """
    Recursively yield all descendant nodes in the tree starting at *node*
    (including *node* itself), in no specified order. This is useful if you
    only want to modify nodes in place and don't care about the context.
    """
    # Imported lazily to keep `import ast` cheap.
    from collections import deque
    pending = deque((node,))
    while pending:
        current = pending.popleft()
        # Enqueue children breadth-first before yielding the current node.
        pending.extend(iter_child_nodes(current))
        yield current
def compare(
    a,
    b,
    /,
    *,
    compare_attributes=False,
):
    """Recursively compares two ASTs.
    compare_attributes affects whether AST attributes are considered
    in the comparison. If compare_attributes is False (default), then
    attributes are ignored. Otherwise they must all be equal. This
    option is useful to check whether the ASTs are structurally equal but
    might differ in whitespace or similar details.
    """
    sentinel = object()  # handle the possibility of a missing attribute/field
    def _compare(a, b):
        # Compare two fields on an AST object, which may themselves be
        # AST objects, lists of AST objects, or primitive ASDL types
        # like identifiers and constants.
        if isinstance(a, AST):
            return compare(
                a,
                b,
                compare_attributes=compare_attributes,
            )
        elif isinstance(a, list):
            # If a field is repeated, then both objects will represent
            # the value as a list.
            if len(a) != len(b):
                return False
            for a_item, b_item in zip(a, b):
                if not _compare(a_item, b_item):
                    return False
            else:
                # for/else: reached only when the loop did not break/return,
                # i.e. every pair matched.
                return True
        else:
            # Primitive values: require identical type to avoid e.g.
            # True == 1 comparing equal.
            return type(a) is type(b) and a == b
    def _compare_fields(a, b):
        if a._fields != b._fields:
            return False
        for field in a._fields:
            a_field = getattr(a, field, sentinel)
            b_field = getattr(b, field, sentinel)
            if a_field is sentinel and b_field is sentinel:
                # both nodes are missing a field at runtime
                continue
            if a_field is sentinel or b_field is sentinel:
                # one of the node is missing a field
                return False
            if not _compare(a_field, b_field):
                return False
        else:
            # All fields present on both nodes compared equal.
            return True
    def _compare_attributes(a, b):
        if a._attributes != b._attributes:
            return False
        # Attributes are always ints.
        for attr in a._attributes:
            a_attr = getattr(a, attr, sentinel)
            b_attr = getattr(b, attr, sentinel)
            if a_attr is sentinel and b_attr is sentinel:
                # both nodes are missing an attribute at runtime
                continue
            if a_attr != b_attr:
                return False
        else:
            return True
    # Node type, then fields, then (optionally) location attributes.
    if type(a) is not type(b):
        return False
    if not _compare_fields(a, b):
        return False
    if compare_attributes and not _compare_attributes(a, b):
        return False
    return True
class NodeVisitor(object):
    """
    Base class for walking an abstract syntax tree and dispatching to a
    visitor method for each node encountered; whatever that method returns
    is forwarded by `visit`.
    Subclass it and add methods named ``'visit_'`` plus the node's class
    name (e.g. `visit_TryFinally` for a `TryFinally` node). Nodes with no
    matching method fall back to `generic_visit`. Override `visit` itself
    to change the dispatch scheme.
    This class is not meant for tree modification during traversal — use
    `NodeTransformer` for that.
    """
    def visit(self, node):
        """Visit a node."""
        handler = getattr(self, 'visit_' + type(node).__name__, self.generic_visit)
        return handler(node)
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for element in value:
                    if isinstance(element, AST):
                        self.visit(element)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node. If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value. The return value may be the
    original node in which case no replacement takes place.
    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::
       class RewriteName(NodeTransformer):
           def visit_Name(self, node):
               return Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Constant(value=node.id),
                   ctx=node.ctx
               )
    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.
    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.
    Usually you use the transformer like this::
       node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        # Rebuild each child field from the visitor's return values.
        for field, old_value in iter_fields(node):
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        # None drops the node from the list entirely.
                        if value is None:
                            continue
                        # A non-AST return (e.g. a list of nodes) is spliced
                        # in place of the original single node.
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # In-place slice assignment keeps the original list object,
                # so other references to it observe the update.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # None removes a single-node field altogether.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class slice(AST):
    """Deprecated AST node class."""
class Index(slice):
    """Deprecated AST node class. Use the index value directly instead."""
    def __new__(cls, value, **kwargs):
        # Index(x) now evaluates to x itself; no Index instance is created.
        return value
class ExtSlice(slice):
    """Deprecated AST node class. Use ast.Tuple instead."""
    def __new__(cls, dims=(), **kwargs):
        # ExtSlice(dims) now builds the equivalent Tuple in Load context.
        return Tuple(list(dims), Load(), **kwargs)
# If the ast module is loaded more than once, only add deprecated methods once
if not hasattr(Tuple, 'dims'):
    # The following code is for backward compatibility.
    # It will be removed in future.
    def _dims_getter(self):
        """Deprecated. Use elts instead."""
        return self.elts
    def _dims_setter(self, value):
        # Writes through `dims` land on the modern `elts` field.
        self.elts = value
    # Expose Tuple.dims as an alias property over Tuple.elts.
    Tuple.dims = property(_dims_getter, _dims_setter)
# Deprecated node classes kept so that names like `ast.Suite` still
# resolve; the Python 3 parser never produces any of them.
class Suite(mod):
    """Deprecated AST node class. Unused in Python 3."""
class AugLoad(expr_context):
    """Deprecated AST node class. Unused in Python 3."""
class AugStore(expr_context):
    """Deprecated AST node class. Unused in Python 3."""
class Param(expr_context):
    """Deprecated AST node class. Unused in Python 3."""
def unparse(ast_obj):
    """Unparse *ast_obj* back into a string of equivalent source code."""
    global _Unparser
    try:
        # Fast path: the unparser class was already imported on a prior call.
        unparser = _Unparser()
    except NameError:
        # Imported lazily on first use; the `global` declaration above makes
        # this import bind the module-level _Unparser for later calls.
        from _ast_unparse import Unparser as _Unparser
        unparser = _Unparser()
    return unparser.visit(ast_obj)
def main(args=None):
    """Command-line entry point: parse a file (or stdin) and dump its AST."""
    import argparse
    import sys
    # NOTE: ArgumentParser(color=...) is a recent argparse addition —
    # presumably this file targets a Python version that supports it.
    parser = argparse.ArgumentParser(color=True)
    parser.add_argument('infile', nargs='?', default='-',
                        help='the file to parse; defaults to stdin')
    parser.add_argument('-m', '--mode', default='exec',
                        choices=('exec', 'single', 'eval', 'func_type'),
                        help='specify what kind of code must be parsed')
    # store_false on a default of True: passing the flag disables type
    # comments; args.no_type_comments therefore means "type_comments on".
    parser.add_argument('--no-type-comments', default=True, action='store_false',
                        help="don't add information about type comments")
    parser.add_argument('-a', '--include-attributes', action='store_true',
                        help='include attributes such as line numbers and '
                             'column offsets')
    parser.add_argument('-i', '--indent', type=int, default=3,
                        help='indentation of nodes (number of spaces)')
    parser.add_argument('--feature-version',
                        type=str, default=None, metavar='VERSION',
                        help='Python version in the format 3.x '
                             '(for example, 3.10)')
    parser.add_argument('-O', '--optimize',
                        type=int, default=-1, metavar='LEVEL',
                        help='optimization level for parser (default -1)')
    parser.add_argument('--show-empty', default=False, action='store_true',
                        help='show empty lists and fields in dump output')
    args = parser.parse_args(args)
    # Read raw bytes so parse() can honor any source-encoding declaration.
    if args.infile == '-':
        name = '<stdin>'
        source = sys.stdin.buffer.read()
    else:
        name = args.infile
        with open(args.infile, 'rb') as infile:
            source = infile.read()
    # Process feature_version
    feature_version = None
    if args.feature_version:
        try:
            major, minor = map(int, args.feature_version.split('.', 1))
        except ValueError:
            # parser.error() exits, so feature_version below is never
            # reached with unbound major/minor.
            parser.error('Invalid format for --feature-version; '
                         'expected format 3.x (for example, 3.10)')
        feature_version = (major, minor)
    tree = parse(source, name, args.mode, type_comments=args.no_type_comments,
                 feature_version=feature_version, optimize=args.optimize)
    print(dump(tree, include_attributes=args.include_attributes,
               indent=args.indent, show_empty=args.show_empty))
if __name__ == '__main__':
    main()
.sk-global {
  /* Definition of color scheme common for light and dark mode */
  --sklearn-color-text: #000;
  --sklearn-color-text-muted: #666;
  --sklearn-color-line: gray;
  /* Definition of color scheme for unfitted estimators */
  /* Levels run from lightest (0, backgrounds) to strongest (3, accents). */
  --sklearn-color-unfitted-level-0: #fff5e6;
  --sklearn-color-unfitted-level-1: #f6e4d2;
  --sklearn-color-unfitted-level-2: #ffe0b3;
  --sklearn-color-unfitted-level-3: chocolate;
  /* Definition of color scheme for fitted estimators */
  --sklearn-color-fitted-level-0: #f0f8ff;
  --sklearn-color-fitted-level-1: #d4ebff;
  --sklearn-color-fitted-level-2: #b3dbfd;
  --sklearn-color-fitted-level-3: cornflowerblue;
}
/* Theme-dependent variables; the .light/.dark class selects the palette. */
.sk-global.light {
  /* Specific color for light theme */
  --sklearn-color-text-on-default-background: black;
  --sklearn-color-background: white;
  --sklearn-color-border-box: black;
  --sklearn-color-icon: #696969;
}
.sk-global.dark {
  --sklearn-color-text-on-default-background: white;
  --sklearn-color-background: #111;
  --sklearn-color-border-box: white;
  --sklearn-color-icon: #878787;
}
.sk-global {
color: var(--sklearn-color-text);
}
.sk-global pre {
padding: 0;
}
.sk-global input.sk-hidden--visually {
border: 0;
clip: rect(1px 1px 1px 1px);
clip: rect(1px, 1px, 1px, 1px);
height: 1px;
margin: -1px;
overflow: hidden;
padding: 0;
position: absolute;
width: 1px;
}
.sk-global div.sk-dashed-wrapped {
border: 1px dashed var(--sklearn-color-line);
margin: 0 0.4em 0.5em 0.4em;
box-sizing: border-box;
padding-bottom: 0.4em;
background-color: var(--sklearn-color-background);
}
.sk-global div.sk-container {
/* jupyter's `normalize.less` sets `[hidden] { display: none; }`
but bootstrap.min.css set `[hidden] { display: none !important; }`
so we also need the `!important` here to be able to override the
default hidden behavior on the sphinx rendered scikit-learn.org.
See: https://github.com/scikit-learn/scikit-learn/issues/21755 */
display: inline-block !important;
position: relative;
}
.sk-global div.sk-text-repr-fallback {
display: none;
}
div.sk-parallel-item,
div.sk-serial,
div.sk-item {
/* draw centered vertical line to link estimators */
background-image: linear-gradient(var(--sklearn-color-text-on-default-background), var(--sklearn-color-text-on-default-background));
background-size: 2px 100%;
background-repeat: no-repeat;
background-position: center center;
}
/* Parallel-specific style estimator block */
.sk-global div.sk-parallel-item::after {
content: "";
width: 100%;
border-bottom: 2px solid var(--sklearn-color-text-on-default-background);
flex-grow: 1;
}
.sk-global div.sk-parallel {
display: flex;
align-items: stretch;
justify-content: center;
background-color: var(--sklearn-color-background);
position: relative;
}
.sk-global div.sk-parallel-item {
display: flex;
flex-direction: column;
}
.sk-global div.sk-parallel-item:first-child::after {
align-self: flex-end;
width: 50%;
}
.sk-global div.sk-parallel-item:last-child::after {
align-self: flex-start;
width: 50%;
}
.sk-global div.sk-parallel-item:only-child::after {
width: 0;
}
/* Serial-specific style estimator block */
.sk-global div.sk-serial {
display: flex;
flex-direction: column;
align-items: center;
background-color: var(--sklearn-color-background);
padding-right: 1em;
padding-left: 1em;
}
/* Toggleable style: style used for estimator/Pipeline/ColumnTransformer box that is
clickable and can be expanded/collapsed.
- Pipeline and ColumnTransformer use this feature and define the default style
- Estimators will overwrite some part of the style using the `sk-estimator` class
*/
/* Pipeline and ColumnTransformer style (default) */
.sk-global div.sk-toggleable {
/* Default theme specific background. It is overwritten whether we have a
specific estimator or a Pipeline/ColumnTransformer */
background-color: var(--sklearn-color-background);
}
/* Toggleable label */
.sk-global label.sk-toggleable__label {
cursor: pointer;
display: flex;
width: 100%;
margin-bottom: 0;
padding: 0.5em;
box-sizing: border-box;
text-align: center;
align-items: center;
justify-content: center;
gap: 0.5em;
}
.sk-global label.sk-toggleable__label .caption {
font-size: 0.6rem;
font-weight: lighter;
color: var(--sklearn-color-text-muted);
}
.sk-global label.sk-toggleable__label-arrow:before {
/* Arrow on the left of the label */
content: "▸";
float: left;
margin-right: 0.25em;
color: var(--sklearn-color-icon);
}
.sk-global label.sk-toggleable__label-arrow:hover:before {
color: var(--sklearn-color-text);
}
/* Toggleable content - dropdown */
.sk-global div.sk-toggleable__content {
display: none;
text-align: left;
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-0);
}
.sk-global div.sk-toggleable__content.fitted {
/* fitted */
background-color: var(--sklearn-color-fitted-level-0);
}
.sk-global div.sk-toggleable__content pre {
margin: 0.2em;
border-radius: 0.25em;
color: var(--sklearn-color-text);
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-0);
}
.sk-global div.sk-toggleable__content.fitted pre {
/* unfitted */
background-color: var(--sklearn-color-fitted-level-0);
}
.sk-global input.sk-toggleable__control:checked~div.sk-toggleable__content {
/* Expand drop-down */
display: block;
width: 100%;
overflow: visible;
}
.sk-global input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {
content: "▾";
}
/* Pipeline/ColumnTransformer-specific style */
.sk-global div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {
color: var(--sklearn-color-text);
background-color: var(--sklearn-color-unfitted-level-2);
}
.sk-global div.sk-label.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: var(--sklearn-color-fitted-level-2);
}
/* Estimator-specific style */
/* Colorize estimator box */
.sk-global div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-2);
}
.sk-global div.sk-estimator.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {
/* fitted */
background-color: var(--sklearn-color-fitted-level-2);
}
.sk-global div.sk-label label.sk-toggleable__label,
.sk-global div.sk-label label {
/* The background is the default theme color */
color: var(--sklearn-color-text-on-default-background);
}
/* On hover, darken the color of the background */
.sk-global div.sk-label:hover label.sk-toggleable__label {
color: var(--sklearn-color-text);
background-color: var(--sklearn-color-unfitted-level-2);
}
/* Label box, darken color on hover, fitted */
.sk-global div.sk-label.fitted:hover label.sk-toggleable__label.fitted {
color: var(--sklearn-color-text);
background-color: var(--sklearn-color-fitted-level-2);
}
/* Estimator label */
.sk-global div.sk-label label {
font-family: monospace;
font-weight: bold;
line-height: 1.2em;
}
.sk-global div.sk-label-container {
text-align: center;
}
/* Estimator-specific */
.sk-global div.sk-estimator {
font-family: monospace;
border: 1px dotted var(--sklearn-color-border-box);
border-radius: 0.25em;
box-sizing: border-box;
margin-bottom: 0.5em;
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-0);
}
.sk-global div.sk-estimator.fitted {
/* fitted */
background-color: var(--sklearn-color-fitted-level-0);
}
/* on hover */
.sk-global div.sk-estimator:hover {
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-2);
}
.sk-global div.sk-estimator.fitted:hover {
/* fitted */
background-color: var(--sklearn-color-fitted-level-2);
}
/* Specification for estimator info (e.g. "i" and "?") */
/* Common style for "i" and "?" */
.sk-estimator-doc-link,
a:link.sk-estimator-doc-link,
a:visited.sk-estimator-doc-link {
float: right;
font-size: smaller;
line-height: 1em;
font-family: monospace;
background-color: var(--sklearn-color-unfitted-level-0);
border-radius: 1em;
height: 1em;
width: 1em;
text-decoration: none !important;
margin-left: 0.5em;
text-align: center;
/* unfitted */
border: var(--sklearn-color-unfitted-level-3) 1pt solid;
color: var(--sklearn-color-unfitted-level-3);
}
.sk-estimator-doc-link.fitted,
a:link.sk-estimator-doc-link.fitted,
a:visited.sk-estimator-doc-link.fitted {
/* fitted */
background-color: var(--sklearn-color-fitted-level-0);
border: var(--sklearn-color-fitted-level-3) 1pt solid;
color: var(--sklearn-color-fitted-level-3);
}
/* On hover */
/* Fixed: `.sk-estimator-doc-link:hover` (and its .fitted variant below)
   appeared twice in its own selector list; the duplicate had no effect. */
div.sk-estimator:hover .sk-estimator-doc-link:hover,
div.sk-label-container:hover .sk-estimator-doc-link:hover,
.sk-estimator-doc-link:hover {
  /* unfitted */
  background-color: var(--sklearn-color-unfitted-level-3);
  /* NOTE(review): this unfitted rule borders with the *fitted* level-0
     color; looks copy-pasted from the fitted rule — confirm intended. */
  border: var(--sklearn-color-fitted-level-0) 1pt solid;
  color: var(--sklearn-color-unfitted-level-0);
  text-decoration: none;
}
div.sk-estimator.fitted:hover .sk-estimator-doc-link.fitted:hover,
div.sk-label-container:hover .sk-estimator-doc-link.fitted:hover,
.sk-estimator-doc-link.fitted:hover {
  /* fitted */
  background-color: var(--sklearn-color-fitted-level-3);
  border: var(--sklearn-color-fitted-level-0) 1pt solid;
  color: var(--sklearn-color-fitted-level-0);
  text-decoration: none;
}
/* Span, style for the box shown on hovering the info icon */
.sk-estimator-doc-link span {
display: none;
z-index: 9999;
position: relative;
font-weight: normal;
right: .2ex;
padding: .5ex;
margin: .5ex;
width: min-content;
min-width: 20ex;
max-width: 50ex;
color: var(--sklearn-color-text);
box-shadow: 2pt 2pt 4pt #999;
/* unfitted */
background: var(--sklearn-color-unfitted-level-0);
border: .5pt solid var(--sklearn-color-unfitted-level-3);
}
.sk-estimator-doc-link.fitted span {
  /* fitted */
  background: var(--sklearn-color-fitted-level-0);
  /* Fixed: `border: <color>` alone leaves border-style at its initial
     value (none), so no border was drawn; mirror the unfitted rule's
     `.5pt solid`. */
  border: .5pt solid var(--sklearn-color-fitted-level-3);
}
.sk-estimator-doc-link:hover span {
display: block;
}
/* "?"-specific style due to the `<a>` HTML tag */
.sk-global a.estimator_doc_link {
float: right;
font-size: 1rem;
line-height: 1em;
font-family: monospace;
background-color: var(--sklearn-color-unfitted-level-0);
border-radius: 1rem;
height: 1rem;
width: 1rem;
text-decoration: none;
/* unfitted */
color: var(--sklearn-color-unfitted-level-1);
border: var(--sklearn-color-unfitted-level-1) 1pt solid;
}
.sk-global a.estimator_doc_link.fitted {
/* fitted */
background-color: var(--sklearn-color-fitted-level-0);
border: var(--sklearn-color-fitted-level-1) 1pt solid;
color: var(--sklearn-color-fitted-level-1);
}
/* On hover */
.sk-global a.estimator_doc_link:hover {
/* unfitted */
background-color: var(--sklearn-color-unfitted-level-3);
color: var(--sklearn-color-background);
text-decoration: none;
}
.sk-global a.estimator_doc_link.fitted:hover {
/* fitted */
background-color: var(--sklearn-color-fitted-level-3);
} | css | github | https://github.com/scikit-learn/scikit-learn | sklearn/utils/_repr_html/estimator.css |
"""
Caching utilities for robotic browsers. Credit to
https://github.com/Lukasa/httpcache
"""
import logging
import datetime
from requests.adapters import HTTPAdapter
from robobrowser.compat import OrderedDict, iteritems
logger = logging.getLogger(__name__)
# Modified from https://github.com/Lukasa/httpcache/blob/master/httpcache/cache.py
# RoboBrowser should only cache GET requests; HEAD and OPTIONS not exposed
CACHE_VERBS = ['GET']
# HTTP status codes that are safe to cache (success + permanent redirects).
CACHE_CODES = [200, 203, 300, 301, 410]
class RoboCache(object):
def __init__(self, max_age=None, max_count=None):
self.data = OrderedDict()
self.max_age = max_age
self.max_count = max_count
def _reduce_age(self, now):
"""Reduce size of cache by date.
:param datetime.datetime now: Current time
"""
if self.max_age:
keys = [
key for key, value in iteritems(self.data)
if now - value['date'] > self.max_age
]
for key in keys:
del self.data[key]
def _reduce_count(self):
"""Reduce size of cache by count.
"""
if self.max_count:
while len(self.data) > self.max_count:
self.data.popitem(last=False)
def store(self, response):
"""Store response in cache, skipping if code is forbidden.
:param requests.Response response: HTTP response
"""
if response.status_code not in CACHE_CODES:
return
now = datetime.datetime.now()
self.data[response.url] = {
'date': now,
'response': response,
}
logger.info('Stored response in cache')
self._reduce_age(now)
self._reduce_count()
def retrieve(self, request):
"""Look up request in cache, skipping if verb is forbidden.
:param requests.Request request: HTTP request
"""
if request.method not in CACHE_VERBS:
return
try:
response = self.data[request.url]['response']
logger.info('Retrieved response from cache')
return response
except KeyError:
return None
def clear(self):
"Clear cache."
self.data = OrderedDict()
class RoboHTTPAdapter(HTTPAdapter):
    """Transport adapter that adds read-through caching to requests."""
    def __init__(self, max_age=None, max_count=None, **kwargs):
        super(RoboHTTPAdapter, self).__init__(**kwargs)
        # Per-adapter cache; extra kwargs are forwarded to HTTPAdapter.
        self.cache = RoboCache(max_age=max_age, max_count=max_count)
    def send(self, request, **kwargs):
        """Serve from cache when possible, otherwise hit the network."""
        cached_resp = self.cache.retrieve(request)
        if cached_resp is not None:
            return cached_resp
        else:
            return super(RoboHTTPAdapter, self).send(request, **kwargs)
    def build_response(self, request, response):
        """Build the requests Response, storing it in the cache on the way."""
        resp = super(RoboHTTPAdapter, self).build_response(request, response)
        self.cache.store(resp)
        return resp
---
# Skip this suite on test runners that do not support the
# allowed-warnings features exercised below.
setup:
  - skip:
      features:
        - allowed_warnings
        - allowed_warnings_regex
---
# Both literal and regex allowed-warnings lists are declared up front.
"Test with existing allowed warnings":
  - do:
      allowed_warnings:
        - "a"
        - "b"
      allowed_warnings_regex:
        - "c"
        - "d"
      something:
        id: "something"
  - match: { acknowledged: true }
---
# Same declarations, different target id; the warning lists must be
# left untouched by the transform under test.
"Test with existing allowed warnings not to change":
  - do:
      allowed_warnings:
        - "a"
        - "b"
      allowed_warnings_regex:
        - "c"
        - "d"
      something:
        id: "something_else"
  - match: { acknowledged: true }
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from itertools import groupby
from operator import itemgetter
# Anonymization states an individual field can be in.
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
# Field states plus the mixed/global 'unstable' state.
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
# Lifecycle of one anonymization/de-anonymization run.
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occured')]
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
    """Group the rows of *lst* by the given column name(s), returning a
    dict mapping each key to the list of matching rows."""
    if isinstance(cols, basestring):
        cols = [cols]
    keyfunc = itemgetter(*cols)
    # groupby only merges adjacent rows, so sort by the same key first.
    return dict(
        (key, list(rows))
        for key, rows in groupby(sorted(lst, key=keyfunc), keyfunc)
    )
class ir_model_fields_anonymization(osv.osv):
_name = 'ir.model.fields.anonymization'
_rec_name = 'field_id'
_columns = {
'model_name': fields.char('Object Name', required=True),
'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
'field_name': fields.char('Field Name', required=True),
'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
'state': fields.selection(selection=FIELD_STATES, String='Status', required=True, readonly=True),
}
_sql_constraints = [
('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
]
def _get_global_state(self, cr, uid, context=None):
ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = self.browse(cr, uid, ids, context=context)
if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
state = 'clear' # all fields are clear
elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
state = 'anonymized' # all fields are anonymized
else:
state = 'unstable' # fields are mixed: this should be fixed
return state
def _check_write(self, cr, uid, context=None):
"""check that the field is created from the menu and not from an database update
otherwise the database update can crash:"""
if context is None:
context = {}
if context.get('manual'):
global_state = self._get_global_state(cr, uid, context=context)
if global_state == 'anonymized':
raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
elif global_state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
raise osv.except_osv('Error!', msg)
return True
def _get_model_and_field_ids(self, cr, uid, vals, context=None):
model_and_field_ids = (False, False)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
ir_model_fields_obj = self.pool.get('ir.model.fields')
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
if model_ids:
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
if field_ids:
field_id = field_ids[0]
model_and_field_ids = (model_ids[0], field_id)
return model_and_field_ids
def create(self, cr, uid, vals, context=None):
# check field state: all should be clear before we can add a new field to anonymize:
self._check_write(cr, uid, context=context)
global_state = self._get_global_state(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
# check field state: all should be clear before we can modify a field:
if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
self._check_write(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if 'field_id' in vals:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
global_state = self._get_global_state(cr, uid, context)
if global_state != 'unstable':
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
# check field state: all should be clear before we can unlink a field:
self._check_write(cr, uid, context=context)
res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_name': False,
}}
if model_id:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
model_id = model_ids and model_ids[0] or None
model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
res['value']['model_name'] = model_name
return res
def onchange_model_name(self, cr, uid, ids, model_name, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_id': False,
}}
if model_name:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
model_id = model_ids and model_ids[0] or False
res['value']['model_id'] = model_id
return res
def onchange_field_name(self, cr, uid, ids, field_name, model_name):
    """Resolve ``field_id`` from the (field_name, model_name) pair typed
    on the form."""
    values = {'field_id': False}
    if field_name and model_name:
        matches = self.pool.get('ir.model.fields').search(
            cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
        if matches:
            values['field_id'] = matches[0]
    return {'value': values}
def onchange_field_id(self, cr, uid, ids, field_id, model_name):
    """Refresh ``field_name`` from the field selected on the form."""
    values = {'field_name': False}
    if field_id:
        field_record = self.pool.get('ir.model.fields').browse(cr, uid, field_id)
        values['field_name'] = field_record.name
    return {'value': values}
# New anonymization field definitions always start in the 'clear'
# (not yet anonymized) state.
_defaults = {
    'state': lambda *a: 'clear',
}
class ir_model_fields_anonymization_history(osv.osv):
    """Read-only audit trail of anonymization / de-anonymization runs."""
    _name = 'ir.model.fields.anonymization.history'
    _order = "date desc"

    _columns = {
        # When the run was started.
        'date': fields.datetime('Date', required=True, readonly=True),
        # Anonymization field definitions involved in the run.
        'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
        'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
        # Either 'clear -> anonymized' or 'anonymized -> clear'.
        'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
        'msg': fields.text('Message', readonly=True),
        # Absolute path of the pickle export produced by an anonymization run.
        'filepath': fields.char(string='File path', readonly=True),
    }
class ir_model_fields_anonymize_wizard(osv.osv_memory):
    """Wizard running the anonymization process in both directions.

    ``anonymize_database`` replaces the configured fields' values with
    neutral data and exports the original values to a pickle file;
    ``reverse_anonymize_database`` loads such a file back and restores
    the original values.
    """
    _name = 'ir.model.fields.anonymize.wizard'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        # Function field getter: every wizard record shares the current
        # global anonymization state.
        res = {}
        state = self._get_state_value(cr, uid, context=None)
        for id in ids:
            res[id] = state
        return res

    def _get_summary(self, cr, uid, ids, name, arg, context=None):
        # Function field getter: every wizard record shares the same
        # human-readable summary of the configured fields.
        res = {}
        summary = self._get_summary_value(cr, uid, context)
        for id in ids:
            res[id] = summary
        return res

    _columns = {
        'name': fields.char(string='File Name'),
        'summary': fields.function(_get_summary, type='text', string='Summary'),
        'file_export': fields.binary(string='Export'),
        # NOTE: fixed user-facing typo 'extention' -> 'extension'.
        'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extension."),
        'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
        'msg': fields.text(string='Message'),
    }

    def _get_state_value(self, cr, uid, context=None):
        """Return the global anonymization state of the database."""
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        return state

    def _get_summary_value(self, cr, uid, context=None):
        """Return one text line per configured field: model, field, state."""
        summary = u''
        anon_field_obj = self.pool.get('ir.model.fields.anonymization')
        ir_model_fields_obj = self.pool.get('ir.model.fields')
        anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
        field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
        fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
        fields_by_id = dict([(f.id, f) for f in fields])
        for anon_field in anon_fields:
            field = fields_by_id.get(anon_field.field_id.id)
            values = {
                'model_name': field.model_id.name,
                'model_code': field.model_id.model,
                'field_code': field.name,
                'field_name': field.field_description,
                'state': anon_field.state,
            }
            summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
        return summary

    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard with the current state, summary and a
        backup warning message."""
        res = {}
        res['name'] = '.pickle'
        res['summary'] = self._get_summary_value(cr, uid, context)
        res['state'] = self._get_state_value(cr, uid, context)
        res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
        return res

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
        """Adapt the wizard form to the current step (passed through the
        context) and to the global anonymization state."""
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        if context is None:
            context = {}
        step = context.get('step', 'new_window')
        res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context=context, *args, **kwargs)
        eview = etree.fromstring(res['arch'])
        placeholder = eview.xpath("group[@name='placeholder1']")
        if len(placeholder):
            placeholder = placeholder[0]
            if step == 'new_window' and state == 'clear':
                # clicked in the menu and the fields are not anonymized: warn the admin that backuping the db is very important
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
                eview.remove(placeholder)
            elif step == 'new_window' and state == 'anonymized':
                # clicked in the menu and the fields are already anonymized
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
                placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
                eview.remove(placeholder)
            elif step == 'just_anonymized':
                # we just ran the anonymization process, we need the file export field
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
                # we need to remove the button:
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            elif step == 'just_desanonymized':
                # we just reversed the anonymization process, we don't need any field
                # we need to remove the button
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            else:
                msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                        " while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
                raise osv.except_osv('Error!', msg)
            res['arch'] = etree.tostring(eview)
        return res

    def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
        """Flag the history record as failed, then raise to abort the run."""
        self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
            'state': 'in_exception',
            'msg': error_msg,
        })
        raise osv.except_osv(error_type, error_msg)

    def anonymize_database(self, cr, uid, ids, context=None):
        """Sets the 'anonymized' state to defined fields"""
        # create a new history record:
        anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'clear -> anonymized',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)
        # check that all the defined fields are in the 'clear' state
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        if state == 'anonymized':
            self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                    " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
        # do the anonymization:
        dirpath = os.environ.get('HOME') or os.getcwd()
        rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
        abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
        if not fields:
            msg = "No fields are going to be anonymized."
            self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
        data = []
        for field in fields:
            model_name = field.model_id.model
            field_name = field.field_id.name
            field_type = field.field_id.ttype
            table_name = self.pool[model_name]._table
            # get the current value
            # NOTE: the interpolated identifiers come from trusted
            # ir.model / ir.model.fields metadata, not from user input.
            sql = "select id, %s from %s" % (field_name, table_name)
            cr.execute(sql)
            records = cr.dictfetchall()
            for record in records:
                # Remember the original value so it can be restored later.
                data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
                # anonymize the value:
                anonymized_value = None
                sid = str(record['id'])
                if field_type == 'char':
                    anonymized_value = 'xxx'+sid
                elif field_type == 'selection':
                    anonymized_value = 'xxx'+sid
                elif field_type == 'text':
                    anonymized_value = 'xxx'+sid
                elif field_type == 'html':
                    anonymized_value = 'xxx'+sid
                elif field_type == 'boolean':
                    anonymized_value = random.choice([True, False])
                elif field_type == 'date':
                    anonymized_value = '2011-11-11'
                elif field_type == 'datetime':
                    anonymized_value = '2011-11-11 11:11:11'
                elif field_type == 'float':
                    anonymized_value = 0.0
                elif field_type == 'integer':
                    anonymized_value = 0
                elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']: # cannot anonymize these kind of fields
                    msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
                    self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
                if anonymized_value is None:
                    self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happens."))
                sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
                    'table': table_name,
                    'field': field_name,
                }
                cr.execute(sql, {
                    'anonymized_value': anonymized_value,
                    'id': record['id']
                })
        # save pickle:
        # FIX: HIGHEST_PROTOCOL pickles are binary, so the file must be
        # opened in binary mode; the handle was also leaked before.
        fn = open(abs_filepath, 'wb')
        try:
            pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
        finally:
            fn.close()
        # update the anonymization fields:
        values = {
            'state': 'anonymized',
        }
        ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
        # add a result message in the wizard:
        msgs = ["Anonymization successful.",
                "",
                "Do not forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
                "",
                "This file is also stored in the %s directory. The absolute file path is: %s.",
               ]
        msg = '\n'.join(msgs) % (dirpath, abs_filepath)
        # FIX: read the pickle back in binary mode and close the handle
        # even if write() raises.
        fn = open(abs_filepath, 'rb')
        try:
            self.write(cr, uid, ids, {
                'msg': msg,
                'file_export': base64.encodestring(fn.read()),
            })
        finally:
            fn.close()
        # update the history record:
        anonymization_history_model.write(cr, uid, history_id, {
            'field_ids': [[6, 0, field_ids]],
            'msg': msg,
            'filepath': abs_filepath,
            'state': 'done',
        })
        # handle the view:
        view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
        return {
            'res_id': ids[0],
            'view_id': [view_id],
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.model.fields.anonymize.wizard',
            'type': 'ir.actions.act_window',
            'context': {'step': 'just_anonymized'},
            'target': 'new',
        }

    def reverse_anonymize_database(self, cr, uid, ids, context=None):
        """Set the 'clear' state to defined fields"""
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
        # create a new history record:
        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'anonymized -> clear',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)
        # check that all the defined fields are in the 'anonymized' state
        state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
        if state == 'clear':
            # FIX: this used to call the non-existent 'osv.except_osv_'
            # (trailing underscore), which raised AttributeError instead
            # of showing the intended error message.
            raise osv.except_osv('Error!', "The database is not currently anonymized, you cannot reverse the anonymization.")
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                    " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            raise osv.except_osv('Error!', msg)
        wizards = self.browse(cr, uid, ids, context=context)
        for wizard in wizards:
            if not wizard.file_import:
                msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
                self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
            # reverse the anonymization:
            # load the pickle file content into a data structure:
            # SECURITY NOTE: pickle.loads() can execute arbitrary code from
            # the uploaded file; only administrators should reach this code.
            data = pickle.loads(base64.decodestring(wizard.file_import))
            migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
            fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '8.0')])
            fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
            fixes = group(fixes, ('model_name', 'field_name'))
            for line in data:
                queries = []
                table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
                # check if custom sql exists:
                key = (line['model_id'], line['field_id'])
                custom_updates = fixes.get(key)
                if custom_updates:
                    custom_updates.sort(key=itemgetter('sequence'))
                    queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
                elif table_name:
                    queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
                        'table': table_name,
                        'field': line['field_id'],
                    }, 'sql')]
                for query in queries:
                    if query[1] == 'sql':
                        sql = query[0]
                        cr.execute(sql, {
                            'value': line['value'],
                            'id': line['id']
                        })
                    elif query[1] == 'python':
                        raw_code = query[0]
                        code = raw_code % line
                        # SECURITY NOTE: eval() runs administrator-provided
                        # migration-fix code; never expose this to end users.
                        eval(code)
                    else:
                        raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query['query_type'], ))
        # update the anonymization fields:
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        values = {
            'state': 'clear',
        }
        ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
        # add a result message in the wizard:
        msg = '\n'.join(["Successfully reversed the anonymization.",
                         "",
                         ])
        self.write(cr, uid, ids, {'msg': msg})
        # update the history record:
        anonymization_history_model.write(cr, uid, history_id, {
            'field_ids': [[6, 0, field_ids]],
            'msg': msg,
            'filepath': False,
            'state': 'done',
        })
        # handle the view:
        view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
        return {
            'res_id': ids[0],
            'view_id': [view_id],
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.model.fields.anonymize.wizard',
            'type': 'ir.actions.act_window',
            'context': {'step': 'just_desanonymized'},
            'target': 'new',
        }

    def _id_get(self, cr, uid, model, id_str, mod):
        """Resolve an XML id to a database id; return None when missing."""
        if '.' in id_str:
            mod, id_str = id_str.split('.')
        try:
            idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
            res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
        # FIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        except Exception:
            res = None
        return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
    """Custom restore queries replayed when reversing anonymization.

    When a model's table changed between the version that produced the
    pickle export and the current one, these records supply SQL or python
    snippets used to put the original values back.
    """
    _name = 'ir.model.fields.anonymization.migration.fix'
    _order = "sequence"

    _columns = {
        # Server version the fix applies to (e.g. '8.0').
        'target_version': fields.char('Target Version'),
        'model_name': fields.char('Model'),
        'field_name': fields.char('Field'),
        # The SQL statement or python expression to execute.
        'query': fields.text('Query'),
        'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
        # Execution order among fixes for the same (model, field).
        'sequence': fields.integer('Sequence'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
# :markup: markdown
require "nio"
module ActionCable
  module Connection
    # Event loop that multiplexes all client socket IO for the process on a
    # single NIO selector thread, and runs stream callbacks on a separate
    # worker thread pool so the selector thread never blocks.
    class StreamEventLoop
      def initialize
        # Selector, worker pool and selector thread are created lazily by #spawn.
        @nio = @executor = @thread = nil
        # io => NIO::Monitor for every attached socket.
        @map = {}
        @stopping = false
        # Work queue drained by the selector thread between select calls.
        @todo = Queue.new

        @spawn_mutex = Mutex.new
      end

      # Fire +block+ every +interval+ seconds on a background timer task.
      def timer(interval, &block)
        Concurrent::TimerTask.new(execution_interval: interval, &block).tap(&:execute)
      end

      # Run a task on the worker pool (never on the selector thread).
      def post(task = nil, &block)
        task ||= block

        spawn
        @executor << task
      end

      # Start monitoring +io+ for readability on behalf of +stream+.
      def attach(io, stream)
        @todo << lambda do
          @map[io] = @nio.register(io, :r)
          @map[io].value = stream
        end
        wakeup
      end

      # Stop monitoring +io+ and close it (done on the selector thread).
      def detach(io, stream)
        @todo << lambda do
          @nio.deregister io
          @map.delete io
          io.close
        end
        wakeup
      end

      # Ask the selector to also watch +io+ for writability so the stream's
      # buffered writes can be flushed when the socket drains.
      def writes_pending(io)
        @todo << lambda do
          if monitor = @map[io]
            monitor.interests = :rw
          end
        end
        wakeup
      end

      # Shut the loop down; the selector thread closes the selector and exits.
      def stop
        @stopping = true
        wakeup if @nio
      end

      private
        # Lazily create the selector, executor and selector thread, exactly
        # once, guarded by @spawn_mutex. Returns true when this call did the
        # spawning, nil when everything was already running.
        def spawn
          return if @thread && @thread.status

          @spawn_mutex.synchronize do
            return if @thread && @thread.status

            @nio ||= NIO::Selector.new
            @executor ||= Concurrent::ThreadPoolExecutor.new(
              name: "ActionCable-streamer",
              min_threads: 1,
              max_threads: 10,
              max_queue: 0,
            )

            @thread = Thread.new { run }

            return true
          end
        end

        # Interrupt a blocking select so work queued in @todo gets processed.
        def wakeup
          spawn || @nio.wakeup
        end

        # Selector loop body: drain @todo, select, then dispatch readable /
        # writable events to the owning streams.
        def run
          loop do
            if @stopping
              @nio.close
              break
            end

            until @todo.empty?
              @todo.pop(true).call
            end

            next unless monitors = @nio.select

            monitors.each do |monitor|
              io = monitor.io
              stream = monitor.value

              begin
                if monitor.writable?
                  # Once the write buffer is fully flushed, stop watching
                  # for writability.
                  if stream.flush_write_buffer
                    monitor.interests = :r
                  end
                  next unless monitor.readable?
                end

                incoming = io.read_nonblock(4096, exception: false)
                case incoming
                when :wait_readable
                  next
                when nil
                  # EOF: the peer closed the connection.
                  stream.close
                else
                  stream.receive incoming
                end
              rescue
                # We expect one of EOFError or Errno::ECONNRESET in normal operation (when the
                # client goes away). But if anything else goes wrong, this is still the best way
                # to handle it.
                begin
                  stream.close
                rescue
                  @nio.deregister io
                  @map.delete io
                end
              end
            end
          end
        end
    end
  end
end
import unittest
from django.template.smartif import IfParser, Literal
class SmartIfTests(unittest.TestCase):
    """Tests for the smart {% if %} expression parser.

    Only constructs that are difficult to exercise through template
    rendering are tested here; the builtin template tag tests cover the
    rest.
    """

    def assertCalcEqual(self, expected, tokens):
        """Assert that parsing and evaluating ``tokens`` yields ``expected``."""
        self.assertEqual(expected, IfParser(tokens).parse().eval({}))

    # We only test things here that are difficult to test elsewhere
    # Many other tests are found in the main tests for builtin template tags

    # Test parsing via the printed parse tree
    def test_not(self):
        var = IfParser(["not", False]).parse()
        self.assertEqual("(not (literal False))", repr(var))
        # FIX: assertTrue replaces the deprecated (and later removed)
        # unittest alias assert_.
        self.assertTrue(var.eval({}))

        self.assertFalse(IfParser(["not", True]).parse().eval({}))

    def test_or(self):
        var = IfParser([True, "or", False]).parse()
        self.assertEqual("(or (literal True) (literal False))", repr(var))
        self.assertTrue(var.eval({}))

    def test_in(self):
        list_ = [1, 2, 3]
        self.assertCalcEqual(True, [1, 'in', list_])
        self.assertCalcEqual(False, [1, 'in', None])
        self.assertCalcEqual(False, [None, 'in', list_])

    def test_not_in(self):
        list_ = [1, 2, 3]
        self.assertCalcEqual(False, [1, 'not', 'in', list_])
        self.assertCalcEqual(True, [4, 'not', 'in', list_])
        self.assertCalcEqual(False, [1, 'not', 'in', None])
        self.assertCalcEqual(True, [None, 'not', 'in', list_])

    def test_precedence(self):
        # (False and False) or True == True <- we want this one, like Python
        # False and (False or True) == False
        self.assertCalcEqual(True, [False, 'and', False, 'or', True])

        # True or (False and False) == True <- we want this one, like Python
        # (True or False) and False == False
        self.assertCalcEqual(True, [True, 'or', False, 'and', False])

        # (1 or 1) == 2 -> False
        # 1 or (1 == 2) -> True <- we want this one
        self.assertCalcEqual(True, [1, 'or', 1, '==', 2])

        self.assertCalcEqual(True, [True, '==', True, 'or', True, '==', False])

        self.assertEqual("(or (and (== (literal 1) (literal 2)) (literal 3)) (literal 4))",
                         repr(IfParser([1, '==', 2, 'and', 3, 'or', 4]).parse()))
from __future__ import print_function, division
from sympy import sqrt, exp, S, pi, I
from sympy.physics.quantum.constants import hbar
def wavefunction(n, x):
    """
    Return the normalized wavefunction for a particle on a ring.

    ``n`` is the quantum number and ``x`` the angular coordinate; ``n``
    may be negative as well as positive, which encodes the direction in
    which the particle travels around the ring.

    Examples
    ========

    >>> from sympy.physics.pring import wavefunction, energy
    >>> from sympy import Symbol, integrate, pi
    >>> x=Symbol("x")
    >>> wavefunction(1, x)
    sqrt(2)*exp(I*x)/(2*sqrt(pi))
    >>> wavefunction(2, x)
    sqrt(2)*exp(2*I*x)/(2*sqrt(pi))
    >>> wavefunction(3, x)
    sqrt(2)*exp(3*I*x)/(2*sqrt(pi))

    The normalization of the wavefunction is:

    >>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi))
    1
    >>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi))
    1

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
        Mechanics (4th ed.). Pages 71-73.
    """
    # Coerce the arguments into SymPy expressions before building the result.
    quantum_number, angle = S(n), S(x)
    normalization = 1 / sqrt(2 * pi)
    return normalization * exp(I * quantum_number * angle)
def energy(n, m, r):
    """
    Return the energy of the particle-on-a-ring state with quantum number n.

    E = (n**2 * hbar**2) / (2 * m * r**2), where ``m`` is the mass of the
    particle and ``r`` the radius of the circle.

    Examples
    ========

    >>> from sympy.physics.pring import energy
    >>> from sympy import Symbol
    >>> m=Symbol("m")
    >>> r=Symbol("r")
    >>> energy(1, m, r)
    hbar**2/(2*m*r**2)
    >>> energy(2, m, r)
    2*hbar**2/(m*r**2)
    >>> energy(-2, 2.0, 3.0)
    0.111111111111111*hbar**2

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
        Mechanics (4th ed.). Pages 71-73.
    """
    n, m, r = S(n), S(m), S(r)
    # Reject non-integer (including symbolic, non-integer-assumed) quantum
    # numbers up front.
    if not n.is_integer:
        raise ValueError("'n' must be integer")
    return (n**2 * hbar**2) / (2 * m * r**2)
# Copyright 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Claudiu Belu, Cloudbase Solutions Srl
"""
Unit tests for the Hyper-V Security Groups Driver.
"""
import mock
from oslo.config import cfg
from neutron.plugins.hyperv.agent import security_groups_driver as sg_driver
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.tests import base
CONF = cfg.CONF
class TestHyperVSecurityGroupsDriver(base.BaseTestCase):
    """Unit tests for HyperVSecurityGroupsDriver; the utils layer is mocked
    so no real Hyper-V/WMI calls are made."""

    _FAKE_DEVICE = 'fake_device'
    _FAKE_ID = 'fake_id'
    _FAKE_DIRECTION = 'ingress'
    _FAKE_ETHERTYPE = 'IPv4'
    _FAKE_ETHERTYPE_IPV6 = 'IPv6'
    _FAKE_DEST_IP_PREFIX = 'fake_dest_ip_prefix'
    _FAKE_SOURCE_IP_PREFIX = 'fake_source_ip_prefix'
    _FAKE_PARAM_NAME = 'fake_param_name'
    _FAKE_PARAM_VALUE = 'fake_param_value'

    _FAKE_PORT_MIN = 9001
    _FAKE_PORT_MAX = 9011

    def setUp(self):
        super(TestHyperVSecurityGroupsDriver, self).setUp()
        # Patch the utils factory so driver construction never touches WMI.
        self._mock_windows_version = mock.patch.object(utilsfactory,
                                                       'get_hypervutils')
        self._mock_windows_version.start()
        self._driver = sg_driver.HyperVSecurityGroupsDriver()
        self._driver._utils = mock.MagicMock()

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver._create_port_rules')
    def test_prepare_port_filter(self, mock_create_rules):
        # A new port gets default reject-all rules plus its own rules.
        mock_port = self._get_port()
        mock_utils_method = self._driver._utils.create_default_reject_all_rules
        self._driver.prepare_port_filter(mock_port)

        self.assertEqual(mock_port,
                         self._driver._security_ports[self._FAKE_DEVICE])
        mock_utils_method.assert_called_once_with(self._FAKE_ID)
        self._driver._create_port_rules.assert_called_once_with(
            self._FAKE_ID, mock_port['security_group_rules'])

    def test_update_port_filter(self):
        # Updating a known port removes old rules and creates the new ones.
        mock_port = self._get_port()
        new_mock_port = self._get_port()
        new_mock_port['id'] += '2'
        new_mock_port['security_group_rules'][0]['ethertype'] += "2"

        self._driver._security_ports[mock_port['device']] = mock_port
        self._driver._create_port_rules = mock.MagicMock()
        self._driver._remove_port_rules = mock.MagicMock()
        self._driver.update_port_filter(new_mock_port)

        self._driver._remove_port_rules.assert_called_once_with(
            mock_port['id'], mock_port['security_group_rules'])
        self._driver._create_port_rules.assert_called_once_with(
            new_mock_port['id'], new_mock_port['security_group_rules'])
        self.assertEqual(new_mock_port,
                         self._driver._security_ports[new_mock_port['device']])

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver.prepare_port_filter')
    def test_update_port_filter_new_port(self, mock_method):
        # An unknown port is delegated to prepare_port_filter.
        mock_port = self._get_port()
        self._driver.prepare_port_filter = mock.MagicMock()
        self._driver.update_port_filter(mock_port)

        self._driver.prepare_port_filter.assert_called_once_with(mock_port)

    def test_remove_port_filter(self):
        mock_port = self._get_port()
        self._driver._security_ports[mock_port['device']] = mock_port
        self._driver.remove_port_filter(mock_port)
        self.assertFalse(mock_port['device'] in self._driver._security_ports)

    def test_create_port_rules_exception(self):
        # Rule creation failures must not propagate out of the driver.
        fake_rule = self._create_security_rule()
        self._driver._utils.create_security_rule.side_effect = Exception(
            'Generated Exception for testing.')
        self._driver._create_port_rules(self._FAKE_ID, [fake_rule])

    def test_create_param_map(self):
        fake_rule = self._create_security_rule()
        self._driver._get_rule_remote_address = mock.MagicMock(
            return_value=self._FAKE_SOURCE_IP_PREFIX)
        actual = self._driver._create_param_map(fake_rule)
        expected = {
            'direction': self._driver._ACL_PROP_MAP[
                'direction'][self._FAKE_DIRECTION],
            'acl_type': self._driver._ACL_PROP_MAP[
                'ethertype'][self._FAKE_ETHERTYPE],
            'local_port': '%s-%s' % (self._FAKE_PORT_MIN, self._FAKE_PORT_MAX),
            'protocol': self._driver._ACL_PROP_MAP['default'],
            'remote_address': self._FAKE_SOURCE_IP_PREFIX
        }

        self.assertEqual(expected, actual)

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver._create_param_map')
    def test_create_port_rules(self, mock_method):
        fake_rule = self._create_security_rule()
        mock_method.return_value = {
            self._FAKE_PARAM_NAME: self._FAKE_PARAM_VALUE}
        self._driver._create_port_rules(self._FAKE_ID, [fake_rule])

        self._driver._utils.create_security_rule.assert_called_once_with(
            self._FAKE_ID, fake_param_name=self._FAKE_PARAM_VALUE)

    def test_convert_any_address_to_same_ingress(self):
        # Ingress rules use the source prefix as the remote address.
        rule = self._create_security_rule()
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._FAKE_SOURCE_IP_PREFIX, actual)

    def test_convert_any_address_to_same_egress(self):
        # Egress rules use the destination prefix instead.
        rule = self._create_security_rule()
        rule['direction'] += '2'
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._FAKE_DEST_IP_PREFIX, actual)

    def test_convert_any_address_to_ipv4(self):
        # A missing prefix falls back to the any-IPv4 default.
        rule = self._create_security_rule()
        del rule['source_ip_prefix']
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv4'],
                         actual)

    def test_convert_any_address_to_ipv6(self):
        # A missing prefix on an IPv6 rule falls back to the any-IPv6 default.
        rule = self._create_security_rule()
        del rule['source_ip_prefix']
        rule['ethertype'] = self._FAKE_ETHERTYPE_IPV6
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv6'],
                         actual)

    def test_get_rule_protocol_icmp(self):
        # ICMP has a dedicated mapping in the ACL property map.
        self._test_get_rule_protocol(
            'icmp', self._driver._ACL_PROP_MAP['protocol']['icmp'])

    def test_get_rule_protocol_no_icmp(self):
        # Every other protocol name passes through unchanged.
        self._test_get_rule_protocol('tcp', 'tcp')

    def _test_get_rule_protocol(self, protocol, expected):
        # Shared assertion helper for the protocol-mapping tests.
        rule = self._create_security_rule()
        rule['protocol'] = protocol
        actual = self._driver._get_rule_protocol(rule)

        self.assertEqual(expected, actual)

    def _get_port(self):
        # Minimal port dict as seen by the firewall driver.
        return {
            'device': self._FAKE_DEVICE,
            'id': self._FAKE_ID,
            'security_group_rules': [self._create_security_rule()]
        }

    def _create_security_rule(self):
        # Minimal security group rule fixture.
        return {
            'direction': self._FAKE_DIRECTION,
            'ethertype': self._FAKE_ETHERTYPE,
            'dest_ip_prefix': self._FAKE_DEST_IP_PREFIX,
            'source_ip_prefix': self._FAKE_SOURCE_IP_PREFIX,
            'port_range_min': self._FAKE_PORT_MIN,
            'port_range_max': self._FAKE_PORT_MAX
        }
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Contact(Model):
    """The contact information for the vault certificates.

    :param email_address: Email address.
    :type email_address: str
    :param name: Name.
    :type name: str
    :param phone: Phone number.
    :type phone: str
    """

    # Maps Python attribute names to the wire-format JSON keys and types
    # used by the msrest (de)serializer.
    _attribute_map = {
        'email_address': {'key': 'email', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'phone': {'key': 'phone', 'type': 'str'},
    }

    def __init__(self, email_address=None, name=None, phone=None):
        self.email_address = email_address
        self.name = name
        self.phone = phone
# Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Status Admin API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import flags
from nova.openstack.common import log as logging
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'extended_status')
class ExtendedStatusController(wsgi.Controller):
    """Controller extension adding extended status attributes to server
    responses when the caller is authorized."""

    def __init__(self, *args, **kwargs):
        super(ExtendedStatusController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _extend_server(self, server, instance):
        # Copy the three state values onto the response body under the
        # 'OS-EXT-STS:' alias prefix.
        for state in ['task_state', 'vm_state', 'power_state']:
            key = "%s:%s" % (Extended_status.alias, state)
            server[key] = instance[state]

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-server 'show' response."""
        context = req.environ['nova.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedStatusTemplate())
            server = resp_obj.obj['server']
            db_instance = req.get_db_instance(server['id'])
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'show' method.
            self._extend_server(server, db_instance)

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend every server in a 'detail' list response."""
        context = req.environ['nova.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedStatusesTemplate())
            servers = list(resp_obj.obj['servers'])
            for server in servers:
                db_instance = req.get_db_instance(server['id'])
                # server['id'] is guaranteed to be in the cache due to
                # the core API adding it in its 'detail' method.
                self._extend_server(server, db_instance)
class Extended_status(extensions.ExtensionDescriptor):
    """Extended Status support"""

    name = "ExtendedStatus"
    # Prefix used for the extra attributes in API responses.
    alias = "OS-EXT-STS"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "extended_status/api/v1.1")
    updated = "2011-11-03T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach ExtendedStatusController to the 'servers' resource."""
        controller = ExtendedStatusController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
def make_server(elem):
    """Declare the three OS-EXT-STS attributes on an XML server element.

    Attribute order (task_state, power_state, vm_state) matches the
    original declaration order.
    """
    for state in ('task_state', 'power_state', 'vm_state'):
        elem.set('{%s}%s' % (Extended_status.namespace, state),
                 '%s:%s' % (Extended_status.alias, state))
class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the slave template decorating a single <server> element."""
        server_elem = xmlutil.TemplateElement('server', selector='server')
        make_server(server_elem)
        nsmap = {Extended_status.alias: Extended_status.namespace}
        return xmlutil.SlaveTemplate(server_elem, 1, nsmap=nsmap)
class ExtendedStatusesTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        # Slave template for the 'detail' (list) response: one <server>
        # sub-element per item under <servers>, each decorated with the
        # OS-EXT-STS attributes via make_server().
        root = xmlutil.TemplateElement('servers')
        elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
        make_server(elem)
        return xmlutil.SlaveTemplate(root, 1, nsmap={
            Extended_status.alias: Extended_status.namespace}) | unknown | codeparrot/codeparrot-clean | ||
from yowsup.stacks import YowStack
from .layer import SyncLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers.protocol_contacts import YowContactsIqProtocolLayer
from yowsup.layers import YowParallelLayer
class YowsupSyncStack(object):
def __init__(self, credentials, contacts, encryptionEnabled = False):
"""
:param credentials:
:param contacts: list of [jid ]
:param encryptionEnabled:
:return:
"""
if encryptionEnabled:
from yowsup.layers.axolotl import YowAxolotlLayer
layers = (
SyncLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowContactsIqProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer]),
YowAxolotlLayer,
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
else:
layers = (
SyncLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowContactsIqProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer]),
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
self.stack = YowStack(layers)
self.stack.setProp(SyncLayer.PROP_CONTACTS, contacts)
self.stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True)
self.stack.setCredentials(credentials)
def start(self):
self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
try:
self.stack.loop()
except AuthError as e:
print("Authentication Error: %s" % e.message) | unknown | codeparrot/codeparrot-clean | ||
#pragma once

#include <c10/core/impl/PyInterpreter.h>
#include <c10/macros/Export.h>

namespace c10::impl {

// Thread-local storage for the Python interpreter that should service the
// Python dispatcher, if any.
struct C10_API PythonDispatcherTLS {
  // Install `state` as the current thread's Python dispatcher interpreter.
  static void set_state(PyInterpreter* state);
  // Return the current thread-local interpreter (null when unset).
  static PyInterpreter* get_state();
  // Clear the thread-local state.
  static void reset_state();
};

// RAII guard: disables the Python dispatcher for its lifetime by stashing
// the current TLS value and installing null, restoring it on destruction.
// Non-copyable and non-movable so the saved state cannot be duplicated.
struct C10_API DisablePythonDispatcher {
  DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
    // `{}` value-initializes the pointer argument to null.
    PythonDispatcherTLS::set_state({});
  }
  DisablePythonDispatcher(DisablePythonDispatcher&& other) = delete;
  DisablePythonDispatcher(const DisablePythonDispatcher&) = delete;
  DisablePythonDispatcher& operator=(const DisablePythonDispatcher&) = delete;
  DisablePythonDispatcher& operator=(DisablePythonDispatcher&&) = delete;
  ~DisablePythonDispatcher() {
    PythonDispatcherTLS::set_state(old_);
  }
  // Interpreter active before this guard was constructed.
  PyInterpreter* old_;
};

} // namespace c10::impl | c | github | https://github.com/pytorch/pytorch | c10/core/impl/PythonDispatcherTLS.h
import functools
import os
import re
from io import open
from designer.core.undo_manager import WidgetDragOperation, WidgetOperation
from designer.uix.confirmation_dialog import ConfirmationDialogSave
from designer.uix.settings import SettingListContent
from designer.utils.toolbox_widgets import toolbox_widgets as widgets_common
from designer.utils.utils import (
FakeSettingList,
get_app_widget,
get_current_project,
get_designer,
ignore_proj_watcher,
show_message,
)
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.graphics import Color, Line
from kivy.properties import (
BooleanProperty,
ListProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.carousel import Carousel
from kivy.uix.filechooser import FileChooserIconView, FileChooserListView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.layout import Layout
from kivy.uix.popup import Popup
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.scatter import ScatterPlane
from kivy.uix.scatterlayout import ScatterLayout
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.tabbedpanel import TabbedPanel
class PlaygroundDragElement(BoxLayout):
    '''An instance of this class is the drag element shown when user tries to
    add a widget to :class:`~designer.components.playground.Playground`
    by dragging from :class:`~designer.components.toolbox.Toolbox` to
    :class:`~designer.components.playground.Playground`.
    '''
    playground = ObjectProperty()
    '''Reference to the :class:`~designer.components.playground.Playground`
    :data:`playground` is a :class:`~kivy.properties.ObjectProperty`
    '''
    target = ObjectProperty(allownone=True)
    '''Widget where widget is to be added.
    :data:`target` a :class:`~kivy.properties.ObjectProperty`
    '''
    can_place = BooleanProperty(False)
    '''Whether widget can be added or not.
    :data:`can_place` is a :class:`~kivy.properties.BooleanProperty`
    '''
    drag_type = OptionProperty('new widget', options=('new widget',
                                                      'dragndrop'))
    '''Specifies the type of dragging currently done by PlaygroundDragElement.
    If it is 'new widget', then it means a new widget will be added
    If it is 'dragndrop', then it means already created widget is
    drag-n-drop, from one position to another.
    :data:`drag_type` is a :class:`~kivy.properties.OptionProperty`
    '''
    drag_parent = ObjectProperty(None)
    '''Parent of currently dragged widget.
    Will be none if 'drag_type' is 'new widget'
    :data:`drag_parent` is a :class:`~kivy.properties.ObjectProperty`
    '''
    widgettree = ObjectProperty(None)
    '''Reference to class:`~designer.nodetree.WidgetsTree`,
    the widget_tree of Designer.
    :data:`widgettree` is a :class:`~kivy.properties.ObjectProperty`
    '''
    child = ObjectProperty(None)
    '''The widget which is currently being dragged.
    :data:`child` is a :class:`~kivy.properties.ObjectProperty`
    '''
    widget = ObjectProperty(None)
    '''The widget which is currently being dragged and will be added to the UI.
    This is similar to child, however does not contains custom style used
    to present the dragging widget
    :data:`widget` is a :class:`~kivy.properties.ObjectProperty`
    '''

    def __init__(self, **kwargs):
        super(PlaygroundDragElement, self).__init__(**kwargs)
        # Show the visual stand-in right away when one was supplied.
        if self.child:
            self.add_widget(self.child)

    def show_lines_on_child(self, *args):
        '''To schedule Clock's callback for _show_lines_on_child.
        '''
        Clock.schedule_once(self._show_lines_on_child)

    def on_widget(self, *args):
        # Track the dragged widget's last real parent (as KD__last_parent)
        # so it can still be detached/restored after being re-parented
        # during the drag.
        def update_parent(*largs):
            p = self.widget.parent
            if p:
                self.widget.KD__last_parent = p
                # Stop tracking once a real parent has been recorded.
                self.widget.unbind(parent=update_parent)
        self.widget.bind(parent=update_parent)
        update_parent()

    def _show_lines_on_child(self, *args):
        '''To show boundaries around the child.
        '''
        x, y = self.child.pos
        right, top = self.child.right, self.child.top
        points = [x, y, right, y, right, top, x, top]
        # Skip the redraw if the outline is already at these coordinates.
        if hasattr(self, '_canvas_instr'):
            points_equal = True
            for i in range(len(points)):
                if points[i] != self._canvas_instr[1].points[i]:
                    points_equal = False
                    break
            if points_equal:
                return
        self.remove_lines_on_child()
        with self.child.canvas.after:
            color = Color(1, 0.5, 0.8)
            line = Line(points=points, close=True, width=2.)
            self._canvas_instr = [color, line]

    def remove_lines_on_child(self, *args):
        '''Remove lines from canvas of child.
        '''
        # points[0] == -1 is used as a sentinel meaning "already removed".
        if hasattr(self, '_canvas_instr') \
                and self._canvas_instr[1].points[0] != -1:
            try:
                self.child.canvas.after.remove(self._canvas_instr[0])
                self.child.canvas.after.remove(self._canvas_instr[1])
            except ValueError:
                pass
            self._canvas_instr[1].points[0] = -1
        Clock.unschedule(self._show_lines_on_child)

    def is_intersecting_playground(self, x, y):
        '''To determine whether x,y is inside playground
        '''
        if not self.playground:
            return False
        if self.playground.x <= x <= self.playground.right \
                and self.playground.y <= y <= self.playground.top:
            return True
        return False

    def is_intersecting_widgettree(self, x, y):
        '''To determine whether x,y is inside the widget tree
        '''
        if not self.widgettree:
            return False
        if self.widgettree.x <= x <= self.widgettree.right \
                and self.widgettree.y <= y <= self.widgettree.top:
            return True
        return False

    def on_touch_move(self, touch):
        '''This is responsible for moving the drag element and showing where
        the widget it contains will be added.
        '''
        # if this widget is not being dragged, exit
        if touch.grab_current is not self:
            return False
        # update dragging position
        self.center_x = touch.x
        self.y = touch.y + 20
        # the widget where it will be added
        target = None
        # now, getting the target widget
        # if is targeting the playground
        if self.is_intersecting_playground(touch.x, touch.y):
            target = self.playground.try_place_widget(
                self.widget, touch.x, touch.y)
        # if is targeting widget tree
        elif self.is_intersecting_widgettree(touch.x, touch.y):
            pos_in_tree = self.widgettree.tree.to_widget(
                touch.x, touch.y)
            node = self.widgettree.tree.get_node_at_pos(pos_in_tree)
            if node:
                # if the same widget, skip
                if node.node == self.widget:
                    return True
                else:
                    # otherwise, runs recursively until get a valid target
                    while node and node.node != self.playground.sandbox:
                        widget = node.node
                        # NOTE(review): passes self.children here while
                        # on_touch_up passes self.widget; harmless in effect
                        # but worth confirming against upstream.
                        if self.playground.allowed_target_for(
                                widget, self.children):
                            # current target is valid
                            target = widget
                            break
                        # runs each parent to find a valid target
                        node = node.parent_node
        self.target = target
        # check if its added somewhere, remove it
        if self.widget.parent:
            if self.target:
                # special cases
                if isinstance(self.target, ScreenManager):
                    if isinstance(self.widget, Screen):
                        self.target.remove_widget(self.widget)
                    self.target.real_remove_widget(self.widget)
                elif not isinstance(self.target, TabbedPanel):
                    self.target.remove_widget(self.widget)
            # inside a usual widget
            if self.widget.parent:
                self.widget.parent.remove_widget(self.widget)
        # check if it can be placed in the target
        # if moving from another place
        if self.drag_type == 'dragndrop':
            self.can_place = target == self.drag_parent
        # if is a new widget
        else:
            self.can_place = target is not None
        # if cannot add it, go away
        if not target or not self.can_place:
            return True
        # try to add the widget (preview only; final add happens on touch up)
        self.playground.sandbox.error_active = True
        with self.playground.sandbox:
            # adding a new widget
            if isinstance(target, ScreenManager):
                target.real_add_widget(self.widget)
            # usual target
            else:
                target.add_widget(self.widget)
            App.get_running_app().focus_widget(target)
        self.playground.sandbox.error_active = False
        return True

    def on_touch_up(self, touch):
        '''This is responsible for adding the widget to the parent
        '''
        # if this widget is not being dragged, exit
        if touch.grab_current is not self:
            return False
        # aborts the dragging
        touch.ungrab(self)
        widget_from = None
        target = None
        # get info about the dragged widget
        if self.is_intersecting_playground(touch.x, touch.y):
            # if added by playground
            target = self.playground.try_place_widget(
                self.widget, touch.x, touch.y)
            widget_from = 'playground'
        elif self.is_intersecting_widgettree(touch.x, touch.y):
            # if added by widgettree
            pos_in_tree = self.widgettree.tree.to_widget(
                touch.x, touch.y)
            node = self.widgettree.tree.get_node_at_pos(pos_in_tree)
            if node:
                widget = node.node
                while widget and widget != self.playground.sandbox:
                    if self.playground.allowed_target_for(
                            widget, self.widget):
                        target = widget
                        widget_from = 'treeview'
                        break
                    widget = widget.parent
        # check if has parent
        parent = self.widget.parent
        # check if it's possible to add it in the target
        if self.drag_type == 'dragndrop':
            self.can_place = target == self.drag_parent and \
                parent is not None
        else:
            self.can_place = target is not None and parent is not None
        # try to find the widget on parent (from the preview) and remove it
        index = -1
        if self.target:
            try:
                index = self.target.children.index(self.widget)
            except ValueError:
                pass
            if isinstance(self.target, ScreenManager):
                self.target.real_remove_widget(self.widget)
            else:
                self.target.remove_widget(self.widget)
        # check if we can add this new widget
        self.playground.sandbox.error_active = True
        with self.playground.sandbox:
            if self.can_place or self.playground.root is None:
                # if widget already exists, just moving it
                if self.drag_type == 'dragndrop':
                    if parent:
                        if widget_from == 'playground':
                            # adding by playground
                            self.playground.place_widget(
                                self.widget, touch.x, touch.y, index=index)
                        else:
                            # adding by tree
                            self.playground.place_widget(
                                self.widget, touch.x, touch.y,
                                index=index, target=target)
                else:
                    # adding by playground
                    if widget_from == 'playground':
                        self.playground.place_widget(
                            self.widget, touch.x, touch.y)
                    # adding by widget tree
                    else:
                        self.playground.add_widget_to_parent(
                            self.widget, target)
            # if could not add it and from playground, undo the modifications
            elif self.drag_type == 'dragndrop':
                # just cant add it, undo the last modifications
                self.playground.undo_dragging()
                # if widget outside of screen is removed
                if target is None:
                    self.playground.remove_widget_from_parent(self.widget)
        self.playground.sandbox.error_active = False
        # remove the dragging widget
        self.target = None
        if self.parent:
            self.parent.remove_widget(self)
        self.playground.drag_operation = []
        self.playground.from_drag = False
        return True

    def fit_child(self, *args):
        '''Updates it's size to display the child correctly
        '''
        if self.child:
            self.size = self.child.size
class Playground(ScatterPlane):
    '''Playground represents the actual area where user will add and delete
    the widgets. It has event on_show_edit, which is emitted whenever
    Playground is clicked.
    '''
    root = ObjectProperty(None, allownone=True)
    '''This property represents the root widget.
    :data:`root` is a :class:`~kivy.properties.ObjectProperty`
    '''
    root_name = StringProperty('')
    '''Specifies the current widget under modification on Playground
    :data:`root_name` is a :class:`~kivy.properties.StringProperty`
    '''
    root_app_widget = ObjectProperty(None, allownone=True)
    '''This property represents the root widget as a ProjectManager.AppWidget
    :data:`root_app_widget` is a :class:`~kivy.properties.ObjectProperty`
    '''
    # Reference to the widget tree shown by the designer (undocumented in
    # the original source).
    tree = ObjectProperty()
    clicked = BooleanProperty(False)
    '''This property represents whether
    :class:`~designer.components.playground.Playground`
    has been clicked or not
    :data:`clicked` is a :class:`~kivy.properties.BooleanProperty`
    '''
    sandbox = ObjectProperty(None)
    '''This property represents the sandbox widget which is added to
    :class:`~designer.components.playground.Playground`.
    :data:`sandbox` is a :class:`~kivy.properties.ObjectProperty`
    '''
    kv_code_input = ObjectProperty()
    '''This property refers to the
    :class:`~designer.components.ui_creator.UICreator`'s KVLangArea.
    :data:`kv_code_input` is a :class:`~kivy.properties.ObjectProperty`
    '''
    widgettree = ObjectProperty()
    '''This property refers to the
    :class:`~designer.components.ui_creator.UICreator`'s WidgetTree.
    :data:`widgettree` is a :class:`~kivy.properties.ObjectProperty`
    '''
    from_drag = BooleanProperty(False)
    '''Specifies whether a widget is dragged or a new widget is added.
    :data:`from_drag` is a :class:`~kivy.properties.BooleanProperty`
    '''
    drag_operation = ListProperty((), allownone=True)
    '''Stores data of drag_operation in form of a tuple.
    drag_operation[0] is the widget which has been dragged.
    drag_operation[1] is the parent of above widget.
    drag_operation[2] is the index of widget in parent's children property.
    :data:`drag_operation` is a :class:`~kivy.properties.ListProperty`
    '''
    _touch_still_down = BooleanProperty(False)
    '''Specifies whether touch is still down or not.
    :data:`_touch_still_down` is a :class:`~kivy.properties.BooleanProperty`
    '''
    dragging = BooleanProperty(False)
    '''Specifies whether currently dragging is performed or not.
    :data:`dragging` is a :class:`~kivy.properties.BooleanProperty`
    '''
    # Kivy event registration: observers may bind to 'on_show_edit'.
    __events__ = ('on_show_edit',)
def __init__(self, **kwargs):
super(Playground, self).__init__(**kwargs)
self.keyboard = None
self.selected_widget = None
self.undo_manager = None
self._widget_x = -1
self._widget_y = -1
self.widget_to_paste = None
self._popup = None
self._last_root = None
def on_root(self, *args):
if self.root:
self._last_root = self.root
def on_pos(self, *args):
'''Default handler for 'on_pos'
'''
if self.sandbox:
self.sandbox.pos = self.pos
def on_size(self, *args):
'''Default handler for 'on_size'
'''
if self.sandbox:
self.sandbox.size = self.size
    def on_show_edit(self, *args):
        '''Default handler for the 'on_show_edit' event. Intentionally a
        no-op: observers bind to the event to react to playground clicks.
        '''
        pass
def on_widget_select_pressed(self, *args):
'''Event handler to playground widget selector press
'''
d = get_designer()
if d.popup:
return False
widgets = get_current_project().app_widgets
app_widgets = []
for name in widgets.keys():
widget = widgets[name]
if widget.is_root:
name = 'Root - ' + name
app_widgets.append(name)
fake_setting = FakeSettingList()
fake_setting.allow_custom = False
fake_setting.items = app_widgets
fake_setting.desc = 'Select the Widget to edit on Playground'
fake_setting.group = 'playground_widget'
content = SettingListContent(setting=fake_setting)
popup_width = min(0.95 * Window.width, 500)
popup_height = min(0.95 * Window.height, 500)
d.popup = Popup(
content=content,
title='Playground - Edit Widget',
size_hint=(None, None),
size=(popup_width, popup_height),
auto_dismiss=False
)
content.bind(on_apply=self._perform_select_root_widget,
on_cancel=d.close_popup)
content.selected_items = [self.root_name]
if self.root_app_widget and self.root_app_widget.is_root:
content.selected_items = ['Root - ' + self.root_name]
content.show_items()
d.popup.open()
def _perform_select_root_widget(self, instance, selected_item, *args):
'''On Playground edit item selection
:type selected_item: instance of selected array
'''
get_designer().close_popup()
name = selected_item[0]
# remove Root label from widget name
if name.startswith('Root - '):
name = name.replace('Root - ', '')
self.load_widget(name)
def no_widget(self, *args):
'''Remove any reamining sandbox content and shows an message
'''
self.root = None
show_message('No widget found!', 5, 'error')
self.sandbox.clear_widgets()
    def load_widget(self, widget_name, update_kv_lang=True):
        '''Load and display a widget given its name.
        If widget is not found, shows information on status bar and clears
        the playground.
        :param widget_name: name of the widget to display
        :param update_kv_lang: if True, reloads the kv file. If False, keep
            the kv lang text
        '''
        d = get_designer()
        if d.popup:
            # if has a popup, it's not using playground
            return False
        widgets = get_current_project().app_widgets
        # if displaying no widget or this widget is not known, load directly
        if self.root is None or self.root_app_widget is None or \
                widget_name not in widgets:
            self._perform_load_widget(widget_name, update_kv_lang)
            return
        # if a known widget, continue
        target = widgets[widget_name]
        # check if we are switching kv files with unsaved modifications;
        # if so, ask the user before discarding them
        if target.kv_path != self.root_app_widget.kv_path and \
                not self.kv_code_input.saved:
            file_name = os.path.basename(self.root_app_widget.kv_path)
            _confirm_dlg = ConfirmationDialogSave(
                'The %s was not saved. \n'
                'If you continue, your modifications will be lost.\n'
                'Do you want to save and continue?' % file_name
            )

            @ignore_proj_watcher
            def save_and_load(*args):
                # save the project, then switch (kv reloaded from disk)
                get_current_project().save()
                self._perform_load_widget(widget_name, True)

            def dont_save(*args):
                # discard modifications and switch
                d.close_popup()
                self._perform_load_widget(widget_name, True)

            _confirm_dlg.bind(
                on_save=save_and_load,
                on_dont_save=dont_save,
                on_cancel=d.close_popup)
            d.popup = Popup(title='Change Widget', content=_confirm_dlg,
                            size_hint=(None, None), size=('400pt', '150pt'),
                            auto_dismiss=False)
            d.popup.open()
            return
        self._perform_load_widget(widget_name, update_kv_lang)
def _perform_load_widget(self, widget_name, update_kv_lang=True):
'''Loads the widget if everything is ok
:param widget_name name of the widget to display
:param update_kv_lang if True, reloads the kv file. If False, keep the
kv lang text
'''
self.root_name = widget_name
self.root = None
self.sandbox.clear_widgets()
widgets = get_current_project().app_widgets
try:
target = widgets[widget_name]
if update_kv_lang:
# updates kv lang text with file
kv_path = target.kv_path
if kv_path:
self.kv_code_input.text = open(kv_path,
encoding='utf-8').read()
else:
show_message(
'Could not found the associated .kv file with %s'
' widget' % widget_name, 5, 'error'
)
self.kv_code_input.text = ''
self.root_app_widget = target
wdg = get_app_widget(target)
if wdg is None:
self.kv_code_input.have_error = True
self.add_widget_to_parent(wdg, None, from_undo=True, from_kv=True)
self.kv_code_input.path = target.kv_path
except (KeyError, AttributeError):
show_message(
'Failed to load %s widget' % widget_name, 5, 'error')
    def on_reload_kv(self, kv_lang_area, text, force):
        '''Reloads widgets from kv lang input and updates the visible widget.
        If force is True, all widgets must be reloaded before parsing the
        new kv.
        :param force: if True, will parse the project again
        :param text: kv source
        :param kv_lang_area: instance of kivy lang area
        '''
        proj = get_current_project()
        # copy of initial widgets, taken before re-parsing
        widgets = dict(proj.app_widgets)
        try:
            if force:
                proj.parse()
            if self.root_name:
                kv_path = widgets[self.root_name].kv_path
            else:
                kv_path = self.kv_code_input.path
            proj.parse_kv(text, kv_path)
            # if was displaying one widget, but it was removed by the edit
            if self.root_name and self.root_name not in proj.app_widgets:
                self.load_widget_from_file(self.root_app_widget.kv_path)
                show_message(
                    'The %s is not available. Displaying another widget'
                    % self.root_name, 5, 'info'
                )
            elif not self.root_name and not widgets and proj.app_widgets:
                # if was not displaying a widget because there was no widget
                # and now a widget is available
                # NOTE(review): keys()[-1] picks the LAST inserted widget
                # although the intent reads "first" — confirm upstream.
                first_wdg = proj.app_widgets[list(proj.app_widgets.keys())[-1]]
                self.load_widget(first_wdg.name, update_kv_lang=False)
            else:
                # displaying an usual widget
                self.load_widget(self.root_name, update_kv_lang=False)
        except KeyError:
            show_message(
                'Failed to load %s widget' % self.root_name, 5, 'error')
def load_widget_from_file(self, kv_path):
'''Loads first widget from a file
:param kv_path: absolute kv path
'''
self.sandbox.clear_widgets()
proj = get_current_project()
widgets = proj.app_widgets
if not os.path.exists(kv_path):
show_message(kv_path + ' not exists', 5, 'error')
return
self.kv_code_input.text = open(kv_path, 'r', encoding='utf-8').read()
self.kv_code_input.path = kv_path
for key in widgets:
wd = widgets[key]
if wd.kv_path == kv_path:
self.load_widget(wd.name, update_kv_lang=False)
return
# if not found a widget in the path, open the first one
if len(widgets):
first_wdg = widgets[list(widgets.keys())[-1]]
self.load_widget(first_wdg.name, update_kv_lang=False)
return
show_message('No widget was found', 5, 'error')
def try_place_widget(self, widget, x, y):
'''This function is used to determine where to add the widget
:param y: new widget position
:param x: new widget position
:param widget: widget to be added
'''
x, y = self.to_local(x, y)
return self.find_target(x, y, self.root, widget)
def place_widget(self, widget, x, y, index=0, target=None):
'''This function is used to first determine the target where to add
the widget. Then it add that widget.
:param target: where this widget should be added.
If None, coordinates will be used to locate the target
:param index: index used in add_widget
:param x: widget position x
:param y: widget position y
:param widget: widget to add
'''
local_x, local_y = self.to_local(x, y)
if not target:
target = self.find_target(local_x, local_y, self.root, widget)
if not self.from_drag:
self.add_widget_to_parent(widget, target)
else:
extra_args = {'x': x, 'y': y, 'index': index}
self.add_widget_to_parent(widget, target, from_kv=True,
from_undo=True, extra_args=extra_args)
def drag_wigdet(self, widget, target, extra_args, from_undo=False):
'''This function will drag widget from one place to another inside
target
'''
extra_args['prev_x'], extra_args['prev_y'] = \
self.to_parent(self._widget_x, self._widget_y)
if isinstance(target, FloatLayout) or \
isinstance(target, ScatterLayout) or \
isinstance(target, RelativeLayout):
target.add_widget(widget, self.drag_operation[2])
widget.pos_hint = {}
widget.x, widget.y = self.to_local(extra_args['x'],
extra_args['y'])
self.from_drag = False
added = True
local_x, local_y = widget.x - target.x, widget.y - target.y
self.kv_code_input.set_property_value(
widget, 'pos_hint', "{'x': %f, 'y': %f}" % (
local_x / target.width, local_y / target.height),
'ListPropery')
if not from_undo:
self.undo_manager.push_operation(
WidgetDragOperation(widget, target,
self.drag_operation[1],
self.drag_operation[2],
self, extra_args=extra_args))
elif isinstance(target, BoxLayout) or \
isinstance(target, AnchorLayout) or \
isinstance(target, GridLayout):
target.add_widget(widget, extra_args['index'])
self.from_drag = False
added = True
if 'prev_index' in extra_args:
self.kv_code_input.shift_widget(widget,
extra_args['prev_index'])
else:
self.kv_code_input.shift_widget(widget, self.drag_operation[2])
if not from_undo:
self.undo_manager.push_operation(
WidgetDragOperation(widget, target,
self.drag_operation[1],
self.drag_operation[2],
self, extra_args=extra_args))
def add_widget_to_parent(self, widget, target, from_undo=False,
from_kv=False, kv_str='', extra_args={}):
'''This function is used to add the widget to the target.
:param from_undo: this action is comming from undo
:param target: target will receive the widget
:param widget: widget to be added
'''
added = False
if widget is None:
return False
with self.sandbox:
if target is None:
self.root = widget
self.sandbox.add_widget(widget)
widget.size = self.sandbox.size
added = True
else:
if extra_args and self.from_drag:
self.drag_wigdet(widget, target, extra_args=extra_args)
else:
target.add_widget(widget)
added = True
if not added:
return False
self.widgettree.refresh()
if not from_kv:
if not kv_str and hasattr(widget, '_KD_KV_STR'):
kv_str = widget._KD_KV_STR
del widget._KD_KV_STR
self.kv_code_input.add_widget_to_parent(widget, target,
kv_str=kv_str)
if not from_undo:
root = App.get_running_app().root
root.undo_manager.push_operation(WidgetOperation('add',
widget, target,
self, ''))
def get_widget(self, widget_name, **default_args):
'''This function is used to get the instance of class of name,
widgetname.
:param widget_name: name of the widget to be instantiated
'''
widget = None
for _widget in widgets_common:
if _widget[0] == widget_name and _widget[1] == 'custom':
app_widgets = get_current_project().app_widgets
widget = get_app_widget(app_widgets[widget_name])
break
if not widget:
try:
widget = Factory.get(widget_name)(**default_args)
except:
pass
return widget
def generate_kv_from_args(self, widget_name, kv_dict, *args):
'''Converts a dictionary to kv string
:param widget_name: name of the widget
:param kv_dict: dict with widget rules
'''
kv = widget_name + ':'
indent = '\n' + ' ' * 4
try: # check whether python knows about 'basestring'
basestring
except NameError: # no, it doesn't (it's Python3); use 'str' instead
basestring = str
for key in kv_dict.keys():
value = kv_dict[key]
if isinstance(value, basestring):
value = "'" + value + "'"
kv += indent + key + ': ' + str(value)
return kv
    def get_playground_drag_element(self, instance, widget_name, touch,
                                    default_args, extra_args, *args):
        '''This function will return the desired playground element
        for widget_name.
        :param instance: if from toolbox, ToolboxButton instance.
            None otherwise
        :param widget_name: name of the widget that will be dragged
        :param touch: instance of the current touch
        :param default_args: default widget args
        :param extra_args: extra args used to display the dragging widget
        '''
        # create the real widget that will be added ...
        widget = self.get_widget(widget_name, **default_args)
        # ... remember its kv snippet for add_widget_to_parent()
        widget._KD_KV_STR = self.generate_kv_from_args(widget_name,
                                                       default_args)
        # ... and a styled stand-in (default + extra args) to show while
        # dragging
        values = default_args.copy()
        values.update(extra_args)
        child = self.get_widget(widget_name, **values)
        custom = False
        for op in widgets_common:
            if op[0] == widget_name:
                if op[1] == 'custom':
                    custom = True
                break
        container = PlaygroundDragElement(
            playground=self, child=child, widget=widget)
        if not custom:
            container.fit_child()
        touch.grab(container)
        touch_pos = [touch.x, touch.y]
        if instance:
            # convert from the toolbox button's space to window coords
            touch_pos = instance.to_window(*touch.pos)
        container.center_x = touch_pos[0]
        container.y = touch_pos[1] + 20
        return container
def cleanup(self):
'''This function is used to clean the state of Playground, cleaning
the changes done by currently opened project.
'''
# Cleanup is called when project is created or loaded
# so this operation shouldn't be recorded in Undo
if self.root:
self.remove_widget_from_parent(self.root, from_undo=True,
from_kv=True)
self.selected_widget = None
self._widget_x = -1
self._widget_y = -1
self.widget_to_paste = None
    def remove_widget_from_parent(self, widget, from_undo=False,
                                  from_kv=False):
        '''This function is used to remove widget from its parent.
        :param widget: widget to be removed
        :param from_undo: is coming from an undo action
        :param from_kv: True when the kv text already reflects the removal
        '''
        parent = None
        d = get_designer()
        if not widget:
            return
        removed_str = ''
        if not from_kv:
            removed_str = self.kv_code_input.remove_widget_from_parent(widget)
        if widget != self.root:
            parent = widget.parent
            # fall back to the parent recorded during dragging
            if parent is None and hasattr(widget, 'KD__last_parent'):
                parent = widget.KD__last_parent
            # NOTE(review): parent may still be None here, which would
            # raise AttributeError on parent.parent — confirm whether
            # callers guarantee a parent.
            if isinstance(parent.parent, Carousel):
                parent.parent.remove_widget(widget)
            elif isinstance(parent, ScreenManager):
                if isinstance(widget, Screen):
                    parent.remove_widget(widget)
                else:
                    parent.real_remove_widget(widget)
            else:
                parent.remove_widget(widget)
        else:
            self.root.parent.remove_widget(self.root)
            self.root = None
        # if is designer (full UI present), refresh the tree view
        if hasattr(d, 'ui_creator'):
            d.ui_creator.widgettree.refresh()
        if not from_undo and hasattr(d, 'ui_creator'):
            d.undo_manager.push_operation(
                WidgetOperation('remove', widget, parent, self, removed_str))
    def find_target(self, x, y, target, widget=None):
        '''Recursively find the deepest widget colliding with (x, y) that
        can act as a drop target for *widget* (or just the deepest hit
        when widget is None).
        :param x: position to search
        :param y: position to search
        :param target: widget to search over
        :param widget: widget to be added in target
        '''
        if target is None or not target.collide_point(x, y):
            return None
        x, y = target.to_local(x, y)
        class_rules = get_current_project().app_widgets
        for child in target.children:
            is_child_custom = False
            if child == widget:
                continue
            # custom: the child is one of the project's own widget rules
            for rule_name in class_rules:
                if rule_name == type(child).__name__:
                    is_child_custom = True
                    break
            # complex: flagged as such in the toolbox widget catalogue
            is_child_complex = False
            for _widget in widgets_common:
                if _widget[0] == type(child).__name__ and \
                        _widget[1] == 'complex':
                    is_child_complex = True
                    break
            # if point lies in custom widget's child then return the custom
            # widget itself rather than descending into its internals
            if is_child_custom or is_child_complex:
                if not widget and self._custom_widget_collides(child, x, y):
                    return child
                elif widget:
                    if isinstance(child, TabbedPanel):
                        # only the current tab's content can receive drops
                        if child.current_tab:
                            _item = self.find_target(
                                x, y, child.current_tab.content, widget)
                            return _item
                        else:
                            return target
                    elif isinstance(child.parent, Carousel):
                        t = self.find_target(x, y, child, widget)
                        return t
            else:
                if not child.collide_point(x, y):
                    continue
                if not self.allowed_target_for(child, widget) and not \
                        child.children:
                    continue
                return self.find_target(x, y, child, widget)
        return target
def _custom_widget_collides(self, widget, x, y):
'''This widget is used to find which custom widget collides with x,y
'''
if not widget:
return False
if widget.collide_point(x, y):
return True
x, y = widget.to_local(x, y)
for child in widget.children:
if self._custom_widget_collides(child, x, y):
return True
return False
def allowed_target_for(self, target, widget):
'''This function is used to determine if widget could be added to
target.
'''
# stop on complex widget
t = target if widget else target.parent
if isinstance(t, FileChooserListView):
return False
if isinstance(t, FileChooserIconView):
return False
# if we don't have widget, always return true
if widget is None:
return True
is_widget_layout = isinstance(widget, Layout)
is_target_layout = isinstance(target, Layout)
if is_widget_layout and is_target_layout:
return True
if is_target_layout or isinstance(target, Carousel):
return True
return False
def _keyboard_released(self, *args):
'''Called when self.keyboard is released
'''
self.keyboard.unbind(on_key_down=self._on_keyboard_down)
self.keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
'''Called when a key on keyboard is pressed
'''
if modifiers != [] and modifiers[-1] == 'ctrl':
if keycode[1] == 'c':
self.do_copy()
elif keycode[1] == 'v':
self.do_paste()
elif keycode[1] == 'x':
self.do_cut()
elif keycode[1] == 'a':
self.do_select_all()
elif keycode[1] == 'z':
self.do_undo()
elif modifiers[0] == 'shift' and keycode[1] == 'z':
self.do_redo()
elif keycode[1] == 'delete':
self.do_delete()
def do_undo(self):
'''Undoes the last operation
'''
self.undo_manager.do_undo()
def do_redo(self):
'''Undoes the last operation
'''
self.undo_manager.do_redo()
def do_copy(self, for_drag=False):
'''Copy the selected widget
'''
base_widget = self.selected_widget
if base_widget:
self.widget_to_paste = self.get_widget(type(base_widget).__name__)
props = base_widget.properties()
for prop in props:
if prop == 'id' or prop == 'children':
continue
setattr(self.widget_to_paste, prop,
getattr(base_widget, prop))
self.widget_to_paste.parent = None
widget_str = self.kv_code_input. \
get_widget_text_from_kv(base_widget, None)
if not for_drag:
widget_str = re.sub(r'\s+id:\s*[\w\d_]+', '', widget_str)
self._widget_str_to_paste = widget_str
def do_paste(self):
'''Paste the selected widget to the current widget
'''
parent = self.selected_widget
if parent and self.widget_to_paste:
d = get_current_project()
class_rules = d.app_widgets
root_widget = self.root
is_child_custom = False
for rule_name in class_rules:
if rule_name == type(parent).__name__:
is_child_custom = True
break
# find appropriate parent to add widget_to_paste
while parent:
if isinstance(parent, Layout) and (not is_child_custom
or root_widget == parent):
break
parent = parent.parent
is_child_custom = False
for rule_name in class_rules:
if rule_name == type(parent).__name__:
is_child_custom = True
break
if parent is not None:
self.add_widget_to_parent(self.widget_to_paste,
parent,
kv_str=self._widget_str_to_paste)
self.widget_to_paste = None
def do_cut(self):
'''Cuts the selected widget
'''
base_widget = self.selected_widget
if base_widget and base_widget.parent:
self.widget_to_paste = base_widget
self._widget_str_to_paste = self.kv_code_input. \
get_widget_text_from_kv(base_widget, None)
self.remove_widget_from_parent(base_widget)
def do_select_all(self):
'''Select All widgets which basically means selecting root widget
'''
self.selected_widget = self.root
App.get_running_app().focus_widget(self.root)
def do_delete(self):
'''Delete the selected widget
'''
if self.selected_widget:
self.remove_widget_from_parent(self.selected_widget)
self.selected_widget = None
def on_touch_move(self, touch):
'''Default handler for 'on_touch_move'
'''
if self.widgettree.dragging is True:
return True
super(Playground, self).on_touch_move(touch)
return False
def on_touch_up(self, touch):
'''Default handler for 'on_touch_up'
'''
if super(ScatterPlane, self).collide_point(*touch.pos):
self.dragging = False
Clock.unschedule(self.start_widget_dragging)
self.dragging = False
return super(Playground, self).on_touch_up(touch)
def undo_dragging(self):
'''To undo the last dragging operation if it has not been completed.
'''
if not self.drag_operation:
return
if self.drag_operation[0].parent:
self.drag_operation[0].parent.remove_widget(self.drag_operation[0])
try:
self.drag_operation[1].add_widget(self.drag_operation[0],
self.drag_operation[2])
except TypeError:
# some widgets not allow index
self.drag_operation[1].add_widget(self.drag_operation[0])
Clock.schedule_once(functools.partial(
App.get_running_app().focus_widget,
self.drag_operation[0]), 0.01)
self.drag_operation = []
def start_widget_dragging(self, *args):
'''This function will create PlaygroundDragElement
which will start dragging currently selected widget.
'''
if not self.dragging and not self.drag_operation \
and self.selected_widget and self.selected_widget != self.root:
# x, y = self.to_local(*touch.pos)
# target = self.find_target(x, y, self.root)
drag_widget = self.selected_widget
self._widget_x, self._widget_y = drag_widget.x, drag_widget.y
index = self.selected_widget.parent.children.index(drag_widget)
self.drag_operation = (drag_widget, drag_widget.parent, index)
self.selected_widget.parent.remove_widget(self.selected_widget)
drag_elem = App.get_running_app().create_draggable_element(
None, '', self.touch, self.selected_widget)
drag_elem.drag_type = 'dragndrop'
drag_elem.drag_parent = self.drag_operation[1]
self.dragging = True
self.from_drag = True
App.get_running_app().focus_widget(None)
def on_touch_down(self, touch):
'''An override of ScatterPlane's on_touch_down.
Used to determine the current selected widget and also emits,
on_show_edit event.
'''
if super(ScatterPlane, self).collide_point(*touch.pos) and \
not self.keyboard:
win = EventLoop.window
self.keyboard = win.request_keyboard(self._keyboard_released, self)
self.keyboard.bind(on_key_down=self._on_keyboard_down)
if super(ScatterPlane, self).collide_point(*touch.pos):
if not self.dragging:
self.touch = touch
Clock.schedule_once(self.start_widget_dragging, 0.5)
x, y = self.to_local(*touch.pos)
target = self.find_target(x, y, self.root)
self.selected_widget = target
App.get_running_app().focus_widget(target)
self.clicked = True
self.dispatch('on_show_edit', Playground)
return True
if self.parent.collide_point(*touch.pos):
super(Playground, self).on_touch_down(touch)
return False | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"unsafe"
)
const (
_NSIG = 33
_SI_USER = 0
_SS_DISABLE = 4
_SIG_BLOCK = 1
_SIG_UNBLOCK = 2
_SIG_SETMASK = 3
)
type mOS struct {
waitsema uint32 // semaphore for parking on locks
}
//go:noescape
func lwp_create(param *lwpparams) int32
//go:noescape
func sigaltstack(new, old *stackt)
//go:noescape
func sigaction(sig uint32, new, old *sigactiont)
//go:noescape
func sigprocmask(how int32, new, old *sigset)
//go:noescape
func setitimer(mode int32, new, old *itimerval)
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func raiseproc(sig uint32)
func lwp_gettid() int32
func lwp_kill(pid, tid int32, sig int)
//go:noescape
func sys_umtx_sleep(addr *uint32, val, timeout int32) int32
//go:noescape
func sys_umtx_wakeup(addr *uint32, val int32) int32
func osyield()
//go:nosplit
func osyield_no_g() {
osyield()
}
func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
func pipe2(flags int32) (r, w int32, errno int32)
func fcntl(fd, cmd, arg int32) (ret int32, errno int32)
func issetugid() int32
// From DragonFly's <sys/sysctl.h>
const (
_CTL_HW = 6
_HW_NCPU = 3
_HW_PAGESIZE = 7
)
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
func getCPUCount() int32 {
mib := [2]uint32{_CTL_HW, _HW_NCPU}
out := uint32(0)
nout := unsafe.Sizeof(out)
ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
if ret >= 0 {
return int32(out)
}
return 1
}
func getPageSize() uintptr {
mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
out := uint32(0)
nout := unsafe.Sizeof(out)
ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
if ret >= 0 {
return uintptr(out)
}
return 0
}
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
systemstack(func() {
futexsleep1(addr, val, ns)
})
}
func futexsleep1(addr *uint32, val uint32, ns int64) {
var timeout int32
if ns >= 0 {
// The timeout is specified in microseconds - ensure that we
// do not end up dividing to zero, which would put us to sleep
// indefinitely...
timeout = int32(ns / 1000)
if timeout == 0 {
timeout = 1
}
}
// sys_umtx_sleep will return EWOULDBLOCK (EAGAIN) when the timeout
// expires or EBUSY if the mutex value does not match.
ret := sys_umtx_sleep(addr, int32(val), timeout)
if ret >= 0 || ret == -_EINTR || ret == -_EAGAIN || ret == -_EBUSY {
return
}
print("umtx_sleep addr=", addr, " val=", val, " ret=", ret, "\n")
*(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
}
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
ret := sys_umtx_wakeup(addr, int32(cnt))
if ret >= 0 {
return
}
systemstack(func() {
print("umtx_wake_addr=", addr, " ret=", ret, "\n")
*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
})
}
func lwp_start(uintptr)
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", abi.FuncPCABI0(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
start_func: abi.FuncPCABI0(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
tid2: nil,
}
// TODO: Check for error.
retryOnEAGAIN(func() int32 {
lwp_create(¶ms)
return 0
})
sigprocmask(_SIG_SETMASK, &oset, nil)
}
func osinit() {
numCPUStartup = getCPUCount()
if physPageSize == 0 {
physPageSize = getPageSize()
}
}
var urandom_dev = []byte("/dev/urandom\x00")
//go:nosplit
func readRandom(r []byte) int {
fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
closefd(fd)
return int(n)
}
func goenvs() {
goenvs_unix()
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
mp.gsignal = malg(32 * 1024)
mp.gsignal.m = mp
}
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
getg().m.procid = uint64(lwp_gettid())
minitSignals()
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
getg().m.procid = 0
}
// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
func sigtramp()
type sigactiont struct {
sa_sigaction uintptr
sa_flags int32
sa_mask sigset
}
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
}
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
throw("setsigstack")
}
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
var sa sigactiont
sigaction(i, nil, &sa)
return sa.sa_sigaction
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp
}
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}
func sigdelset(mask *sigset, i int) {
mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}
//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}
func setProcessCPUProfiler(hz int32) {
setProcessCPUProfilerTimer(hz)
}
func setThreadCPUProfiler(hz int32) {
setThreadCPUProfilerHz(hz)
}
//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
return true
}
func sysargs(argc int32, argv **byte) {
n := argc + 1
// skip over argv, envp to get to auxv
for argv_index(argv, n) != nil {
n++
}
// skip NULL separator
n++
auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
pairs := sysauxv(auxvp[:])
auxv = auxvp[: pairs*2 : pairs*2]
}
const (
_AT_NULL = 0
_AT_PAGESZ = 6
)
func sysauxv(auxv []uintptr) (pairs int) {
var i int
for i = 0; auxv[i] != _AT_NULL; i += 2 {
tag, val := auxv[i], auxv[i+1]
switch tag {
case _AT_PAGESZ:
physPageSize = val
}
}
return i / 2
}
// raise sends a signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
lwp_kill(-1, lwp_gettid(), int(sig))
}
func signalM(mp *m, sig int) {
lwp_kill(-1, int32(mp.procid), sig)
}
// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31
//go:nosplit
func runPerThreadSyscall() {
throw("runPerThreadSyscall only valid on linux")
} | go | github | https://github.com/golang/go | src/runtime/os_dragonfly.go |
import os
import sys
from xbmcswift2 import Plugin
import xbmcaddon
__addonname__ = 'plugin.video.rainiertamayo'
__addonpath__ = xbmcaddon.Addon(id=__addonname__).getAddonInfo('path')
# append lib directory
sys.path.append(os.path.join(__addonpath__, 'resources', 'lib'))
from rainiertamayo import RainierTamayo
plugin = Plugin()
rainiertamayo = RainierTamayo()
# TODO : Factor function get_<item>
# for item in ITEMS = ['newests', 'categories', 'series']
@plugin.route('/')
def index():
"""Display plugin's main menu."""
entries = {'Newest Videos': 'get_newests',
'Movies': 'get_movies',
'Categories': 'get_categories',
'TV Series': 'get_series'}
items = [{'label': entry,
'path': plugin.url_for(entries[entry])
} for entry in entries]
return plugin.finish(items)
@plugin.cached_route('/newests')
def get_newests():
"""Display newests videos."""
videos = rainiertamayo.get_newests()
items = [{'label': video['label'],
'path': plugin.url_for('get_video',
url=video['path']),
'thumbnail': video['thumbnail']
} for video in videos]
return plugin.finish(items)
@plugin.cached_route('/movies', TTL=30)
def get_movies():
pass
@plugin.cached_route('/categories')
def get_categories():
"""Display all available categories."""
categories = rainiertamayo.get_categories()
items = [{'label': category['label'],
'path': plugin.url_for('get_category',
category=category['label'],
page='1')
} for category in categories]
return plugin.finish(items)
@plugin.cached_route('/series', TTL=90)
def get_series():
"""Display all available series."""
series = rainiertamayo.get_series()
print(series)
items = [{'label': serie['label'],
'path': plugin.url_for('get_serie',
serie=serie['label'],
season='1',
page='1')
} for serie in series]
return plugin.finish(items)
@plugin.route('/categories/<category>/<page>')
def get_category(category, page='1'):
"""Display videos for the provided category.
:param category: category to display.
:param page: category page to display."""
videos, next_page = rainiertamayo.get_category(category, page)
items = [{'label': video['label'],
'path': plugin.url_for('get_video',
url=video['path']),
'thumbnail': video['thumbnail']
} for video in videos]
if next_page:
items.insert(0, {'label': 'Next >>',
'path': plugin.url_for('get_category',
category=category,
page=int(page)+1)
})
if int(page) > 1:
items.insert(0, {'label': '<< Previous',
'path': plugin.url_for('get_category',
category=category,
page=int(page)-1)
})
return plugin.finish(items)
@plugin.route('/series/<serie>/<season>/<page>')
def get_serie(serie, season='1', page='1'):
"""Display videos for the provided serie.
:param serie: serie to display.
:param season: season to display.
:param season: page to display."""
videos, next_page = rainiertamayo.get_serie(serie, season, page)
items = [{'label': video['label'],
'path': plugin.url_for('get_video',
url=video['path'])
} for video in videos]
if next_page:
items.insert(0, {'label': 'Next >>',
'path': plugin.url_for('get_serie',
season=season,
page=int(page)+1)
})
if page > 1:
items.insert(0, {'label': '<< Previous',
'path': plugin.url_for('get_serie',
season=season,
page=int(page)-1)
})
return plugin.finish(items)
@plugin.route('/videos/<page>')
def get_videos(page='1'):
"""Display videos for the provided page.
:param page: page to display."""
videos, next_page = get_videos(page)
items = [make_item(video) for video in videos]
if next_page:
items.insert(0, {'label': 'Next >>',
'path': plugin.url_for('show_videos', page=int(page)+1)
})
if page > 1:
items.insert(0, {'label': '<< Previous',
'path': plugin.url_for('show_videos', page=int(page)-1)
})
return plugin.finish(items)
@plugin.route('/video/<url>')
def get_video(url):
"""Display the provided video.
:param video: video to display."""
media_url = rainiertamayo.get_video(url)
plugin.log.info('Playing url: %s' % media_url)
plugin.set_resolved_url(media_url)
if __name__ == '__main__':
try:
plugin.run()
except Exception, e:
plugin.log.error(e) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image
import (
"image/color"
"image/color/palette"
"testing"
)
type image interface {
Image
Opaque() bool
Set(int, int, color.Color)
SubImage(Rectangle) Image
}
func cmp(cm color.Model, c0, c1 color.Color) bool {
r0, g0, b0, a0 := cm.Convert(c0).RGBA()
r1, g1, b1, a1 := cm.Convert(c1).RGBA()
return r0 == r1 && g0 == g1 && b0 == b1 && a0 == a1
}
var testImages = []struct {
name string
image func() image
}{
{"rgba", func() image { return NewRGBA(Rect(0, 0, 10, 10)) }},
{"rgba64", func() image { return NewRGBA64(Rect(0, 0, 10, 10)) }},
{"nrgba", func() image { return NewNRGBA(Rect(0, 0, 10, 10)) }},
{"nrgba64", func() image { return NewNRGBA64(Rect(0, 0, 10, 10)) }},
{"alpha", func() image { return NewAlpha(Rect(0, 0, 10, 10)) }},
{"alpha16", func() image { return NewAlpha16(Rect(0, 0, 10, 10)) }},
{"gray", func() image { return NewGray(Rect(0, 0, 10, 10)) }},
{"gray16", func() image { return NewGray16(Rect(0, 0, 10, 10)) }},
{"paletted", func() image {
return NewPaletted(Rect(0, 0, 10, 10), color.Palette{
Transparent,
Opaque,
})
}},
}
func TestImage(t *testing.T) {
for _, tc := range testImages {
m := tc.image()
if !Rect(0, 0, 10, 10).Eq(m.Bounds()) {
t.Errorf("%T: want bounds %v, got %v", m, Rect(0, 0, 10, 10), m.Bounds())
continue
}
if !cmp(m.ColorModel(), Transparent, m.At(6, 3)) {
t.Errorf("%T: at (6, 3), want a zero color, got %v", m, m.At(6, 3))
continue
}
m.Set(6, 3, Opaque)
if !cmp(m.ColorModel(), Opaque, m.At(6, 3)) {
t.Errorf("%T: at (6, 3), want a non-zero color, got %v", m, m.At(6, 3))
continue
}
if !m.SubImage(Rect(6, 3, 7, 4)).(image).Opaque() {
t.Errorf("%T: at (6, 3) was not opaque", m)
continue
}
m = m.SubImage(Rect(3, 2, 9, 8)).(image)
if !Rect(3, 2, 9, 8).Eq(m.Bounds()) {
t.Errorf("%T: sub-image want bounds %v, got %v", m, Rect(3, 2, 9, 8), m.Bounds())
continue
}
if !cmp(m.ColorModel(), Opaque, m.At(6, 3)) {
t.Errorf("%T: sub-image at (6, 3), want a non-zero color, got %v", m, m.At(6, 3))
continue
}
if !cmp(m.ColorModel(), Transparent, m.At(3, 3)) {
t.Errorf("%T: sub-image at (3, 3), want a zero color, got %v", m, m.At(3, 3))
continue
}
m.Set(3, 3, Opaque)
if !cmp(m.ColorModel(), Opaque, m.At(3, 3)) {
t.Errorf("%T: sub-image at (3, 3), want a non-zero color, got %v", m, m.At(3, 3))
continue
}
// Test that taking an empty sub-image starting at a corner does not panic.
m.SubImage(Rect(0, 0, 0, 0))
m.SubImage(Rect(10, 0, 10, 0))
m.SubImage(Rect(0, 10, 0, 10))
m.SubImage(Rect(10, 10, 10, 10))
}
}
func TestNewXxxBadRectangle(t *testing.T) {
// call calls f(r) and reports whether it ran without panicking.
call := func(f func(Rectangle), r Rectangle) (ok bool) {
defer func() {
if recover() != nil {
ok = false
}
}()
f(r)
return true
}
testCases := []struct {
name string
f func(Rectangle)
}{
{"RGBA", func(r Rectangle) { NewRGBA(r) }},
{"RGBA64", func(r Rectangle) { NewRGBA64(r) }},
{"NRGBA", func(r Rectangle) { NewNRGBA(r) }},
{"NRGBA64", func(r Rectangle) { NewNRGBA64(r) }},
{"Alpha", func(r Rectangle) { NewAlpha(r) }},
{"Alpha16", func(r Rectangle) { NewAlpha16(r) }},
{"Gray", func(r Rectangle) { NewGray(r) }},
{"Gray16", func(r Rectangle) { NewGray16(r) }},
{"CMYK", func(r Rectangle) { NewCMYK(r) }},
{"Paletted", func(r Rectangle) { NewPaletted(r, color.Palette{color.Black, color.White}) }},
{"YCbCr", func(r Rectangle) { NewYCbCr(r, YCbCrSubsampleRatio422) }},
{"NYCbCrA", func(r Rectangle) { NewNYCbCrA(r, YCbCrSubsampleRatio444) }},
}
for _, tc := range testCases {
// Calling NewXxx(r) should fail (panic, since NewXxx doesn't return an
// error) unless r's width and height are both non-negative.
for _, negDx := range []bool{false, true} {
for _, negDy := range []bool{false, true} {
r := Rectangle{
Min: Point{15, 28},
Max: Point{16, 29},
}
if negDx {
r.Max.X = 14
}
if negDy {
r.Max.Y = 27
}
got := call(tc.f, r)
want := !negDx && !negDy
if got != want {
t.Errorf("New%s: negDx=%t, negDy=%t: got %t, want %t",
tc.name, negDx, negDy, got, want)
}
}
}
// Passing a Rectangle whose width and height is MaxInt should also fail
// (panic), due to overflow.
{
zeroAsUint := uint(0)
maxUint := zeroAsUint - 1
maxInt := int(maxUint / 2)
got := call(tc.f, Rectangle{
Min: Point{0, 0},
Max: Point{maxInt, maxInt},
})
if got {
t.Errorf("New%s: overflow: got ok, want !ok", tc.name)
}
}
}
}
func Test16BitsPerColorChannel(t *testing.T) {
testColorModel := []color.Model{
color.RGBA64Model,
color.NRGBA64Model,
color.Alpha16Model,
color.Gray16Model,
}
for _, cm := range testColorModel {
c := cm.Convert(color.RGBA64{0x1234, 0x1234, 0x1234, 0x1234}) // Premultiplied alpha.
r, _, _, _ := c.RGBA()
if r != 0x1234 {
t.Errorf("%T: want red value 0x%04x got 0x%04x", c, 0x1234, r)
continue
}
}
testImage := []image{
NewRGBA64(Rect(0, 0, 10, 10)),
NewNRGBA64(Rect(0, 0, 10, 10)),
NewAlpha16(Rect(0, 0, 10, 10)),
NewGray16(Rect(0, 0, 10, 10)),
}
for _, m := range testImage {
m.Set(1, 2, color.NRGBA64{0xffff, 0xffff, 0xffff, 0x1357}) // Non-premultiplied alpha.
r, _, _, _ := m.At(1, 2).RGBA()
if r != 0x1357 {
t.Errorf("%T: want red value 0x%04x got 0x%04x", m, 0x1357, r)
continue
}
}
}
func TestRGBA64Image(t *testing.T) {
// memset sets every element of s to v.
memset := func(s []byte, v byte) {
for i := range s {
s[i] = v
}
}
r := Rect(0, 0, 3, 2)
testCases := []Image{
NewAlpha(r),
NewAlpha16(r),
NewCMYK(r),
NewGray(r),
NewGray16(r),
NewNRGBA(r),
NewNRGBA64(r),
NewNYCbCrA(r, YCbCrSubsampleRatio444),
NewPaletted(r, palette.Plan9),
NewRGBA(r),
NewRGBA64(r),
NewUniform(color.RGBA64{}),
NewYCbCr(r, YCbCrSubsampleRatio444),
r,
}
for _, tc := range testCases {
switch tc := tc.(type) {
// Most of the concrete image types in the testCases implement the
// draw.RGBA64Image interface: they have a SetRGBA64 method. We use an
// interface literal here, instead of importing "image/draw", to avoid
// an import cycle.
//
// The YCbCr and NYCbCrA types are special-cased. Chroma subsampling
// means that setting one pixel can modify neighboring pixels. They
// don't have Set or SetRGBA64 methods because that side effect could
// be surprising. Here, we just memset the channel buffers instead.
//
// The Uniform and Rectangle types are also special-cased, as they
// don't have a Set or SetRGBA64 method.
case interface {
SetRGBA64(x, y int, c color.RGBA64)
}:
tc.SetRGBA64(1, 1, color.RGBA64{0x7FFF, 0x3FFF, 0x0000, 0x7FFF})
case *NYCbCrA:
memset(tc.YCbCr.Y, 0x77)
memset(tc.YCbCr.Cb, 0x88)
memset(tc.YCbCr.Cr, 0x99)
memset(tc.A, 0xAA)
case *Uniform:
tc.C = color.RGBA64{0x7FFF, 0x3FFF, 0x0000, 0x7FFF}
case *YCbCr:
memset(tc.Y, 0x77)
memset(tc.Cb, 0x88)
memset(tc.Cr, 0x99)
case Rectangle:
// No-op. Rectangle pixels' colors are immutable. They're always
// color.Opaque.
default:
t.Errorf("could not initialize pixels for %T", tc)
continue
}
// Check that RGBA64At(x, y) is equivalent to At(x, y).RGBA().
rgba64Image, ok := tc.(RGBA64Image)
if !ok {
t.Errorf("%T is not an RGBA64Image", tc)
continue
}
got := rgba64Image.RGBA64At(1, 1)
wantR, wantG, wantB, wantA := tc.At(1, 1).RGBA()
if (uint32(got.R) != wantR) || (uint32(got.G) != wantG) ||
(uint32(got.B) != wantB) || (uint32(got.A) != wantA) {
t.Errorf("%T:\ngot (0x%04X, 0x%04X, 0x%04X, 0x%04X)\n"+
"want (0x%04X, 0x%04X, 0x%04X, 0x%04X)", tc,
got.R, got.G, got.B, got.A,
wantR, wantG, wantB, wantA)
continue
}
}
}
func BenchmarkAt(b *testing.B) {
for _, tc := range testImages {
b.Run(tc.name, func(b *testing.B) {
m := tc.image()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.At(4, 5)
}
})
}
}
func BenchmarkSet(b *testing.B) {
c := color.Gray{0xff}
for _, tc := range testImages {
b.Run(tc.name, func(b *testing.B) {
m := tc.image()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Set(4, 5, c)
}
})
}
}
func BenchmarkRGBAAt(b *testing.B) {
m := NewRGBA(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RGBAAt(4, 5)
}
}
func BenchmarkRGBASetRGBA(b *testing.B) {
m := NewRGBA(Rect(0, 0, 10, 10))
c := color.RGBA{0xff, 0xff, 0xff, 0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetRGBA(4, 5, c)
}
}
func BenchmarkRGBA64At(b *testing.B) {
m := NewRGBA64(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RGBA64At(4, 5)
}
}
func BenchmarkRGBA64SetRGBA64(b *testing.B) {
m := NewRGBA64(Rect(0, 0, 10, 10))
c := color.RGBA64{0xffff, 0xffff, 0xffff, 0x1357}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetRGBA64(4, 5, c)
}
}
func BenchmarkNRGBAAt(b *testing.B) {
m := NewNRGBA(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.NRGBAAt(4, 5)
}
}
func BenchmarkNRGBASetNRGBA(b *testing.B) {
m := NewNRGBA(Rect(0, 0, 10, 10))
c := color.NRGBA{0xff, 0xff, 0xff, 0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetNRGBA(4, 5, c)
}
}
func BenchmarkNRGBA64At(b *testing.B) {
m := NewNRGBA64(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.NRGBA64At(4, 5)
}
}
func BenchmarkNRGBA64SetNRGBA64(b *testing.B) {
m := NewNRGBA64(Rect(0, 0, 10, 10))
c := color.NRGBA64{0xffff, 0xffff, 0xffff, 0x1357}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetNRGBA64(4, 5, c)
}
}
func BenchmarkAlphaAt(b *testing.B) {
m := NewAlpha(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.AlphaAt(4, 5)
}
}
func BenchmarkAlphaSetAlpha(b *testing.B) {
m := NewAlpha(Rect(0, 0, 10, 10))
c := color.Alpha{0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetAlpha(4, 5, c)
}
}
func BenchmarkAlpha16At(b *testing.B) {
m := NewAlpha16(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Alpha16At(4, 5)
}
}
func BenchmarkAlphaSetAlpha16(b *testing.B) {
m := NewAlpha16(Rect(0, 0, 10, 10))
c := color.Alpha16{0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetAlpha16(4, 5, c)
}
}
func BenchmarkGrayAt(b *testing.B) {
m := NewGray(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.GrayAt(4, 5)
}
}
func BenchmarkGraySetGray(b *testing.B) {
m := NewGray(Rect(0, 0, 10, 10))
c := color.Gray{0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetGray(4, 5, c)
}
}
func BenchmarkGray16At(b *testing.B) {
m := NewGray16(Rect(0, 0, 10, 10))
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Gray16At(4, 5)
}
}
func BenchmarkGraySetGray16(b *testing.B) {
m := NewGray16(Rect(0, 0, 10, 10))
c := color.Gray16{0x13}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.SetGray16(4, 5, c)
}
} | go | github | https://github.com/golang/go | src/image/image_test.go |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/hashicorp/cli"
"github.com/hashicorp/vault/api"
"github.com/posener/complete"
"github.com/ryanuber/columnize"
)
// Compile-time assertions that OperatorUsageCommand implements the CLI
// interfaces it is registered under.
var (
	_ cli.Command             = (*OperatorUsageCommand)(nil)
	_ cli.CommandAutocomplete = (*OperatorUsageCommand)(nil)
)
// OperatorUsageCommand implements "vault operator usage": it queries the
// activity (client count) endpoint and renders the results.
type OperatorUsageCommand struct {
	*BaseCommand

	// flagStartTime and flagEndTime bound the reporting window. A zero
	// time means the flag was not set and the server-side default applies.
	flagStartTime time.Time
	flagEndTime   time.Time
}
// Synopsis returns the one-line description shown in command listings.
func (c *OperatorUsageCommand) Synopsis() string {
	const summary = "Lists historical client counts"
	return summary
}
// Help returns the long-form usage text for this command, with the
// generated flag documentation appended.
// NOTE(review): the rendering this was transcribed from lost the raw
// string's exact indentation/blank lines — confirm against the original
// literal before relying on exact output spacing.
func (c *OperatorUsageCommand) Help() string {
	helpText := `
Usage: vault operator usage

  List the client counts for the default reporting period.

      $ vault operator usage

  List the client counts for a specific time period.

      $ vault operator usage -start-time=2020-10 -end-time=2020-11

` + c.Flags().Help()

	return strings.TrimSpace(helpText)
}
// Flags returns the flag sets for this command: the shared HTTP and
// output-format flags plus the usage-specific reporting-window options.
func (c *OperatorUsageCommand) Flags() *FlagSets {
	sets := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)

	fs := sets.NewFlagSet("Command Options")

	// Beginning of the reporting window; unset (zero) defers to the
	// server's billing start time.
	fs.TimeVar(&TimeVar{
		Name:       "start-time",
		Usage:      "Start of report period. Defaults to billing start time",
		Target:     &c.flagStartTime,
		Completion: complete.PredictNothing,
		Default:    time.Time{},
		Formats:    TimeVar_TimeOrDay | TimeVar_Month,
	})

	// End of the reporting window; unset (zero) defers to the server's
	// end-of-current-month default.
	fs.TimeVar(&TimeVar{
		Name:       "end-time",
		Usage:      "End of report period. Defaults to end of the current month.",
		Target:     &c.flagEndTime,
		Completion: complete.PredictNothing,
		Default:    time.Time{},
		Formats:    TimeVar_TimeOrDay | TimeVar_Month,
	})

	return sets
}
// AutocompleteArgs returns the predictor for positional arguments; this
// command accepts anything.
func (c *OperatorUsageCommand) AutocompleteArgs() complete.Predictor {
	predictor := complete.PredictAnything
	return predictor
}
// AutocompleteFlags returns shell-completion predictors derived from the
// command's flag sets.
func (c *OperatorUsageCommand) AutocompleteFlags() complete.Flags {
	fs := c.Flags()
	return fs.Completions()
}
// Run executes the command. Exit codes: 0 on success (including "no data
// available"), 1 on flag-parse errors, 2 on client/API errors.
func (c *OperatorUsageCommand) Run(args []string) int {
	f := c.Flags()

	if err := f.Parse(args); err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	// Only forward the bounds the user actually set; the activity
	// endpoint applies its own defaults for absent parameters.
	data := make(map[string][]string)
	if !c.flagStartTime.IsZero() {
		data["start_time"] = []string{c.flagStartTime.Format(time.RFC3339)}
	}
	if !c.flagEndTime.IsZero() {
		data["end_time"] = []string{c.flagEndTime.Format(time.RFC3339)}
	}

	client, err := c.Client()
	if err != nil {
		c.UI.Error(err.Error())
		return 2
	}

	resp, err := client.Logical().ReadWithData("sys/internal/counters/activity", data)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error retrieving client counts: %v", err))
		return 2
	}

	// An empty response is not an error: distinguish "no report exists
	// yet" from "no data in the requested range" for a clearer message.
	if resp == nil || resp.Data == nil {
		if c.noReportAvailable(client) {
			c.UI.Warn("Vault does not have any usage data available. A report will be available\n" +
				"after the first calendar month in which monitoring is enabled.")
		} else {
			c.UI.Warn("No data is available for the given time range.")
		}
		// No further output
		return 0
	}

	switch Format(c.UI) {
	case "table":
	default:
		// Handle JSON, YAML, etc.
		return OutputData(c.UI, resp)
	}

	// Show this before the headers
	c.outputTimestamps(resp.Data)

	out := []string{
		"Namespace path | Entity Clients | Non-Entity clients | Secret syncs | ACME clients | Active clients",
	}
	out = append(out, c.namespacesOutput(resp.Data)...)
	out = append(out, c.totalOutput(resp.Data)...)

	colConfig := columnize.DefaultConfig()
	colConfig.Empty = " " // Do not show n/a on intentional blank lines
	colConfig.Glue = "   "
	c.UI.Output(tableOutput(out, colConfig))

	// Also, output the warnings returned, if any:
	for _, warning := range resp.Warnings {
		c.UI.Warn(warning)
	}

	return 0
}
// noReportAvailable checks whether we can definitively say that no
// queries can be answered; if there's an error, just fall back to
// reporting that the response is empty.
func (c *OperatorUsageCommand) noReportAvailable(client *api.Client) bool {
	// When only printing the request (curl string / policy output), avoid
	// issuing an extra read that would clobber the original query.
	if c.flagOutputCurlString || c.flagOutputPolicy {
		return false
	}

	cfg, err := client.Logical().Read("sys/internal/counters/config")
	if err != nil || cfg == nil || cfg.Data == nil {
		c.UI.Warn("bad response from config")
		return false
	}

	rawAvailable, found := cfg.Data["queries_available"]
	if !found {
		c.UI.Warn("no queries_available key")
		return false
	}

	available, isBool := rawAvailable.(bool)
	if !isBool {
		c.UI.Warn("wrong type")
		return false
	}

	return !available
}
// outputTimestamps prints the reporting period of the activity response.
//
// The "start_time"/"end_time" values come from the
// sys/internal/counters/activity endpoint and are expected to be RFC3339
// strings. The original code used bare .(string) assertions, which panic the
// whole CLI if a field is missing or has an unexpected type; comma-ok
// assertions keep the command resilient and fall back to printing the raw
// value.
func (c *OperatorUsageCommand) outputTimestamps(data map[string]interface{}) {
	start, ok := data["start_time"].(string)
	if !ok {
		start = fmt.Sprintf("%v", data["start_time"])
	}
	end, ok := data["end_time"].(string)
	if !ok {
		end = fmt.Sprintf("%v", data["end_time"])
	}
	c.UI.Output(fmt.Sprintf("Period start: %v\nPeriod end: %v\n", start, end))
}
// UsageCommandNamespace is one pre-rendered table row plus the key used to
// order rows in the final output.
type UsageCommandNamespace struct {
	// formattedLine is the fully rendered columnize row for this namespace.
	formattedLine string
	// sortOrder is a synthetic sort key:
	//   -- root first ("0")
	//   -- namespaces in lexicographic order ("1" + path)
	//   -- deleted namespace "xxxxx" last ("2" + path)
	sortOrder string
}
// UsageResponse holds the parsed per-namespace client counts from one entry
// of the activity endpoint's "by_namespace" list.
type UsageResponse struct {
	namespacePath string
	entityCount   int64
	// As per 1.9, the tokenCount field will contain the distinct non-entity
	// token clients instead of each individual token.
	tokenCount int64

	secretSyncs int64
	acmeCount   int64

	clientCount int64
}
// jsonNumberOK extracts m[key] as an int64. The second return value is false
// when the key is absent, the value is not a json.Number, or the number is
// not representable as an int64.
func jsonNumberOK(m map[string]interface{}, key string) (int64, bool) {
	num, isNumber := m[key].(json.Number)
	if !isNumber {
		return 0, false
	}

	n, convErr := num.Int64()
	if convErr != nil {
		return 0, false
	}

	return n, true
}
// TODO: provide a function in the API module for doing this conversion?

// parseNamespaceCount converts one raw entry of the activity response's
// "by_namespace" list into a UsageResponse. It returns an error naming the
// first missing/malformed field; secret_syncs and acme_clients are optional
// because older Vault versions do not report them.
func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageResponse, error) {
	var ret UsageResponse

	val, ok := rawVal.(map[string]interface{})
	if !ok {
		return ret, errors.New("value is not a map")
	}

	ret.namespacePath, ok = val["namespace_path"].(string)
	if !ok {
		return ret, errors.New("bad namespace path")
	}

	counts, ok := val["counts"].(map[string]interface{})
	if !ok {
		return ret, errors.New("missing counts")
	}

	ret.entityCount, ok = jsonNumberOK(counts, "entity_clients")
	if !ok {
		return ret, errors.New("missing entity_clients")
	}

	ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_clients")
	if !ok {
		return ret, errors.New("missing non_entity_clients")
	}

	// don't error if the secret syncs key is missing
	ret.secretSyncs, _ = jsonNumberOK(counts, "secret_syncs")

	// don't error if acme clients is missing
	ret.acmeCount, _ = jsonNumberOK(counts, "acme_clients")

	ret.clientCount, ok = jsonNumberOK(counts, "clients")
	if !ok {
		return ret, errors.New("missing clients")
	}

	return ret, nil
}
// namespacesOutput renders one table row per namespace from the activity
// response, sorted root-first, then lexicographically, with deleted
// namespaces last. Malformed entries are reported and skipped rather than
// aborting the whole listing.
func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []string {
	byNs, ok := data["by_namespace"].([]interface{})
	if !ok {
		c.UI.Error("missing namespace breakdown in response")
		return nil
	}

	nsOut := make([]UsageCommandNamespace, 0, len(byNs))

	for _, rawVal := range byNs {
		val, err := c.parseNamespaceCount(rawVal)
		if err != nil {
			c.UI.Error(fmt.Sprintf("malformed namespace in response: %v", err))
			continue
		}

		// Synthetic sort key: "0" = root, "1"+path = normal, "2"+path = deleted.
		sortOrder := "1" + val.namespacePath
		if val.namespacePath == "" {
			val.namespacePath = "[root]"
			sortOrder = "0"
		} else if strings.HasPrefix(val.namespacePath, "deleted namespace") {
			sortOrder = "2" + val.namespacePath
		}

		formattedLine := fmt.Sprintf("%s | %d | %d | %d | %d | %d",
			val.namespacePath, val.entityCount, val.tokenCount, val.secretSyncs, val.acmeCount, val.clientCount)

		nsOut = append(nsOut, UsageCommandNamespace{
			formattedLine: formattedLine,
			sortOrder:     sortOrder,
		})
	}

	sort.Slice(nsOut, func(i, j int) bool {
		return nsOut[i].sortOrder < nsOut[j].sortOrder
	})

	out := make([]string, len(nsOut))
	for i := range nsOut {
		out[i] = nsOut[i].formattedLine
	}

	return out
}
// totalOutput renders the "Total" table row from the activity response's
// "total" object, preceded by a blank separator row. On a missing required
// field it reports the error and returns just the separator so the table
// still renders.
func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string {
	// blank line separating it from namespaces
	out := []string{" | | | | | "}

	total, ok := data["total"].(map[string]interface{})
	if !ok {
		c.UI.Error("missing total in response")
		return out
	}

	entityCount, ok := jsonNumberOK(total, "entity_clients")
	if !ok {
		c.UI.Error("missing entity_clients in total")
		return out
	}

	tokenCount, ok := jsonNumberOK(total, "non_entity_clients")
	if !ok {
		c.UI.Error("missing non_entity_clients in total")
		return out
	}

	// don't error if secret syncs key is missing
	secretSyncs, _ := jsonNumberOK(total, "secret_syncs")

	// don't error if acme clients is missing
	acmeCount, _ := jsonNumberOK(total, "acme_clients")

	clientCount, ok := jsonNumberOK(total, "clients")
	if !ok {
		c.UI.Error("missing clients in total")
		return out
	}

	out = append(out, fmt.Sprintf("Total | %d | %d | %d | %d | %d",
		entityCount, tokenCount, secretSyncs, acmeCount, clientCount))

	return out
}
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#ifndef _NGX_MAIL_H_INCLUDED_
#define _NGX_MAIL_H_INCLUDED_
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_event_connect.h>
#if (NGX_MAIL_SSL)
#include <ngx_mail_ssl_module.h>
#endif
/*
 * Per-module configuration pointer arrays for one mail{} context;
 * indexed by each module's ctx_index.
 */
typedef struct {
    void                  **main_conf;
    void                  **srv_conf;
} ngx_mail_conf_ctx_t;


/* parsed "listen" directive: address plus socket-level options */
typedef struct {
    struct sockaddr        *sockaddr;
    socklen_t               socklen;
    ngx_str_t               addr_text;

    /* server ctx */
    ngx_mail_conf_ctx_t    *ctx;

    unsigned                bind:1;
    unsigned                wildcard:1;
    unsigned                ssl:1;
#if (NGX_HAVE_INET6)
    unsigned                ipv6only:1;
#endif
    unsigned                so_keepalive:2;
    unsigned                proxy_protocol:1;
#if (NGX_HAVE_KEEPALIVE_TUNABLE)
    int                     tcp_keepidle;
    int                     tcp_keepintvl;
    int                     tcp_keepcnt;
#endif
    int                     backlog;
    int                     rcvbuf;
    int                     sndbuf;
} ngx_mail_listen_t;


/* per-address configuration resolved when a connection is accepted */
typedef struct {
    ngx_mail_conf_ctx_t    *ctx;
    ngx_str_t               addr_text;
    unsigned                ssl:1;
    unsigned                proxy_protocol:1;
} ngx_mail_addr_conf_t;

typedef struct {
    in_addr_t               addr;
    ngx_mail_addr_conf_t    conf;
} ngx_mail_in_addr_t;


#if (NGX_HAVE_INET6)

typedef struct {
    struct in6_addr         addr6;
    ngx_mail_addr_conf_t    conf;
} ngx_mail_in6_addr_t;

#endif


typedef struct {
    /* ngx_mail_in_addr_t or ngx_mail_in6_addr_t */
    void                   *addrs;
    ngx_uint_t              naddrs;
} ngx_mail_port_t;


/* groups listen directives that share one port during configuration */
typedef struct {
    int                     family;
    in_port_t               port;
    ngx_array_t             addrs;       /* array of ngx_mail_conf_addr_t */
} ngx_mail_conf_port_t;


typedef struct {
    ngx_mail_listen_t       opt;
} ngx_mail_conf_addr_t;


typedef struct {
    ngx_array_t             servers;     /* ngx_mail_core_srv_conf_t */
    ngx_array_t             listen;      /* ngx_mail_listen_t */
} ngx_mail_core_main_conf_t;
/* values stored in ngx_mail_session_t.protocol */
#define NGX_MAIL_POP3_PROTOCOL  0
#define NGX_MAIL_IMAP_PROTOCOL  1
#define NGX_MAIL_SMTP_PROTOCOL  2


typedef struct ngx_mail_protocol_s  ngx_mail_protocol_t;


/* per-server{} core configuration */
typedef struct {
    ngx_mail_protocol_t    *protocol;

    ngx_msec_t              timeout;
    ngx_msec_t              resolver_timeout;

    ngx_uint_t              max_errors;

    ngx_str_t               server_name;

    u_char                 *file_name;
    ngx_uint_t              line;

    ngx_resolver_t         *resolver;
    ngx_log_t              *error_log;

    /* server ctx */
    ngx_mail_conf_ctx_t    *ctx;

    ngx_uint_t              listen;  /* unsigned  listen:1; */
} ngx_mail_core_srv_conf_t;


/* POP3 session/parser states */
typedef enum {
    ngx_pop3_start = 0,
    ngx_pop3_user,
    ngx_pop3_passwd,
    ngx_pop3_auth_login_username,
    ngx_pop3_auth_login_password,
    ngx_pop3_auth_plain,
    ngx_pop3_auth_cram_md5,
    ngx_pop3_auth_external
} ngx_pop3_state_e;


/* IMAP session/parser states */
typedef enum {
    ngx_imap_start = 0,
    ngx_imap_auth_login_username,
    ngx_imap_auth_login_password,
    ngx_imap_auth_plain,
    ngx_imap_auth_cram_md5,
    ngx_imap_auth_external,
    ngx_imap_login,
    ngx_imap_user,
    ngx_imap_passwd
} ngx_imap_state_e;


/* SMTP session/parser states */
typedef enum {
    ngx_smtp_start = 0,
    ngx_smtp_auth_login_username,
    ngx_smtp_auth_login_password,
    ngx_smtp_auth_plain,
    ngx_smtp_auth_cram_md5,
    ngx_smtp_auth_external,
    ngx_smtp_helo,
    ngx_smtp_helo_xclient,
    ngx_smtp_helo_auth,
    ngx_smtp_helo_from,
    ngx_smtp_xclient,
    ngx_smtp_xclient_from,
    ngx_smtp_xclient_helo,
    ngx_smtp_xclient_auth,
    ngx_smtp_from,
    ngx_smtp_to
} ngx_smtp_state_e;
/* state of the connection to the upstream mail backend */
typedef struct {
    ngx_peer_connection_t   upstream;
    ngx_buf_t              *buffer;
    ngx_uint_t              proxy_protocol;  /* unsigned  proxy_protocol:1; */
} ngx_mail_proxy_ctx_t;


/* per-client-connection session state shared by all mail protocols */
typedef struct {
    uint32_t                signature;         /* "MAIL" */

    ngx_connection_t       *connection;

    ngx_str_t               out;
    ngx_buf_t              *buffer;

    void                  **ctx;
    void                  **main_conf;
    void                  **srv_conf;

    ngx_resolver_ctx_t     *resolver_ctx;

    ngx_mail_proxy_ctx_t   *proxy;

    ngx_uint_t              mail_state;

    unsigned                ssl:1;
    unsigned                protocol:3;
    unsigned                blocked:1;
    unsigned                quit:1;
    unsigned                quoted:1;
    unsigned                backslash:1;
    unsigned                no_sync_literal:1;
    unsigned                starttls:1;
    unsigned                esmtp:1;
    unsigned                auth_method:3;
    unsigned                auth_wait:1;

    ngx_str_t               login;
    ngx_str_t               passwd;

    ngx_str_t               salt;
    ngx_str_t               tag;
    ngx_str_t               tagged_line;
    ngx_str_t               text;

    ngx_str_t              *addr_text;
    ngx_str_t               host;
    ngx_str_t               smtp_helo;
    ngx_str_t               smtp_from;
    ngx_str_t               smtp_to;

    ngx_str_t               cmd;
    ngx_uint_t              command;
    ngx_array_t             args;

    ngx_uint_t              errors;
    ngx_uint_t              login_attempt;

    /* used to parse POP3/IMAP/SMTP command */

    ngx_uint_t              state;
    u_char                 *tag_start;
    u_char                 *cmd_start;
    u_char                 *arg_start;
    ngx_uint_t              literal_len;
} ngx_mail_session_t;


/* context handed to the error logger for mail connections */
typedef struct {
    ngx_str_t              *client;
    ngx_mail_session_t     *session;
} ngx_mail_log_ctx_t;
/* POP3 commands recognized by the parser */
#define NGX_POP3_USER          1
#define NGX_POP3_PASS          2
#define NGX_POP3_CAPA          3
#define NGX_POP3_QUIT          4
#define NGX_POP3_NOOP          5
#define NGX_POP3_STLS          6
#define NGX_POP3_APOP          7
#define NGX_POP3_AUTH          8
#define NGX_POP3_STAT          9
#define NGX_POP3_LIST          10
#define NGX_POP3_RETR          11
#define NGX_POP3_DELE          12
#define NGX_POP3_RSET          13
#define NGX_POP3_TOP           14
#define NGX_POP3_UIDL          15


/* IMAP commands recognized by the parser */
#define NGX_IMAP_LOGIN         1
#define NGX_IMAP_LOGOUT        2
#define NGX_IMAP_CAPABILITY    3
#define NGX_IMAP_NOOP          4
#define NGX_IMAP_STARTTLS      5

#define NGX_IMAP_NEXT          6

#define NGX_IMAP_AUTHENTICATE  7


/* SMTP commands recognized by the parser */
#define NGX_SMTP_HELO          1
#define NGX_SMTP_EHLO          2
#define NGX_SMTP_AUTH          3
#define NGX_SMTP_QUIT          4
#define NGX_SMTP_NOOP          5
#define NGX_SMTP_MAIL          6
#define NGX_SMTP_RSET          7
#define NGX_SMTP_RCPT          8
#define NGX_SMTP_DATA          9
#define NGX_SMTP_VRFY          10
#define NGX_SMTP_EXPN          11
#define NGX_SMTP_HELP          12
#define NGX_SMTP_STARTTLS      13


/* values for ngx_mail_session_t.auth_method */
#define NGX_MAIL_AUTH_PLAIN             0
#define NGX_MAIL_AUTH_LOGIN             1
#define NGX_MAIL_AUTH_LOGIN_USERNAME    2
#define NGX_MAIL_AUTH_APOP              3
#define NGX_MAIL_AUTH_CRAM_MD5          4
#define NGX_MAIL_AUTH_EXTERNAL          5
#define NGX_MAIL_AUTH_NONE              6


/* bitmask of authentication mechanisms enabled in configuration */
#define NGX_MAIL_AUTH_PLAIN_ENABLED     0x0002
#define NGX_MAIL_AUTH_LOGIN_ENABLED     0x0004
#define NGX_MAIL_AUTH_APOP_ENABLED      0x0008
#define NGX_MAIL_AUTH_CRAM_MD5_ENABLED  0x0010
#define NGX_MAIL_AUTH_EXTERNAL_ENABLED  0x0020
#define NGX_MAIL_AUTH_NONE_ENABLED      0x0040


/* returned by parse_command callbacks for a syntactically bad command */
#define NGX_MAIL_PARSE_INVALID_COMMAND  20
/* per-protocol callback types implemented by pop3/imap/smtp modules */
typedef void (*ngx_mail_init_session_pt)(ngx_mail_session_t *s,
    ngx_connection_t *c);
typedef void (*ngx_mail_init_protocol_pt)(ngx_event_t *rev);
typedef void (*ngx_mail_auth_state_pt)(ngx_event_t *rev);
typedef ngx_int_t (*ngx_mail_parse_command_pt)(ngx_mail_session_t *s);


/* describes one mail protocol: identity, default ports, and callbacks */
struct ngx_mail_protocol_s {
    ngx_str_t                   name;
    ngx_str_t                   alpn;
    in_port_t                   port[4];
    ngx_uint_t                  type;

    ngx_mail_init_session_pt    init_session;
    ngx_mail_init_protocol_pt   init_protocol;
    ngx_mail_parse_command_pt   parse_command;
    ngx_mail_auth_state_pt      auth_state;

    ngx_str_t                   internal_server_error;
    ngx_str_t                   cert_error;
    ngx_str_t                   no_cert;
};


/* module interface: optional protocol plus configuration hooks */
typedef struct {
    ngx_mail_protocol_t        *protocol;

    void                       *(*create_main_conf)(ngx_conf_t *cf);
    char                       *(*init_main_conf)(ngx_conf_t *cf, void *conf);

    void                       *(*create_srv_conf)(ngx_conf_t *cf);
    char                       *(*merge_srv_conf)(ngx_conf_t *cf, void *prev,
                                                  void *conf);
} ngx_mail_module_t;


#define NGX_MAIL_MODULE         0x4C49414D     /* "MAIL" */

/* directive context flags for mail{} and server{} levels */
#define NGX_MAIL_MAIN_CONF      0x02000000
#define NGX_MAIL_SRV_CONF       0x04000000


#define NGX_MAIL_MAIN_CONF_OFFSET  offsetof(ngx_mail_conf_ctx_t, main_conf)
#define NGX_MAIL_SRV_CONF_OFFSET   offsetof(ngx_mail_conf_ctx_t, srv_conf)


/* per-session module context accessors */
#define ngx_mail_get_module_ctx(s, module)     (s)->ctx[module.ctx_index]
#define ngx_mail_set_ctx(s, c, module)         s->ctx[module.ctx_index] = c;
#define ngx_mail_delete_ctx(s, module)         s->ctx[module.ctx_index] = NULL;


/* configuration accessors, from a session or from a ngx_conf_t */
#define ngx_mail_get_module_main_conf(s, module)                             \
    (s)->main_conf[module.ctx_index]
#define ngx_mail_get_module_srv_conf(s, module)  (s)->srv_conf[module.ctx_index]

#define ngx_mail_conf_get_module_main_conf(cf, module)                       \
    ((ngx_mail_conf_ctx_t *) cf->ctx)->main_conf[module.ctx_index]
#define ngx_mail_conf_get_module_srv_conf(cf, module)                        \
    ((ngx_mail_conf_ctx_t *) cf->ctx)->srv_conf[module.ctx_index]
#if (NGX_MAIL_SSL)
void ngx_mail_starttls_handler(ngx_event_t *rev);
ngx_int_t ngx_mail_starttls_only(ngx_mail_session_t *s, ngx_connection_t *c);
#endif


/* connection setup and authentication helpers (ngx_mail_handler.c) */
void ngx_mail_init_connection(ngx_connection_t *c);

ngx_int_t ngx_mail_salt(ngx_mail_session_t *s, ngx_connection_t *c,
    ngx_mail_core_srv_conf_t *cscf);
ngx_int_t ngx_mail_auth_plain(ngx_mail_session_t *s, ngx_connection_t *c,
    ngx_uint_t n);
ngx_int_t ngx_mail_auth_login_username(ngx_mail_session_t *s,
    ngx_connection_t *c, ngx_uint_t n);
ngx_int_t ngx_mail_auth_login_password(ngx_mail_session_t *s,
    ngx_connection_t *c);
ngx_int_t ngx_mail_auth_cram_md5_salt(ngx_mail_session_t *s,
    ngx_connection_t *c, char *prefix, size_t len);
ngx_int_t ngx_mail_auth_cram_md5(ngx_mail_session_t *s, ngx_connection_t *c);
ngx_int_t ngx_mail_auth_external(ngx_mail_session_t *s, ngx_connection_t *c,
    ngx_uint_t n);
ngx_int_t ngx_mail_auth_parse(ngx_mail_session_t *s, ngx_connection_t *c);

void ngx_mail_send(ngx_event_t *wev);
ngx_int_t ngx_mail_read_command(ngx_mail_session_t *s, ngx_connection_t *c);
void ngx_mail_auth(ngx_mail_session_t *s, ngx_connection_t *c);
void ngx_mail_close_connection(ngx_connection_t *c);
void ngx_mail_session_internal_server_error(ngx_mail_session_t *s);
u_char *ngx_mail_log_error(ngx_log_t *log, u_char *buf, size_t len);


char *ngx_mail_capabilities(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);


/* STUB */
void ngx_mail_proxy_init(ngx_mail_session_t *s, ngx_addr_t *peer);
void ngx_mail_auth_http_init(ngx_mail_session_t *s);
ngx_int_t ngx_mail_realip_handler(ngx_mail_session_t *s);
/**/


extern ngx_uint_t    ngx_mail_max_module;
extern ngx_module_t  ngx_mail_core_module;


#endif /* _NGX_MAIL_H_INCLUDED_ */
use crate::future::maybe_done::{maybe_done, MaybeDone};
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Waits on three fallible futures concurrently.
///
/// The returned future resolves to `Ok((t1, t2, t3))` once all three inner
/// futures complete successfully, or short-circuits with the first `Err`
/// observed (checked in polling order: 1, 2, 3).
pub(crate) fn try_join3<T1, F1, T2, F2, T3, F3, E>(
    future1: F1,
    future2: F2,
    future3: F3,
) -> TryJoin3<F1, F2, F3>
where
    F1: Future<Output = Result<T1, E>>,
    F2: Future<Output = Result<T2, E>>,
    F3: Future<Output = Result<T3, E>>,
{
    TryJoin3 {
        // Each future is wrapped in MaybeDone so its output can be stored
        // until all three have completed.
        future1: maybe_done(future1),
        future2: maybe_done(future2),
        future3: maybe_done(future3),
    }
}
pin_project! {
    /// Future returned by [`try_join3`]; holds each inner future (and,
    /// eventually, its output) in a pinned `MaybeDone` slot.
    pub(crate) struct TryJoin3<F1, F2, F3>
    where
        F1: Future,
        F2: Future,
        F3: Future,
    {
        #[pin]
        future1: MaybeDone<F1>,
        #[pin]
        future2: MaybeDone<F2>,
        #[pin]
        future3: MaybeDone<F3>,
    }
}
impl<T1, F1, T2, F2, T3, F3, E> Future for TryJoin3<F1, F2, F3>
where
    F1: Future<Output = Result<T1, E>>,
    F2: Future<Output = Result<T2, E>>,
    F3: Future<Output = Result<T3, E>>,
{
    type Output = Result<(T1, T2, T3), E>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut all_done = true;
        let mut me = self.project();

        // Poll every slot each time so all three futures make progress; a
        // completed slot's Err is surfaced immediately (short-circuit),
        // dropping the remaining futures when `self` is dropped.
        if me.future1.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future1.as_mut().output_mut().unwrap().is_err() {
            return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap()));
        }

        if me.future2.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future2.as_mut().output_mut().unwrap().is_err() {
            return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap()));
        }

        if me.future3.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future3.as_mut().output_mut().unwrap().is_err() {
            return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap()));
        }

        if all_done {
            // All three completed without error: take the stored outputs.
            Poll::Ready(Ok((
                me.future1.take_output().unwrap().ok().unwrap(),
                me.future2.take_output().unwrap().ok().unwrap(),
                me.future3.take_output().unwrap().ok().unwrap(),
            )))
        } else {
            Poll::Pending
        }
    }
}
# -*- coding: utf-8 -*-
# (c) 2012 Canonical Ltd.
#
# Authors: Alberto Milone <alberto.milone@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from glob import glob
import os
import sys
import tempfile
import logging
import xkit.xutils
import xkit.xorgparser
import Quirks.quirkreader
import Quirks.quirkinfo
class QuirkChecker:
    """Apply or remove hardware-specific xorg.conf.d quirks for a handler.

    Quirk definitions are read from ``quirks_path``; each quirk that matches
    the machine's DMI information is written to (or removed from)
    /usr/share/X11/xorg.conf.d as a standalone snippet file.
    """

    def __init__(self, handler, path='/usr/share/jockey/quirks'):
        # handler: driver name (e.g. 'nvidia') whose quirks we manage.
        self._handler = handler
        self.quirks_path = path
        self._quirks = []
        self.get_quirks_from_path()
        self._system_info = self.get_system_info()
        self._xorg_conf_d_path = '/usr/share/X11/xorg.conf.d'

    def get_quirks_from_path(self):
        '''check all the files in a directory looking for quirks'''
        self._quirks = []
        if os.path.isdir(self.quirks_path):
            for f in glob(os.path.join(self.quirks_path, '*')):
                if os.path.isfile(f):
                    logging.debug('Parsing %s' % f)
                    quirks = self.get_quirks_from_file(f)
                    self._quirks += quirks
        else:
            logging.debug('%s does not exist' % self.quirks_path)
        return self._quirks

    def get_quirks_from_file(self, quirk_file):
        '''Parse a single file and return the quirks it defines.'''
        # read other blacklist files (which we will not touch, but evaluate)
        quirk_file = Quirks.quirkreader.ReadQuirk(quirk_file)
        return quirk_file.get_quirks()

    def get_system_info(self):
        '''Return this machine's DMI information for quirk matching.'''
        quirk_info = Quirks.quirkinfo.QuirkInfo()
        return quirk_info.get_dmi_info()

    def matches_tags(self, quirk):
        '''See if tags match system info'''
        # NOTE(review): a tag with more than one allowed value can never
        # cause a mismatch here (the len(...) <= 1 guard skips the check),
        # and 'Success' is logged once per tag rather than once overall.
        # This mirrors the original behaviour — confirm before changing.
        result = True
        for tag in quirk.match_tags.keys():
            for val in quirk.match_tags[tag]:
                if (self._system_info.get(tag) and self._system_info.get(tag) != val
                    and len(quirk.match_tags[tag]) <= 1):
                    logging.debug('Failure to match %s with %s' %
                                  (self._system_info.get(tag), val))
                    return False
                logging.debug('Success')
        return result

    def _check_quirks(self, enable=True):
        '''Process quirks and do something with them'''
        for quirk in self._quirks:
            # Handler names are compared case-insensitively.
            if self._handler.lower() in [x.lower().strip() for x in quirk.handler]:
                logging.debug('Processing quirk %s' % quirk.id)
                if self.matches_tags(quirk):
                    # Do something here
                    if enable:
                        logging.info('Applying quirk %s' % quirk.id)
                        self._apply_quirk(quirk)
                    else:
                        logging.info('Unapplying quirk %s' % quirk.id)
                        self._unapply_quirk(quirk)
                else:
                    logging.debug('Quirk doesn\'t match')

    def enable_quirks(self):
        '''Enable all quirks for a handler'''
        self._check_quirks(True)

    def disable_quirks(self):
        '''Disable all quirks for a handler'''
        self._check_quirks(False)

    def _get_destination_path(self, quirk):
        '''Return the path to the X config file'''
        return '%s/10-%s-%s.conf' % (self._xorg_conf_d_path,
                self._handler, quirk.id.lower().replace(' ', '-'))

    def _apply_quirk(self, quirk):
        '''Get the xorg snippet and apply it'''
        # Get the relevant x_snippet
        # Write conf file to /usr/share/X11/xorg.conf.d/file.conf
        destination = self._get_destination_path(quirk)
        # Round-trip the snippet through xkit's parser (via a temp file) to
        # validate/normalise it before installing.
        tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
        tmp_file.write(quirk.x_snippet)
        tmp_file.close()
        tmp_xkit = xkit.xorgparser.Parser(tmp_file.name)
        # TODO: REMOVE THIS
        logging.debug(tmp_xkit.globaldict)
        os.unlink(tmp_file.name)

        try:
            logging.debug('Creating %s' % destination)
            tmp_xkit.write(destination)
        except IOError:
            logging.exception('Error during write()')
            return False
        return True

    def _unapply_quirk(self, quirk):
        '''Remove the file with the xorg snippet'''
        # Get the relevant x_snippet
        # Write conf file to /usr/share/X11/xorg.conf.d/file.conf
        destination = self._get_destination_path(quirk)
        logging.debug('Removing %s ...' % destination)
        try:
            os.unlink(destination)
        except (OSError, IOError):
            logging.exception('Cannot unlink destination')
            return False
        return True
def main():
    """Ad-hoc manual test driver: apply then remove nvidia quirks from a
    developer-local quirk directory and print this file's absolute path."""
    checker = QuirkChecker('nvidia', path='/home/alberto/oem/jockey/quirks')
    checker.enable_quirks()
    checker.disable_quirks()

    print(os.path.abspath( __file__ ))

    return 0
#if __name__ == '__main__':
#main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import exc as heat_exc
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service import engine as e
from sahara.service.heat import commons as heat_common
from sahara.service.heat import templates as ht
from sahara.service import volumes
from sahara.utils import cluster as c_u
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils.openstack import heat
# Handle to the conductor (database) API.
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Three-stage cluster status sequences walked by _launch_instances
# (spawn -> wait for networks -> prepare) for each operation type.
CREATE_STAGES = [c_u.CLUSTER_STATUS_SPAWNING, c_u.CLUSTER_STATUS_WAITING,
                 c_u.CLUSTER_STATUS_PREPARING]
SCALE_STAGES = [c_u.CLUSTER_STATUS_SCALING_SPAWNING,
                c_u.CLUSTER_STATUS_SCALING_WAITING,
                c_u.CLUSTER_STATUS_SCALING_PREPARING]
# NOTE(review): the double underscore in ROLLBACK__PREPARING looks like a
# typo but must match the constant actually defined in sahara.utils.cluster
# — verify there before "fixing" it.
ROLLBACK_STAGES = [c_u.CLUSTER_STATUS_ROLLBACK_SPAWNING,
                   c_u.CLUSTER_STATUS_ROLLBACK_WAITING,
                   c_u.CLUSTER_STATUS_ROLLBACK__PREPARING]

heat_engine_opts = [
    cfg.ListOpt('heat_stack_tags', default=['data-processing-cluster'],
                help="List of tags to be used during operating with stack.")
]

CONF.register_opts(heat_engine_opts)
class HeatEngine(e.Engine):
    """Provisioning engine that manages clusters through OpenStack Heat.

    All instance creation/deletion goes through a single Heat stack per
    cluster; rollback strategy is persisted on the cluster so a failed
    operation can be undone by rollback_cluster().
    """

    def get_type_and_version(self):
        # Engine identifier persisted with the cluster, e.g. "heat.3.0".
        return heat_common.HEAT_ENGINE_VERSION

    def create_cluster(self, cluster):
        """Create all cluster instances via a new Heat stack."""
        # On failure before completion, rollback means full shutdown.
        self._update_rollback_strategy(cluster, shutdown=True)

        # Zero the node group counts first: the stack update mechanism then
        # grows each group from 0 to its target size.
        target_count = self._get_ng_counts(cluster)
        self._nullify_ng_counts(cluster)

        cluster = self._generate_heat_stack_name(cluster)

        self._launch_instances(cluster, target_count, CREATE_STAGES)

        # Success: clear the rollback strategy.
        self._update_rollback_strategy(cluster)

    @staticmethod
    def _generate_heat_stack_name(cluster):
        """Store "<name><first 8 chars of id>" as the stack name in extra."""
        cluster = conductor.cluster_get(context.ctx(), cluster)
        hsn = cluster.name + cluster.id[:8]
        extra = cluster.extra.to_dict() if cluster.extra else {}
        extra['heat_stack_name'] = hsn
        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
        return conductor.cluster_get(context.ctx(), cluster)

    def _get_ng_counts(self, cluster):
        """Return {node_group_id: instance count} for the cluster."""
        count = {}
        for node_group in cluster.node_groups:
            count[node_group.id] = node_group.count
        return count

    def _nullify_ng_counts(self, cluster):
        """Set every node group's count to zero in the database."""
        ctx = context.ctx()

        for node_group in cluster.node_groups:
            conductor.node_group_update(ctx, node_group, {"count": 0})

    def scale_cluster(self, cluster, target_count):
        """Resize node groups to target_count via a Heat stack update.

        Returns the ids of the newly created instances.
        """
        ctx = context.ctx()

        rollback_count = self._get_ng_counts(cluster)

        self._update_rollback_strategy(cluster, rollback_count=rollback_count,
                                       target_count=target_count)

        inst_ids = self._launch_instances(
            cluster, target_count, SCALE_STAGES,
            update_stack=True, disable_rollback=False)

        cluster = conductor.cluster_get(ctx, cluster)
        c_u.clean_cluster_from_empty_ng(cluster)

        self._update_rollback_strategy(cluster)

        return inst_ids

    def rollback_cluster(self, cluster, reason):
        """Undo a failed create (shutdown) or scale (shrink back).

        Returns True when the cluster survives the rollback (scaling case),
        False when it was shut down (creation case or nothing to do).
        """
        rollback_info = cluster.rollback_info or {}
        self._update_rollback_strategy(cluster)

        if rollback_info.get('shutdown', False):
            self._rollback_cluster_creation(cluster, reason)
            LOG.warning(_LW("Cluster creation rollback "
                            "(reason: {reason})").format(reason=reason))

            return False

        rollback_count = rollback_info.get('rollback_count', {}).copy()
        target_count = rollback_info.get('target_count', {}).copy()
        if rollback_count or target_count:
            self._rollback_cluster_scaling(
                cluster, rollback_count, target_count, reason)
            LOG.warning(_LW("Cluster scaling rollback "
                            "(reason: {reason})").format(reason=reason))

            return True

        return False

    def _update_rollback_strategy(self, cluster, shutdown=False,
                                  rollback_count=None, target_count=None):
        """Persist (or clear, when called with defaults) rollback info."""
        rollback_info = {}

        if shutdown:
            rollback_info['shutdown'] = shutdown

        if rollback_count:
            rollback_info['rollback_count'] = rollback_count

        if target_count:
            rollback_info['target_count'] = target_count

        cluster = conductor.cluster_update(
            context.ctx(), cluster, {'rollback_info': rollback_info})
        return cluster

    def _populate_cluster(self, cluster, stack):
        """Record stack instances unknown to the DB; return their new ids."""
        ctx = context.ctx()
        old_ids = [i.instance_id for i in c_u.get_instances(cluster)]

        new_ids = []

        for node_group in cluster.node_groups:
            instances = stack.get_node_group_instances(node_group)
            for instance in instances:
                nova_id = instance['physical_id']
                if nova_id not in old_ids:
                    name = instance['name']
                    inst = {
                        "instance_id": nova_id,
                        "instance_name": name
                    }
                    if cluster.use_designate_feature():
                        # domain_name ends with a trailing dot, hence [:-1].
                        inst.update(
                            {"dns_hostname":
                                name + '.' + cluster.domain_name[:-1]})
                    instance_id = conductor.instance_add(ctx, node_group, inst)
                    new_ids.append(instance_id)

        return new_ids

    def _rollback_cluster_creation(self, cluster, ex):
        """Shutdown all instances and update cluster status."""
        self.shutdown_cluster(cluster)

    def _rollback_cluster_scaling(self, cluster, rollback_count,
                                  target_count, ex):
        """Attempt to rollback cluster scaling.

        Our rollback policy for scaling is as follows:
        We shut down nodes created during scaling, but we don't try to
        to get back decommissioned nodes. I.e. during the rollback
        we only shut down nodes and not launch them. That approach should
        maximize the chance of rollback success.
        """
        for ng in rollback_count:
            if rollback_count[ng] > target_count[ng]:
                rollback_count[ng] = target_count[ng]

        self._launch_instances(cluster, rollback_count, ROLLBACK_STAGES,
                               update_stack=True)

    def shutdown_cluster(self, cluster):
        """Shutdown specified cluster and all related resources."""
        try:
            heat.delete_stack(cluster)
        except heat_exc.HTTPNotFound:
            LOG.warning(_LW('Did not find stack for cluster. Trying to delete '
                            'cluster manually.'))

            # Stack not found. Trying to delete cluster like direct engine
            #  do it
            self._shutdown_instances(cluster)
            self._delete_aa_server_group(cluster)

        self._clean_job_executions(cluster)
        self._remove_db_objects(cluster)

    @cpo.event_wrapper(
        True, step=_('Create Heat stack'), param=('cluster', 1))
    def _create_instances(self, cluster, target_count, update_stack=False,
                          disable_rollback=True):
        """Build/update the stack, wait for it, and register new instances."""
        stack = ht.ClusterStack(cluster)

        self._update_instance_count(stack, cluster, target_count)
        stack.instantiate(update_existing=update_stack,
                          disable_rollback=disable_rollback)
        heat.wait_stack_completion(
            cluster, is_update=update_stack,
            last_updated_time=stack.last_updated_time)
        return self._populate_cluster(cluster, stack)

    def _launch_instances(self, cluster, target_count, stages,
                          update_stack=False, disable_rollback=True):
        """Walk the 3-stage status sequence: create, await, configure."""
        # create all instances
        cluster = c_u.change_cluster_status(cluster, stages[0])

        inst_ids = self._create_instances(
            cluster, target_count, update_stack, disable_rollback)

        # wait for all instances are up and networks ready
        cluster = c_u.change_cluster_status(cluster, stages[1])

        instances = c_u.get_instances(cluster, inst_ids)

        self._await_networks(cluster, instances)

        # prepare all instances
        cluster = c_u.change_cluster_status(cluster, stages[2])

        instances = c_u.get_instances(cluster, inst_ids)
        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)

        return inst_ids

    def _update_instance_count(self, stack, cluster, target_count):
        """Push target counts into the stack template and prune extras."""
        ctx = context.ctx()
        for node_group in cluster.node_groups:
            count = target_count[node_group.id]
            stack.add_node_group_extra(node_group.id, count,
                                       self._generate_user_data_script)

            # if number of instances decreases, we need to drop
            # the excessive ones
            for i in range(count, node_group.count):
                conductor.instance_remove(ctx, node_group.instances[i])
CHANGELOG
=========
7.4
---
* Wire the `$profilerDumper` argument in `DumpListener`
4.1.0
-----
 * Added the `server:dump` command to run a server that collects dumps and
   displays them in a single place, with support for multiple formats
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
def test_generic_urlparse():
    """A parsed URL round-trips through generic_urlparse unchanged."""
    url = 'https://ansible.com/blog'
    parsed = urlparse(url)
    generic = generic_urlparse(parsed)
    assert generic.as_list() == list(parsed)
    assert urlunparse(generic.as_list()) == url
def test_generic_urlparse_netloc():
    """hostname/port are exposed when built from a ParseResult."""
    url = 'https://ansible.com:443/blog'
    parsed = urlparse(url)
    generic = generic_urlparse(parsed)
    assert generic.hostname == parsed.hostname == 'ansible.com'
    assert generic.port == 443
    assert urlunparse(generic.as_list()) == url
def test_generic_urlparse_no_netloc():
    """Credentials, host and port are derived when given a plain list."""
    url = 'https://user:passwd@ansible.com:443/blog'
    generic = generic_urlparse(list(urlparse(url)))
    assert generic.hostname == 'ansible.com'
    assert generic.port == 443
    assert generic.username == 'user'
    assert generic.password == 'passwd'
    assert urlunparse(generic.as_list()) == url
def test_generic_urlparse_no_netloc_no_auth():
    """Without credentials in the URL, username/password are None."""
    url = 'https://ansible.com:443/blog'
    generic = generic_urlparse(list(urlparse(url)))
    assert generic.username is None
    assert generic.password is None
def test_generic_urlparse_no_netloc_no_host():
    """A bare path yields no auth, no port, and an empty hostname."""
    url = '/blog'
    generic = generic_urlparse(list(urlparse(url)))
    assert generic.username is None
    assert generic.password is None
    assert generic.port is None
    assert generic.hostname == ''
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: caronni@google.com (Germano Caronni)
"""Authenticode-specific ASN.1 data structures."""
from pkcs7 import DigestInfo
from pyasn1.type import char
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
import x509
class SpcAttributeTypeAndOptionalValue(univ.Sequence):
    """ASN.1 SEQUENCE of an attribute OID plus an optional value."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', x509.AttributeType()),
        namedtype.OptionalNamedType('value', x509.AttributeValue()))
class SpcIndirectDataContent(univ.Sequence):
    """Signed content of an Authenticode signature: data + message digest."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('data', SpcAttributeTypeAndOptionalValue()),
        namedtype.NamedType('messageDigest', DigestInfo()))
class SpcUuid(univ.OctetString):
    """UUID encoded as an OCTET STRING (class id of a serialized object)."""
    pass
class SpcSerializedObject(univ.Sequence):
    """Opaque serialized object identified by a class UUID."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('classId', SpcUuid()),
        namedtype.NamedType('serializedData', univ.OctetString()))
class SpcString(univ.Choice):
    """CHOICE of a UCS-2 (BMPString, tag [0]) or ASCII (IA5String, tag [1])
    string, per the Authenticode specification."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('unicode', char.BMPString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 0))),
        namedtype.NamedType('ascii', char.IA5String().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 1))))
class SpcLink(univ.Choice):
    """According to Authenticode specification.

    A link to extra signature information: a URL [0], a serialized moniker
    [1] (both IMPLICIT), or an SpcString 'file' name under an EXPLICIT [2]
    tag (explicit because SpcString is itself a CHOICE and cannot take an
    implicit tag).
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('url', char.IA5String().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 0))),
        namedtype.NamedType('moniker', SpcSerializedObject().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 1))),
        namedtype.NamedType('file', SpcString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 2))))
class SpcSpOpusInfo(univ.Sequence):
    """Authenticode SpcSpOpusInfo.

    Optional publisher information: a program name [0] and a more-info
    link [1]; both are EXPLICIT-tagged (the underlying types are CHOICEs).
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('programName', SpcString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('moreInfo', SpcLink().subtype(
            explicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatConstructed, 1))))
# coding: utf-8
# Dear future self,
#
# You're looking at this file because
# the parse function finally broke.
#
# It's not fixable. You have to rewrite it.
# Sincerely, past self
#
# Also, it's probably at least
# 2013. Did you ever take
# that trip to Iceland?
import re
def get_type_link(typ, file):
    """Map a type name to the docbook anchor its links should point at.

    Resolution order: exact match in the global objects table, match
    qualified with the file name, the same two again with a trailing 's'
    stripped (naive plural), then Gio./GLib. names mapped to their GTK-doc
    'G' prefixes, and finally the name with dots removed as a last resort.
    """
    from gen_doc import objects
    if typ == '':
        return "void"
    if typ in objects:
        return "cinnamon-js-" + objects[typ].prefix
    if file.name + "." + typ in objects:
        return "cinnamon-js-" + objects[file.name + "." + typ].prefix
    if typ.endswith("s") and typ[:-1] in objects:
        return "cinnamon-js-" + objects[typ[:-1]].prefix
    if typ.endswith("s") and file.name + "." + typ[:-1] in objects:
        return "cinnamon-js-" + objects[file.name + "." + typ[:-1]].prefix
    if typ.startswith("Gio"):
        return typ.replace("Gio.", "G")
    if typ.startswith("GLib"):
        return typ.replace("GLib.", "G")
    return typ.replace('.', '')
def markup(line, obj):
    """Convert one line of gjs-doc comment markup into docbook XML.

    Supported inline markup:
      @name        -> <code>name</code> (parameter reference)
      `text`       -> <code>text</code>
      **text**     -> strong emphasis; *text* -> emphasis
      #Type        -> link to the type's reference page
      %obj.member  -> link to a property / function / enum member
    `obj` supplies the file/object context used to resolve links; it is
    only dereferenced when a '#' or '%' reference occurs in `line`.
    """
    # Raw strings so regex escapes (\w, \*) and \g<1> group references are
    # handed to `re` verbatim, instead of relying on Python's lenient
    # handling of unknown string escapes (a DeprecationWarning since 3.6).
    line = re.sub(r'@(\w*)', r'<code>\g<1></code>', line)
    line = re.sub(r'`([^`]*)`', r'<code>\g<1></code>', line)
    line = re.sub(r'\*\*([^*]*)\*\*', r'<emphasis role="strong">\g<1></emphasis>', line)
    line = re.sub(r'\*([^*]*)\*', r'<emphasis>\g<1></emphasis>', line)
    def format_type_link(match):
        # #Foo or #Foo.Bar -> link to that type's reference page.
        res = match.group(1)
        return '<link linkend="{link}"><code>{name}</code></link>'.format(
            link = get_type_link(res, obj.file),
            name = res)
    line = re.sub(r'#(([\w]*\.)?[\w]+)', format_type_link, line)
    def format_ext_link(match):
        # %owner.member, %member or %member() -> member link when resolvable.
        if match.group(1):
            full = match.group(1) + match.group(3)
        else:
            full = match.group(3)
        if match.group(4):
            full += match.group(4)
        owner = match.group(1)
        if owner:
            owner = owner[:-1]  # remove trailing .
        else:
            owner = "this"
        thing = match.group(3)
        from gen_doc import objects
        object = None
        if owner == "this":
            object = obj.object
        if owner in objects:
            object = objects[owner]
        elif obj.file.name + "." + owner in objects:
            object = objects[obj.file.name + "." + owner]
        if object is None:
            # Unknown owner: render as plain code with no link.
            return '<code>{name}</code>'.format(name = full)
        func_names = [x.name for x in object.functions]
        enum_names = [x.name for x in object.enums]
        prop_names = [x.name for x in object.properties]
        if thing in prop_names and not full.endswith("()"):
            return '<link linkend="cinnamon-js-{prefix}--{thing}"><code>{full}</code></link>'.format(
                prefix = object.prefix,
                thing = thing,
                full = full)
        elif thing in func_names or (thing in enum_names and not full.endswith("()")):
            return '<link linkend="cinnamon-js-{prefix}-{thing}"><code>{full}</code></link>'.format(
                prefix = object.prefix,
                thing = thing,
                full = full)
        else:
            return '<code>{name}</code>'.format(name = full)
    line = re.sub(r'%(([\w]+\.)?[\w]+\.)?([\w]+)(\(\))?', format_ext_link, line)
    return line
class JSThing():
    """Base class for every documented entity (file, object, function,
    signal, property, enum).

    Provides description accumulation (with XML escaping), rendering of
    the accumulated markup into docbook XML, and property attachment.
    """
    def append_description(self, desc):
        """Append raw comment text to the description, XML-escaping it."""
        # Escape angle brackets so user text cannot break the generated
        # docbook XML; the markup pass inserts its own tags afterwards.
        # (Fix: the previous replace('<', '<') / replace('>', '>') calls
        # were no-ops — the entity references had been lost.)
        self.description += desc.replace('<', '&lt;').replace('>', '&gt;')
    def get_xml_description(self, description = None):
        """Render the (escaped) description markup into docbook XML.

        Handles fenced ``` code blocks, blank-line paragraph breaks,
        '\\' forced line breaks and '- ' bullet lists; inline markup of
        each paragraph is delegated to markup().
        """
        if description is None:
            description = self.description
        stuff = description.split('\n')
        # Pass 1: fold physical lines into logical paragraphs / code
        # blocks / list items.
        joined = ['']
        in_code = False
        in_list = False
        for line in stuff:
            if line.strip() == '```':
                if in_code:
                    joined[-1] += '```'
                    joined.append('')
                else:
                    if in_list:
                        joined[-1] += '\n```'
                    else:
                        joined.append('```\n')
                in_code = not in_code
                continue
            if in_code:
                # Inside a code fence: keep lines verbatim.
                joined[-1] += '\n' + line
                continue
            line = line.strip()
            if line == '\\' and in_list:
                joined[-1] += '\n\n'
            elif len(line) == 0 or line == '\\':
                # New paragraph on an empty line (or a lone '\').
                joined.append('')
                in_list = False
            else:
                if joined[-1] == '' and line.startswith('- '):
                    in_list = True
                if line.startswith('- '):
                    joined.append('')
                joined[-1] += ' ' + line
        # Pass 2: emit docbook for each logical chunk.
        description = ''
        in_list = False
        list_buffer = []
        for line in joined:
            if line.split('\n')[0].strip() == '```':
                description += '<informalexample><programlisting>{0}</programlisting></informalexample>'\
                    .format(line.replace('```', ''))
                continue
            if line == '':
                continue
            line = line.strip()
            if line.startswith('-'):
                # List item: render recursively and buffer until the list ends.
                in_list = True
                list_buffer.append(self.get_xml_description(line[1:]))
                continue
            if in_list:
                # A non-list chunk terminates the pending list.
                description += '<itemizedlist>' + \
                    '\n'.join('<listitem>{0}</listitem>'.format(item) for item in list_buffer) + \
                    '</itemizedlist>'
                list_buffer = []
                in_list = False
            line = markup(line, self)
            description += '<para>{0}</para>'.format(line)
        if in_list:
            # Flush a list that ran to the end of the description.
            description += '<itemizedlist>' + \
                '\n'.join('<listitem>{0}</listitem>'.format(item) for item in list_buffer) + \
                '</itemizedlist>'
            list_buffer = []
        return description
    def add_property(self, prop):
        """Attach a parsed property; 'short_description' is special-cased."""
        if prop.name == "short_description":
            self.short_description = prop
        else:
            self.properties.append(prop)
        prop.file = self.file
        prop.object = self.object
class JSSignal(JSThing):
    """A documented signal belonging to a JSObject."""
    def __init__(self, name):
        self.name = name
        # Signal parameters accumulate here as JSProperty instances.
        self.properties = []
        self.description = ''
        self.short_description = JSProperty(None, '', '')
class JSFunction(JSThing):
    """A documented function or method."""
    def __init__(self, name):
        self.name = name
        # Parameters accumulate here as JSProperty instances.
        self.properties = []
        self.description = ''
        self.short_description = JSProperty(None, '', '')
        # Placeholder until set_return() is called by the parser.
        self.return_value = JSProperty(None, '', '')
    def set_return(self, retval):
        """Record the function's documented return value."""
        self.return_value = retval
        retval.file = self.file
        # NOTE(review): assigns 'obj', not 'object', unlike
        # JSThing.add_property — looks inconsistent; preserved as-is.
        retval.obj = self.object
class JSProperty(JSThing):
    """A single documented property / parameter: name, type and text."""
    def __init__(self, name, arg_type, desc):
        self.name = name
        # Normalise a falsy type (None or '') to the empty string.
        self.arg_type = arg_type or ''
        self.description = ''
        # Route the initial text through append_description so it is escaped.
        self.append_description(desc + "\n")
class JSFile(JSThing):
    """One documented source file (e.g. imports.ui.main) and its contents."""
    def __init__ (self, directory, name):
        self.directory = directory
        # Display name with the first letter capitalised ("main" -> "Main").
        self.name = name[0].capitalize() + name[1:]
        self.orig_name = self.name
        self.imports = "imports.{0}.{1}".format(directory, name)
        # Anchor prefix used in all generated docbook ids.
        self.prefix = directory + "-" + name
        self.description = ''
        self.short_description = JSProperty(None, '', '')
        self.properties = []
        self.objects = []
        self.signals = []
        self.enums = []
        self.functions = []
        # A file acts as its own 'file' and 'object' context for markup().
        self.file = self
        self.object = self
    def is_interesting(self):
        # A file deserves its own page if it has any top-level functions,
        # properties, or description text of its own.
        return len(self.functions) + len(self.properties) + len(self.description) > 0
    def add_function(self, func):
        """Attach a top-level function and point it back at this file."""
        self.functions.append(func)
        func.file = self
        func.object = self
    def add_object(self, obj):
        """Attach a class, qualifying its prefix and name with this file's.

        Order matters below: the prefix uses the object's bare name, and
        only then is the name itself qualified with the file name.
        """
        self.objects.append(obj)
        obj.parent = self
        obj.directory = self.directory
        obj.prefix = self.prefix + "-" + obj.name
        obj.name = self.name + "-" + obj.name
        obj.file = self
    def add_enum(self, obj):
        """Attach an enum; unlike add_object, the enum's name is left as-is."""
        self.enums.append(obj)
        obj.parent = self
        obj.directory = self.directory
        obj.prefix = self.prefix + "-" + obj.name
        obj.file = self
class JSObject(JSThing):
    """A documented class within a file (wired up by JSFile.add_object)."""
    def __init__(self, name):
        self.name = name
        self.orig_name = name
        # Links into the surrounding structure; populated by JSFile.add_object.
        self.parent = None
        self.directory = None
        self.prefix = None
        self.inherit = ''
        self.description = ''
        self.short_description = JSProperty(None, '', '')
        # Members collected while parsing.
        self.functions = []
        self.properties = []
        self.signals = []
        self.enums = []
        self.object = self
    def add_function(self, func):
        """Attach a method, pointing it back at this object and its file."""
        self.functions.append(func)
        func.file = self.file
        func.object = self
    def add_signal(self, signal):
        """Attach a signal to this object."""
        self.signals.append(signal)
        # NOTE(review): assigns the object itself as the signal's 'file',
        # unlike add_function which uses self.file — preserved as-is.
        signal.file = self
        signal.object = self
    def set_inherit(self, inherit):
        """Record the name of the class this object inherits from."""
        self.inherit = inherit
class JSEnum(JSThing):
    """A documented enumeration; members live in self.properties."""
    def __init__(self, name):
        self.name = name
        self.object = self
        # Enum members accumulate here as JSProperty instances.
        self.properties = []
        self.description = ''
        self.short_description = JSProperty(None, '', '')
# ---------------------------------------------------------------------------
# Docbook template strings.  {placeholders} are filled via str.format() by
# the generator functions below; the string bodies are emitted verbatim.
# ---------------------------------------------------------------------------
# Top-level book document: one chapter per documented source file.
SGML_FORMAT = '''\
<?xml version='1.0'?>
<!DOCTYPE book PUBLIC '-//OASIS//DTD DocBook XML V4.3//EN'
'http://www.oasis-open.org/docbook/xml/4.3/docbookx.dtd'
[
<!ENTITY % local.common.attrib "xmlns:xi CDATA #FIXED 'http://www.w3.org/2003/XInclude'">
]>
<book id='index'>
<bookinfo>
<title>Cinnamon Javascript Reference Manual</title>
<releaseinfo>
This document is for Cinnamon {version}.
The latest version of this documentation can be found online at <ulink role="online-location" url="http://developer.linuxmint.com/reference/git/cinnamon-js/">http://developer.linuxmint.com/reference/git/cinnamon-js/</ulink>.
</releaseinfo>
</bookinfo>
{chapters}
</book>'''
# One <chapter> per file, holding an xi:include per documented object.
SGML_CHAPTER_FORMAT = '''
<chapter id="cinnamon-js-{prefix}-section">
<title>{title}</title>
{entries}
</chapter>'''
# xi:include entry pointing at one generated object page.
SGML_ENTRY_FORMAT = '<xi:include href="{directory}/{name}.xml"/>'
# Skeleton of a single refentry page (one per file/object).
FILE_FORMAT = '''\
<?xml version='1.0'?>
<!DOCTYPE refentry PUBLIC '-//OASIS//DTD DocBook XML V4.3//EN'
'http://www.oasis-open.org/docbook/xml/4.3/docbookx.dtd'
[
<!ENTITY % local.common.attrib "xmlns:xi CDATA #FIXED 'http://www.w3.org/2003/XInclude'">
]>
<refentry id="cinnamon-js-{prefix}">
<refmeta>
<refentrytitle role="top_of_page" id="cinnamon-js-{prefix}.top_of_page">{name}</refentrytitle>
<manvolnum>3</manvolnum>
<refmiscinfo>
{name}
</refmiscinfo>
</refmeta>
<refnamediv>
<refname>{name}</refname>
<refpurpose>{short_description}</refpurpose>
</refnamediv>
{func_header}
{prop_header}
{signal_header}
{enum_header}
{hierarchy}
{description}
{functions}
{properties}
{signals}
{enums}
</refentry>
'''
# Synopsis table listing all functions of the page.
FUNCTION_HEADER_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.functions" role="functions_proto">
<title role="functions_proto.title">Functions</title>
<informaltable pgwide="1" frame="none">
<tgroup cols="2">
<colspec colname="functions_return" colwidth="150px"/>
<colspec colname="functions_name"/>
<tbody>
{function_headers}
</tbody>
</tgroup>
</informaltable>
</refsect1>
'''
# One row of the functions synopsis table.
FUNCTION_HEADER_ITEM_FORMAT = '''
<row>
<entry role="function_type">
<link linkend="{return_link}">
<returnvalue>{return_name}</returnvalue>
</link>
</entry>
<entry role="function_name">
<link linkend="cinnamon-js-{prefix}-{name}">{name}</link> <phrase role="c_punctuation">()</phrase>
</entry>
</row>
'''
# Synopsis table listing all properties of the page.
PROPERTY_HEADER_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.properties" role="properties">
<title role="properties.title">Properties</title>
<informaltable frame="none">
<tgroup cols="3">
<colspec colname="properties_type" colwidth="150px"/>
<colspec colname="properties_name" colwidth="300px"/>
<tbody>
{property_headers}
</tbody>
</tgroup>
</informaltable>
</refsect1>
'''
# Synopsis table listing all signals of the page.
SIGNAL_HEADER_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.signals" role="signal_proto">
<title role="signal_proto.title">Signals</title>
<informaltable frame="none">
<tgroup cols="3">
<colspec colname="signals_return" colwidth="150px" />
<colspec colname="signals_name" colwidth="300px" />
<tbody>
{signal_headers}
</tbody>
</tgroup>
</informaltable>
</refsect1>
'''
# One row of the signals synopsis table.
SIGNAL_HEADER_ITEM_FORMAT = '''
<row>
<entry role="signal_type">
</entry>
<entry role="signal_name">
<link linkend="cinnamon-js-{prefix}-{name}-signal">{name}</link>
</entry>
</row>
'''
# Synopsis table listing all enums of the page.
ENUM_HEADER_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.other" role="other_proto">
<title role="other_proto.title">Types and Values</title>
<informaltable role="enum_members_table" pgwide="1" frame="none">
<tgroup cols="2">
<colspec colname="name" colwidth="150px"/>
<colspec colname="description"/>
<tbody>
{enum_headers}
</tbody>
</tgroup>
</informaltable>
</refsect1>
'''
# One row of the enums synopsis table.
ENUM_HEADER_ITEM_FORMAT = '''
<row>
<entry role="datatype_keyword">enum</entry>
<entry role="function_name">
<link linkend="cinnamon-js-{prefix}-{name}">{name}</link>
</entry>
</row>
'''
# One row of the properties synopsis table.
PROPERTY_HEADER_ITEM_FORMAT = '''
<row>
<entry role="property_type">
<link linkend="{type_link}"><type>{type_name}</type></link>
</entry>
<entry role="property_name">
<link linkend="cinnamon-js-{prefix}--{name}">{name}</link>
</entry>
</row>
'''
# Object Hierarchy section; {hierarchy} is the indented ancestor chain.
HIERARCHY_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.object-hierarchy" role="object_hierarchy">
<title role="object_hierarchy.title">Object Hierarchy</title>
<screen>
<link linkend="Object">Object</link>
{hierarchy}
</screen>
</refsect1>
'''
# One indented line of the hierarchy tree.
HIERARCHY_ITEM_FORMAT = '{spacing}<phrase role="lineart">╰──</phrase> <link linkend="cinnamon-js-{prefix}">{name}</link>'
# Free-form Description section.
DESCRIPTION_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.description" role="desc">
<title role="desc.title">Description</title>
{description}
</refsect1>
'''
# Detailed Functions section wrapper.
FUNCTIONS_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.functions_details" role="details">
<title role="details.title">Functions</title>
{functions}
</refsect1>
'''
# One detailed function entry: prototype, description, params, return value.
FUNCTION_ITEM_FORMAT = '''
<refsect2 id="cinnamon-js-{prefix}-{name}" role="function">
<title>{name} ()</title>
<indexterm zone="cinnamon-js-{prefix}-{name}"><primary>{name}</primary></indexterm>
<programlisting language="javascript">
<link linkend="{return_link}"><returnvalue>{return_type}</returnvalue></link>
{name} ({inline_params});</programlisting>
{description}
{params}
{return_desc}
</refsect2>
'''
# Detailed Signals section wrapper.
SIGNALS_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.signal-details" role="details">
<title role="details.title">Signal details</title>
{signals}
</refsect1>
'''
# One detailed signal entry with the user_function prototype.
SIGNAL_ITEM_FORMAT = '''
<refsect2 id="cinnamon-js-{prefix}-{name}-signal" role="signal">
<title>The <literal>“{name}”</literal> signal</title>
<indexterm zone="cinnamon-js-{prefix}-{name}-signal"><primary>{prefix}::{name}</primary></indexterm>
<programlisting language="javascript">
user_function ({inline_params});</programlisting>
{description}
{params}
</refsect2>
'''
# Parameters table for a function or signal.
FUNC_PARAMETERS_FORMAT = '''
<refsect3 role="parameters">
<title>Parameters</title>
<informaltable role="parameters_table" pgwide="1" frame="none">
<tgroup cols="3">
<colspec colname="parameters_name" colwidth="150px"/>
<colspec colname="parameters_description"/>
<colspec colname="parameters_annotations" colwidth="200px"/>
<tbody>
{param_items}
</tbody>
</tgroup>
</informaltable>
</refsect3>
'''
# One parameter inside the prototype's parenthesised list.
INLINE_PARAMETER_FORMAT = '<parameter><link linkend="{type_link}"><type>{type_name}</type></link>{name}</parameter>'
# One row of the parameters table.
FUNC_PARAMETERS_ITEM_FORMAT = '''
<row>
<entry role="parameter_name"><para>{name}</para></entry>
<entry role="parameter_description">{description}</entry>
<entry role="parameter_annotations"></entry>
</row>
'''
# Returns subsection of a function entry.
FUNC_RETURN_FORMAT = '''
<refsect3 role="returns">
<title>Returns</title>
{desc}
</refsect3>
'''
# Detailed Property Details section wrapper.
PROPERTIES_FORMAT = '''
<refsect1 id="cinnamon-js-{prefix}.property-details" role="property_details">
<title role="property_details.title">Property Details</title>
{properties}
</refsect1>
'''
# One detailed property entry; {disp_name} is the padded quoted name.
PROPERTIES_ITEM_FORMAT = '''
<refsect2 id="cinnamon-js-{prefix}--{name}" role="property">
<title>The <literal>“{name}”</literal> property</title>
<indexterm zone="cinnamon-js-{prefix}--{name}">
<primary>cinnamon-js-{prefix}:{name}</primary>
</indexterm>
<programlisting> {disp_name} <link linkend="{type_link}"><type>{type_name}</type></link></programlisting>
{description}
</refsect2>
'''
# Detailed Types and Values (enums) section wrapper.
ENUMS_FORMAT = '''
<refsect1 id="CinnamonGlobal.other_details" role="details">
<title role="details.title">Types and Values</title>
{enums}
</refsect1>
'''
# One detailed enum entry with its members table.
ENUMS_ITEM_FORMAT = '''
<refsect2 id="cinnamon-js-{prefix}" role="enum">
<title>enum {name}</title>
<indexterm zone="{name}"><primary>{name}</primary></indexterm>
{description}
<refsect3 role="enum_members">
<title>Members</title>
<informaltable role="enum_members_table" pgwide="1" frame="none">
<tgroup cols="2">
<colspec colname="enum_members_name" colwidth="300px"/>
<colspec colname="enum_members_description"/>
<tbody>
{enum_items}
</tbody>
</tgroup>
</informaltable>
</refsect3>
</refsect2>
'''
# One row of an enum's members table.
ENUMS_ITEM_ROW_FORMAT = '''
<row role="constant">
<entry role="enum_member_name"><para id="{name}:CAPS">{name}</para></entry>
<entry role="enum_member_description">{description}</entry>
</row>
'''
def write_sgml(files, version):
    """Write the top-level cinnamon-js-docs.sgml driver document.

    Emits one <chapter> per interesting file, each xi:including the XML
    page of every documented object in that file.
    """
    sgml = open('cinnamon-js-docs.sgml', 'w')
    chapters = []
    for _file in files:
        # Skip files with no documentation of their own and no objects.
        if not _file.is_interesting() and len(_file.objects) == 0:
            continue
        entries = []
        if _file.is_interesting():
            # NOTE: mutates the file's object list so the file's own page
            # is emitted first, alongside its objects.
            _file.objects.insert(0, _file)
        entries = [SGML_ENTRY_FORMAT.format(
            directory = _file.directory,
            name = obj.name) for obj in _file.objects]
        chapters.append(SGML_CHAPTER_FORMAT.format(
            prefix = _file.prefix,
            title = _file.imports,
            entries = "\n".join(entries)))
    sgml.write(SGML_FORMAT.format(
        version = version,
        chapters = "\n".join(chapters)))
def create_file(obj):
    """Write the docbook refentry page for one documented file or object.

    Output goes to <directory>/<name>.xml; each section is produced by
    its dedicated get_* helper and spliced into FILE_FORMAT.
    """
    file_obj = open('{0}/{1}.xml'.format(obj.directory, obj.name), 'w')
    # Collapse the short description onto a single line for <refpurpose>.
    short_description = obj.short_description.description.replace("\n", " ").strip()
    file_obj.write(FILE_FORMAT.format(
        prefix = obj.prefix,
        # Stored names use '-' separators; display them with dots.
        name = obj.name.replace("-", "."),
        short_description = markup(short_description, obj),
        func_header = get_function_header(obj),
        signal_header = get_signal_header(obj),
        prop_header = get_properties_header(obj),
        enum_header = get_enum_header(obj),
        hierarchy = get_hierarchy(obj),
        description = get_description(obj),
        functions = get_functions(obj),
        signals = get_signals(obj),
        properties = get_properties(obj),
        enums = get_enums(obj)))
    file_obj.close()
def get_function_header(obj):
    """Render the functions synopsis table, or '' when there are none."""
    if not obj.functions:
        return ""
    rows = []
    for func in obj.functions:
        rows.append(FUNCTION_HEADER_ITEM_FORMAT.format(
            return_link = get_type_link(func.return_value.arg_type, obj.file),
            return_name = func.return_value.arg_type,
            prefix = obj.prefix,
            name = func.name))
    return FUNCTION_HEADER_FORMAT.format(
        prefix = obj.prefix,
        function_headers = "\n".join(rows))
def get_signal_header(obj):
    """Render the signals synopsis table, or '' when there are none."""
    if not obj.signals:
        return ""
    rows = []
    for sig in obj.signals:
        rows.append(SIGNAL_HEADER_ITEM_FORMAT.format(
            prefix = obj.prefix,
            name = sig.name))
    return SIGNAL_HEADER_FORMAT.format(
        prefix = obj.prefix,
        signal_headers = "\n".join(rows))
def get_properties_header(obj):
    """Render the properties synopsis table, or '' when there are none."""
    if not obj.properties:
        return ""
    rows = []
    for prop in obj.properties:
        rows.append(PROPERTY_HEADER_ITEM_FORMAT.format(
            type_link = get_type_link(prop.arg_type, obj.file),
            type_name = prop.arg_type,
            prefix = obj.prefix,
            name = prop.name))
    return PROPERTY_HEADER_FORMAT.format(
        prefix = obj.prefix,
        property_headers = "\n".join(rows))
def get_enum_header(obj):
    """Render the enums synopsis table, or '' when there are none."""
    if not obj.enums:
        return ""
    rows = []
    for enum in obj.enums:
        rows.append(ENUM_HEADER_ITEM_FORMAT.format(
            prefix = obj.prefix,
            name = enum.name))
    return ENUM_HEADER_FORMAT.format(
        prefix = obj.prefix,
        enum_headers = "\n".join(rows))
def get_hierarchy(obj):
    """Render the Object Hierarchy section for an object page.

    Files have no hierarchy. Ancestors are collected by following
    'inherit' links through the global objects table, with a guard
    against inheritance cycles.
    """
    from gen_doc import objects
    if isinstance(obj, JSFile):
        return ""
    name = obj.name.replace('-', '.')
    hierarchy = []
    try:
        while True:
            name = objects[name].inherit
            # Already-seen ancestor means an inheritance cycle: stop.
            if name in hierarchy:
                break
            if name:
                hierarchy.insert(0, name)
    except KeyError:
        # Reached a type that is not documented here (external base class).
        pass
    count = 1
    hierarchy_strs = []
    for item in hierarchy:
        try:
            hierarchy_strs.append(HIERARCHY_ITEM_FORMAT.format(
                spacing = ' ' * count * 4,
                prefix = objects[item].prefix,
                name = item))
        except KeyError:
            # Ancestor without a documented page: emit a dead link target.
            hierarchy_strs.append(HIERARCHY_ITEM_FORMAT.format(
                spacing = ' ' * count * 4,
                prefix = "void",
                name = item))
        count += 1
    # Finally the object itself, at the deepest indentation level.
    hierarchy_strs.append(HIERARCHY_ITEM_FORMAT.format(
        spacing = ' ' * count * 4,
        prefix = "void",
        name = obj.name.replace('-', '.')))
    return HIERARCHY_FORMAT.format(
        prefix = obj.prefix,
        hierarchy = "\n".join(hierarchy_strs))
def get_description(obj):
    """Render the Description section, or '' if the object has none."""
    if not obj.description:
        return ""
    return DESCRIPTION_FORMAT.format(
        prefix = obj.prefix,
        description = obj.get_xml_description())
def get_functions(obj):
    """Render the detailed Functions section, or '' when there are none."""
    if len(obj.functions) == 0:
        return ""
    functions = []
    for func in obj.functions:
        inline_params = ""
        params = ""
        if len(func.properties) > 0:
            # Calculate how long the argument types are and make the arguments
            # align
            max_length = max(len(x.arg_type) for x in func.properties) + 3
            # If no parameter has argument types, don't show that silly
            # whitespace
            if max_length == 3:
                max_length = 0
            inline_params = [INLINE_PARAMETER_FORMAT.format(
                type_link = get_type_link(param.arg_type, obj.file),
                type_name = param.arg_type,
                name = " " * (max_length - len(param.arg_type)) + param.name) for param in func.properties]
            # Continuation lines are indented to line up under the opening
            # parenthesis of "name (".
            inline_params = (',\n' + ' ' * (len(func.name) + 2)).join(inline_params)
            params = [FUNC_PARAMETERS_ITEM_FORMAT.format(
                name = param.name,
                description = param.get_xml_description()) for param in func.properties]
            params = FUNC_PARAMETERS_FORMAT.format(param_items = '\n'.join(params))
        return_desc = ""
        # A named return value means @returns was documented.
        if func.return_value.name is not None:
            return_desc = FUNC_RETURN_FORMAT.format(desc=func.return_value.get_xml_description())
        functions.append(FUNCTION_ITEM_FORMAT.format(
            prefix = obj.prefix,
            name = func.name,
            return_link = get_type_link(func.return_value.arg_type, obj.file),
            return_type = func.return_value.arg_type,
            description = func.get_xml_description(),
            inline_params = inline_params,
            params = params,
            return_desc = return_desc))
    return FUNCTIONS_FORMAT.format(
        prefix = obj.prefix,
        functions = "\n".join(functions))
def get_signals(obj):
    """Render the detailed Signal details section, or '' when there are none."""
    if len(obj.signals) == 0:
        return ""
    signals = []
    for sig in obj.signals:
        inline_params = ""
        params = ""
        if len(sig.properties) > 0:
            # Calculate how long the argument types are and make the arguments
            # align
            max_length = max(len(x.arg_type) for x in sig.properties) + 3
            # If no parameter has argument types, don't show that silly
            # whitespace
            if max_length == 3:
                max_length = 0
            inline_params = [INLINE_PARAMETER_FORMAT.format(
                type_link = get_type_link(param.arg_type, obj.file),
                type_name = param.arg_type,
                name = " " * (max_length - len(param.arg_type)) + param.name) for param in sig.properties]
            # Continuation lines are indented to line up under "name (".
            inline_params = (',\n' + ' ' * (len(sig.name) + 2)).join(inline_params)
            params = [FUNC_PARAMETERS_ITEM_FORMAT.format(
                name = param.name,
                description = param.get_xml_description()) for param in sig.properties]
            params = FUNC_PARAMETERS_FORMAT.format(param_items = '\n'.join(params))
        signals.append(SIGNAL_ITEM_FORMAT.format(
            prefix = obj.prefix,
            name = sig.name,
            description = sig.get_xml_description(),
            inline_params = inline_params,
            params = params))
    return SIGNALS_FORMAT.format(
        prefix = obj.prefix,
        signals = "\n".join(signals))
def get_properties(obj):
    """Render the Property Details section, or '' when there are none."""
    if not obj.properties:
        return ""
    rendered = []
    for prop in obj.properties:
        rendered.append(PROPERTIES_ITEM_FORMAT.format(
            prefix = obj.prefix,
            name = prop.name,
            # Quoted display name padded so the type column lines up.
            disp_name = ('“' + prop.name + '”').ljust(25),
            type_link = get_type_link(prop.arg_type, obj.file),
            type_name = prop.arg_type,
            description = prop.get_xml_description()))
    return PROPERTIES_FORMAT.format(
        prefix = obj.prefix,
        properties = "\n".join(rendered))
def get_enums(obj):
    """Render the Types and Values section, or '' when there are no enums."""
    if not obj.enums:
        return ""
    def render_enum(enum):
        # One table row per enum member.
        rows = "\n".join(ENUMS_ITEM_ROW_FORMAT.format(
            name = item.name,
            description = item.get_xml_description()) for item in enum.properties)
        return ENUMS_ITEM_FORMAT.format(
            prefix = enum.prefix,
            name = enum.name,
            description = enum.get_xml_description(),
            enum_items = rows)
    return ENUMS_FORMAT.format(
        prefix = obj.prefix,
        enums = "\n".join(render_enum(enum) for enum in obj.enums))
/*
* Copyright (c) 2021 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hamcrest.CoreMatchers.endsWith;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
import org.junit.Assume;
import org.junit.Test;
import org.mockito.internal.configuration.plugins.DefaultMockitoPlugins;
import org.mockito.internal.configuration.plugins.Plugins;
import org.mockito.plugins.MemberAccessor;
import org.mockito.plugins.MockMaker;
/**
 * Integration tests for plugin selection via environment variables.
 *
 * <p>The CI build matrix sets {@code MOCK_MAKER} / {@code MEMBER_ACCESSOR};
 * each test is skipped (JUnit {@code Assume}) unless the relevant variable is
 * present, then asserts that the plugin Mockito actually loaded matches the
 * configured value.
 */
public class MockitoEnvTest {

    /** MOCK_MAKER ending in "default": the default MockMaker plugin must be active. */
    @Test
    public void uses_default_mock_maker_from_env() {
        final String mockMaker = System.getenv("MOCK_MAKER");
        Assume.assumeThat(mockMaker, not(nullValue()));
        Assume.assumeThat(mockMaker, endsWith("default"));

        assertThat(DefaultMockitoPlugins.getDefaultPluginClass(MockMaker.class.getName()))
                .isEqualTo(Plugins.getMockMaker().getClass().getName());
    }

    /** Explicit MOCK_MAKER value: exactly that MockMaker must be active. */
    @Test
    public void uses_mock_maker_from_env() {
        final String mockMaker = System.getenv("MOCK_MAKER");
        Assume.assumeThat(mockMaker, not(nullValue()));
        Assume.assumeThat(mockMaker, not(endsWith("default")));

        assertThat(DefaultMockitoPlugins.getDefaultPluginClass(mockMaker))
                .isEqualTo(Plugins.getMockMaker().getClass().getName());
    }

    /** MEMBER_ACCESSOR ending in "default": the default MemberAccessor must be active. */
    @Test
    public void uses_default_member_accessor_from_env() {
        final String memberAccessor = System.getenv("MEMBER_ACCESSOR");
        Assume.assumeThat(memberAccessor, not(nullValue()));
        Assume.assumeThat(memberAccessor, endsWith("default"));

        assertThat(DefaultMockitoPlugins.getDefaultPluginClass(MemberAccessor.class.getName()))
                .isEqualTo(Plugins.getMemberAccessor().getClass().getName());
    }

    /** Explicit MEMBER_ACCESSOR value: exactly that MemberAccessor must be active. */
    @Test
    public void uses_member_accessor_from_env() {
        final String memberAccessor = System.getenv("MEMBER_ACCESSOR");
        Assume.assumeThat(memberAccessor, not(nullValue()));
        Assume.assumeThat(memberAccessor, not(endsWith("default")));

        assertThat(DefaultMockitoPlugins.getDefaultPluginClass(memberAccessor))
                .isEqualTo(Plugins.getMemberAccessor().getClass().getName());
    }
}
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.layers.encoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import encoders
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _get_const_var(name, shape, value):
  """Get a TF variable of the given shape, initialized to a constant value."""
  return variable_scope.get_variable(
      name, shape, initializer=init_ops.constant_initializer(value))
class EncodersTest(test.TestCase):
  """Tests for bow_encoder / embed_sequence in tf.contrib.layers encoders."""

  def testBowEncoderSparse(self):
    # Dense int docs are embedded via a sparse lookup by default;
    # output is one averaged embedding per document: [batch, embed_dim].
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc = encoders.bow_encoder(docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([2, 3], enc.eval().shape)

  def testBowEncoderSparseTensor(self):
    # A SparseTensor input must produce the same [batch, embed_dim] shape.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
      enc = encoders.bow_encoder(sparse_docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([2, 3], enc.eval().shape)

  def testBowEncoderSparseEmptyRow(self):
    # A row of padding ids must not break the averaging.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3], [0, 0]]
      enc = encoders.bow_encoder(docs, 4, 5)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([3, 5], enc.eval().shape)

  def testBowEncoderDense(self):
    # sparse_lookup=False uses a dense embedding lookup instead.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3], [0, 0], [0, 0]]
      enc = encoders.bow_encoder(docs, 4, 3, sparse_lookup=False)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([4, 3], enc.eval().shape)

  def testBowEncoderSparseTensorDenseLookup(self):
    # SparseTensor input is incompatible with the dense lookup path.
    with self.test_session():
      docs = [[0, 1]]
      sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
      with self.assertRaises(TypeError):
        encoders.bow_encoder(sparse_docs, 4, 3, sparse_lookup=False)

  def testBowEncodersSharingEmbeddings(self):
    # reuse=True with the same scope must share the embedding table, so
    # both encoders produce identical outputs.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc_1 = encoders.bow_encoder(docs, 4, 3, scope='test')
      enc_2 = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncodersSharingEmbeddingsInheritedScopes(self):
    # Sharing also works when reuse is set on an enclosing variable scope.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        enc_1 = encoders.bow_encoder(docs, 4, 3)
      with variable_scope.variable_scope('test', reuse=True):
        enc_2 = encoders.bow_encoder(docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncodersSharingEmbeddingsSharedScope(self):
    # ...and when reuse_variables() is toggled on the current scope.
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc_1 = encoders.bow_encoder(docs, 4, 3, scope='bow')
      variable_scope.get_variable_scope().reuse_variables()
      enc_2 = encoders.bow_encoder(docs, 4, 3, scope='bow')
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncoderReuseEmbeddingsVariable(self):
    # With a pre-made 'embeddings' variable, the encoder must average the
    # corresponding rows: rows (1,1) -> [3,4,5]; rows (2,3) -> [7.5,8.5,9.5].
    with self.test_session() as sess:
      docs = [[1, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        v = _get_const_var('embeddings', (4, 3),
                           [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        self.assertEqual(v.name, 'test/embeddings:0')
      enc = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      self.assertAllClose([[3., 4., 5.], [7.5, 8.5, 9.5]], enc.eval())

  def testEmbedSequence(self):
    # embed_sequence keeps the per-token embeddings: [batch, seq, embed_dim].
    with self.test_session() as sess:
      docs = [[1, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        v = _get_const_var('embeddings', (4, 3),
                           [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        self.assertEqual(v.name, 'test/embeddings:0')
      emb = encoders.embed_sequence(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      self.assertAllClose(
          [[[3., 4., 5.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]],
          emb.eval())
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
#!/usr/bin/env bash
# Copyright 2021 The Cockroach Authors.
#
# Use of this software is governed by the CockroachDB Software License
# included in the /LICENSE file.

# CI build driver: cross-builds the cockroach binaries (plus per-platform
# extras) under the given bazel cross-compilation config.
set -xeuo pipefail

# Exactly one argument: the bazel --config to build with (e.g. crosslinux).
if [ -z "$1" ]
then
    echo 'Usage: build_impl.sh CONFIG'
    exit 1
fi
CONFIG="$1"

EXTRA_TARGETS=

# Extra targets to build on Linux x86_64 only.
if [ "$CONFIG" == "crosslinux" ]
then
    DOC_TARGETS=$(grep '^//' docs/generated/bazel_targets.txt)
    BINARY_TARGETS="@com_github_cockroachdb_go_test_teamcity//:go-test-teamcity"
    EXTRA_TARGETS="$DOC_TARGETS $BINARY_TARGETS"
fi

# Extra targets to build on Unix only.
if [ "$CONFIG" != "crosswindows" ]
then
    EXTRA_TARGETS="$EXTRA_TARGETS //pkg/cmd/roachprod //pkg/cmd/workload //pkg/cmd/dev //pkg/cmd/bazci //pkg/cmd/bazci/process-bep-file //pkg/cmd/bazci/bazel-github-helper"
fi

EXTRA_ARGS=
# GEOS does not compile on windows.
GEOS_TARGET=//c-deps:libgeos
if [ "$CONFIG" == "crosswindows" ]
then
    EXTRA_ARGS=--enable_runfiles
    GEOS_TARGET=
fi

# Build bazci first, then use it to drive the actual (instrumented) build.
bazel build //pkg/cmd/bazci
BAZEL_BIN=$(bazel info bazel-bin)
"$BAZEL_BIN/pkg/cmd/bazci/bazci_/bazci" -- build -c opt \
    --config "$CONFIG" $EXTRA_ARGS \
    //pkg/cmd/cockroach-short //pkg/cmd/cockroach \
    //pkg/cmd/cockroach-sql $GEOS_TARGET $EXTRA_TARGETS
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.log import logger
def episode_iv():
    """Stream the ASCII-art Star Wars telnet broadcast into the log, forever."""
    from telnetlib import Telnet
    host = 'towel.blinkenlights.nl'
    connection = Telnet(host)
    # Poll the stream in short slices and log whatever arrived.
    while True:
        frame = connection.read_until('mesfesses', 0.1)
        logger.info(frame)
def perdu():
    """Fetch http://www.perdu.com and log its HTML body."""
    import urllib
    page = urllib.urlopen("http://www.perdu.com")
    logger.info(page.read())
def myip():
    """Fetch whatismyip.org and log the response body."""
    import urllib
    response = urllib.urlopen("http://whatismyip.org/")
    logger.info(response.read())
def naheulbeuk():
    """Download the Naheulbeuk dwarf portrait and log it rendered as ASCII art."""
    import os
    import urllib2
    from cStringIO import StringIO
    from PIL import Image
    import aalib
    # The bare Linux console cannot display ANSI escape sequences, so pick the
    # matching aalib screen flavour for the current terminal.
    if os.getenv('TERM') == 'linux':
        screen = aalib.LinuxScreen
    else:
        screen = aalib.AnsiScreen
    screen = screen(width=128, height=128)
    # Download the JPEG into memory, convert to greyscale ('L') and fit it to
    # the aalib virtual screen before rendering.
    fp = StringIO(urllib2.urlopen(
        'http://www.penofchaos.com/warham/bd/images/NBK-win7portrait-Nain02.JPG').read())
    image = Image.open(fp).convert('L').resize(screen.virtual_size)
    screen.put_image((0, 0), image)
    logger.info(screen.render())
def what_it_make_me_think(subject):
    """Print an easter-egg video link when *subject* matches a secret word.

    The target word is not stored in clear text: the lower-cased subject is
    hashed with MD5 and compared against a hard-coded digest.
    """
    import hashlib
    if hashlib.md5(subject.lower()).hexdigest() == '6376e9755f8047391621b577ae03966a':
        print "Thanks to %s now I feel like this: https://youtu.be/efTZslkr5Fs?t=60" % subject
# NOTE: the raw docstring below is the function's payload -- logger.info()
# emits dark.__doc__ verbatim, so the ASCII art must stay byte-for-byte intact.
def dark():
    r"""
             .-.
            |_:_|
           /(_Y_)\
          ( \/M\/ )
 '.      _.'-/'-'\-'._
   ':   _/.--'[[[[]'--.\_
     ':  /_'  : |::"| :  '.\
       ': //  ./ |oUU| \.'  :\
         ': _:'..' \_|___|_/ :   :|
           ':.  .'  |_[___]_|  :.':\
            [::\ |  :  | |  :   ; : \
             '-'   \/'.| |.' \  .;.' |
                |\_    \  '-'   :    |
                |  \    \ .:    :  | |
                |   \    | '.   :   \ |
                /       \   :. .;    |
               /     |   |  :__/   :  \\
              |  |   |  \:   | \   |  ||
             /    \  : :  |:   /  |__| /|
         snd |     : : :_/_|  /'._\ '--|_\
             /___.-/_|-'    \  \
                             '-'
    """
    logger.info(dark.__doc__)
# NOTE: as with dark(), the docstring itself is what gets logged; keep the
# ASCII art byte-for-byte intact.
def get_coffee():
    r"""
                        (
                          )     (
                   ___...(-------)-....___
               .-""       )    (          ""-.
         .-'``'|-._             )         _.-|
        /  .--.|   `""---...........---""`   |
       /  /    |                             |
       |  |    |                             |
        \  \   |                             |
         `\ `\ |                             |
           `\ `|                             |
           _/ /\                             /
          (__/  \                           /
       _..---""` \                         /`""---.._
    .-'           \                       /          '-.
   :               `-.__             __.-'              :
   :                  ) ""---...---"" (                 :
    '._               `"--...___...--"`              _.'
  jgs \""--..__                              __..--""/
       '._     "'"----.....______.....----"'"     _.'
          `""--..,,_____            _____,,..--""`
                       `"'"----"'"`
    """
    logger.info(get_coffee.__doc__)
"use strict";

// Run the shared test-case suite with the "eval-cheap-source-map" devtool,
// reusing the parametrized harness from TestCases.template.
const { describeCases } = require("./TestCases.template");

describe("TestCases", () => {
	describeCases({
		name: "devtool-eval-cheap-source-map",
		devtool: "eval-cheap-source-map"
	});
});
from cement.core.foundation import TestApp
from cement.utils.misc import init_defaults
def test_dummy_output():
    """The dummy output handler records rendered data without producing text."""
    data = {'foo': 'bar'}
    with TestApp() as app:
        app.run()
        app.render(data)
        assert app.last_rendered == (data, None)
def test_dummy_template(tmp):
    """The dummy template backend renders to None and copy is a no-op."""
    with TestApp() as app:
        app.run()
        rendered = app.template.render('{{ foo }}', {'foo': 'bar'})
        assert rendered is None
        # copy should silently do nothing with the dummy backend
        app.template.copy('/path/to/src', '/path/to/dest', {})
def test_dummy_mail():
    """Sending mail through the dummy handler reports success."""
    with TestApp() as app:
        app.run()
        assert app.mail.send("Test",
                             to=['me@localhost'],
                             from_addr='me@localhost')
def test_dummy_mail_with_subject_prefix():
    """A configured subject_prefix does not break dummy mail delivery."""
    config = init_defaults('mail.dummy')
    config['mail.dummy']['subject_prefix'] = 'TEST PREFIX'
    with TestApp(config_defaults=config) as app:
        app.run()
        assert app.mail.send("Test",
                             to=['me@localhost'],
                             from_addr='me@localhost',
                             )
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
deploymentconfig "k8s.io/kubernetes/pkg/controller/deployment/config"
)
// DeploymentControllerOptions holds the DeploymentController options.
type DeploymentControllerOptions struct {
	// Embedded pointer: flag values registered in AddFlags are written
	// straight into the shared DeploymentControllerConfiguration.
	*deploymentconfig.DeploymentControllerConfiguration
}
// AddFlags adds flags related to DeploymentController for controller manager to the specified FlagSet.
func (o *DeploymentControllerOptions) AddFlags(fs *pflag.FlagSet) {
	// A nil receiver means these options were not configured; register
	// nothing so callers stay nil-safe.
	if o == nil {
		return
	}

	fs.Int32Var(&o.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", o.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
}
// ApplyTo fills up DeploymentController config with options.
func (o *DeploymentControllerOptions) ApplyTo(cfg *deploymentconfig.DeploymentControllerConfiguration) error {
	// Nothing to copy when the options struct was never populated.
	if o == nil {
		return nil
	}

	cfg.ConcurrentDeploymentSyncs = o.ConcurrentDeploymentSyncs

	return nil
}
// Validate checks validation of DeploymentControllerOptions.
func (o *DeploymentControllerOptions) Validate() []error {
if o == nil {
return nil
}
errs := []error{}
return errs
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kube-controller-manager/app/options/deploymentcontroller.go |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Arno Renevier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class WeatherTest(BackendTest):
    """Online tests for the weather backend."""
    BACKEND = 'weather'

    def test_cities(self):
        """City search returns sane result counts and forecasts are fetchable."""
        # A nonsense query (with injection-looking noise) must match nothing.
        bogus = list(self.backend.iter_city_search('crappything¶m=;drop database'))
        self.assertTrue(len(bogus) == 0)
        # A broad query should match at least one city.
        matches = list(self.backend.iter_city_search('paris'))
        self.assertTrue(len(matches) >= 1)
        # Narrowing with the country should produce a single match.  Materialize
        # the result exactly once: the original code consumed the iterator with
        # list() for the length check and then indexed the exhausted iterator.
        paris = list(self.backend.iter_city_search('paris france'))
        self.assertTrue(len(paris) == 1)
        current = self.backend.get_current(paris[0].id)
        # Type-check the temperature; the original `x is float(x)` identity
        # test only passed by CPython implementation accident.
        self.assertTrue(isinstance(current.temp.value, float))
        forecasts = list(self.backend.iter_forecast(paris[0].id))
        self.assertTrue(len(forecasts) == 10)
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Util\Http;
use Composer\Downloader\TransportException;
/**
* @internal
* @author John Stevenson <john-stevenson@blueyonder.co.uk>
*
* @phpstan-type contextOptions array{http: array{proxy: string, header?: string, request_fulluri?: bool}}
*/
class RequestProxy
{
    /** @var ?contextOptions */
    private $contextOptions;
    /** @var ?non-empty-string */
    private $status;
    /** @var ?non-empty-string */
    private $url;
    /** @var ?non-empty-string */
    private $auth;

    /**
     * @param ?non-empty-string $url The proxy url, without authorization
     * @param ?non-empty-string $auth Authorization for curl
     * @param ?contextOptions $contextOptions
     * @param ?non-empty-string $status
     */
    public function __construct(?string $url, ?string $auth, ?array $contextOptions, ?string $status)
    {
        $this->url = $url;
        $this->auth = $auth;
        $this->contextOptions = $contextOptions;
        $this->status = $status;
    }

    /**
     * Factory for requests where the user has not configured any proxy.
     */
    public static function none(): RequestProxy
    {
        return new self(null, null, null, null);
    }

    /**
     * Factory for requests whose url was excluded by a no_proxy rule.
     */
    public static function noProxy(): RequestProxy
    {
        return new self(null, null, null, 'excluded by no_proxy');
    }

    /**
     * Returns the context options to use for this request, otherwise null
     *
     * @return ?contextOptions
     */
    public function getContextOptions(): ?array
    {
        return $this->contextOptions;
    }

    /**
     * Returns an array of curl proxy options
     *
     * @param array<string, string|int> $sslOptions
     * @return array<int, string|int>
     */
    public function getCurlOptions(array $sslOptions): array
    {
        if ($this->isSecure() && !$this->supportsSecureProxy()) {
            throw new TransportException('Cannot use an HTTPS proxy. PHP >= 7.3 and cUrl >= 7.52.0 are required.');
        }

        // Always set a proxy url, even an empty value, because it tells curl
        // to ignore proxy environment variables
        $options = [CURLOPT_PROXY => (string) $this->url];

        // If using a proxy, tell curl to ignore no_proxy environment variables
        if ($this->url !== null) {
            $options[CURLOPT_NOPROXY] = '';
        }

        // Set any authorization
        if ($this->auth !== null) {
            $options[CURLOPT_PROXYAUTH] = CURLAUTH_BASIC;
            $options[CURLOPT_PROXYUSERPWD] = $this->auth;
        }

        if ($this->isSecure()) {
            // An HTTPS proxy needs its own CA settings, separate from the
            // CA options used to verify the target host's certificate.
            if (isset($sslOptions['cafile'])) {
                $options[CURLOPT_PROXY_CAINFO] = $sslOptions['cafile'];
            }
            if (isset($sslOptions['capath'])) {
                $options[CURLOPT_PROXY_CAPATH] = $sslOptions['capath'];
            }
        }

        return $options;
    }

    /**
     * Returns proxy info associated with this request
     *
     * An empty return value means that the user has not set a proxy.
     * A non-empty value will either be the sanitized proxy url if a proxy is
     * required, or a message indicating that a no_proxy value has disabled the
     * proxy.
     *
     * @param ?string $format Output format specifier
     */
    public function getStatus(?string $format = null): string
    {
        if ($this->status === null) {
            return '';
        }

        // Interpolate the status into the caller-supplied template, which
        // must contain a %s placeholder.
        $format = $format ?? '%s';
        if (strpos($format, '%s') !== false) {
            return sprintf($format, $this->status);
        }

        throw new \InvalidArgumentException('String format specifier is missing');
    }

    /**
     * Returns true if the request url has been excluded by a no_proxy value
     *
     * A false value can also mean that the user has not set a proxy.
     */
    public function isExcludedByNoProxy(): bool
    {
        // Only noProxy() instances have a status message but no proxy url.
        return $this->status !== null && $this->url === null;
    }

    /**
     * Returns true if this is a secure (HTTPS) proxy
     *
     * A false value means that this is either an HTTP proxy, or that a proxy
     * is not required for this request, or that the user has not set a proxy.
     */
    public function isSecure(): bool
    {
        return 0 === strpos((string) $this->url, 'https://');
    }

    /**
     * Returns true if an HTTPS proxy can be used.
     *
     * This depends on PHP7.3+ for CURL_VERSION_HTTPS_PROXY
     * and curl including the feature (from version 7.52.0)
     */
    public function supportsSecureProxy(): bool
    {
        // The constant only exists on PHP >= 7.3; curl_version() can in
        // principle fail, hence the defensive false check.
        if (false === ($version = curl_version()) || !defined('CURL_VERSION_HTTPS_PROXY')) {
            return false;
        }

        // Check the curl feature bitmask for HTTPS-proxy support.
        $features = $version['features'];

        return (bool) ($features & CURL_VERSION_HTTPS_PROXY);
    }
}
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2014, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#include <cstdlib>
#include <iostream>
#include "common.hpp"
namespace CAROTENE_NS {
bool isSupportedConfiguration()
{
    // NEON-enabled builds are the only supported configuration; report the
    // compile-time decision at runtime.
#ifdef CAROTENE_NEON
    const bool haveNeon = true;
#else
    const bool haveNeon = false;
#endif
    return haveNeon;
}
namespace internal {
/* Abort the process when carotene is used outside a supported build
 * configuration, or when a function is called with parameters its current
 * implementation cannot handle.  Both are programming errors, so the
 * response is a message on stderr followed by std::abort(). */
void assertSupportedConfiguration(bool parametersSupported)
{
    if (!isSupportedConfiguration()) {
        std::cerr << "internal error: attempted to use an unavailable function" << std::endl;
        std::abort();
    }
    if (!parametersSupported) {
        std::cerr << "internal error: attempted to use a function with unsupported parameters" << std::endl;
        std::abort();
    }
}
/* Map a (possibly out-of-range) coordinate _p onto a valid index in
 * [0, _len) according to the requested border extrapolation mode.
 * startMargin/endMargin widen the logical range before clamping.
 * Returns -1 for BORDER_MODE_CONSTANT (caller substitutes the constant
 * border value); otherwise returns an in-range coordinate, already
 * re-offset by startMargin. */
ptrdiff_t borderInterpolate(ptrdiff_t _p, size_t _len, BORDER_MODE borderType, size_t startMargin, size_t endMargin)
{
    // Work in the widened coordinate range that includes both margins.
    ptrdiff_t p = _p + (ptrdiff_t)startMargin;
    size_t len = _len + startMargin + endMargin;
    if( (size_t)p < len )
        return _p;  // already in range (the unsigned cast also rejects p < 0)
    else if( borderType == BORDER_MODE_REPLICATE )
        p = p < 0 ? 0 : (ptrdiff_t)len - 1;  // clamp to nearest edge pixel
    else if( borderType == BORDER_MODE_REFLECT || borderType == BORDER_MODE_REFLECT101 )
    {
        // REFLECT repeats the edge pixel, REFLECT101 does not (delta == 1).
        s32 delta = borderType == BORDER_MODE_REFLECT101;
        if( len == 1 )
            return 0;
        // Bounce p back and forth until it lands inside [0, len); more than
        // one iteration is needed when |p| exceeds len.
        do
        {
            if( p < 0 )
                p = -p - 1 + delta;
            else
                p = (ptrdiff_t)len - 1 - (p - (ptrdiff_t)len) - delta;
        }
        while( (size_t)p >= len );
    }
    else if( borderType == BORDER_MODE_WRAP )
    {
        // Shift negative coordinates up by a whole number of periods, then
        // reduce modulo len for coordinates past the end.
        if( p < 0 )
            p -= ((p-(ptrdiff_t)len+1)/(ptrdiff_t)len)*(ptrdiff_t)len;
        if( p >= (ptrdiff_t)len )
            p %= (ptrdiff_t)len;
    }
    else if( borderType == BORDER_MODE_CONSTANT )
        p = -1;  // sentinel: caller fills in the constant border value
    else
        internal::assertSupportedConfiguration(false);
    // Translate back from the margin-widened range to caller coordinates.
    return p - (ptrdiff_t)startMargin;
}
} // namespace internal
} // namespace CAROTENE_NS | cpp | github | https://github.com/opencv/opencv | hal/carotene/src/common.cpp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.