code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
@import "@sass/abstracts/vars";
@import "@sass/abstracts/mixins";

// Promo variant with a "page curl" drop shadow: two rotated, blurred
// pseudo-elements sit behind the content box to fake a lifted-paper look.
.promo-shadow {
  max-width: 960px;
  padding: 0;
  border-top-width: 3px;
  border-top-color: $promo-shadow-border;
  border-style: solid;
  overflow: visible; // let the pseudo-element shadows extend past the box
  position: relative; // anchor for the absolutely-positioned shadows

  &.promo {
    float: left;
  }

  > .component-content {
    @include respond-to(all-mobile) {
      margin: 0 10px 30px 10px;
    }
    padding: 15px;
    margin: 0 0 30px 0;

    // shared geometry for both curl shadows
    &:before,
    &:after {
      opacity: 0.7;
      box-shadow: 0 17px 10px rgba(0, 0, 0, 0.7);
      position: absolute;
      z-index: -1; // sit behind the content
      height: 20%;
      max-height: 100px;
      max-width: 460px;
      width: 47%;
      content: "";
      bottom: 10px;
    }
    // left curl tilts one way, right curl the other
    &:before {
      left: 2%;
      transform: rotate(-3deg);
    }
    &:after {
      right: 2%;
      transform: rotate(3deg);
    }
  }
} | unknown | github | https://github.com/vercel/next.js | examples/cms-sitecore-xmcloud/src/assets/sass/components/promo/_promo-shadow.scss |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard.common import countryList
from sickbeard.helpers import sanitizeSceneName
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard import logger
from sickbeard import db
import re
import datetime
from name_parser.parser import NameParser, InvalidNameException
# Regex fragments matched against the "extra info"/release-group portion of a
# release name; any hit marks the release as unwanted (foreign subs, fix
# releases, samples, extras, dubs).
resultFilters = ["sub(pack|s|bed)", "nlsub(bed|s)?", "swesub(bed)?",
                 "(dir|sample|sub|nfo)fix", "sample", "(dvd)?extras",
                 "dub(bed)?"]
def filterBadReleases(name):
    """
    Filters out non-english and just all-around stupid releases by comparing them
    to the resultFilters contents.

    name: the release name to check

    Returns: True if the release name is OK, False if it's bad.
    """

    try:
        fp = NameParser()
        parse_result = fp.parse(name)
    except InvalidNameException:
        logger.log(u"Unable to parse the filename " + name + " into a valid episode", logger.WARNING)
        return False

    # use the extra info and the scene group to filter against
    check_string = ''
    if parse_result.extra_info:
        check_string = parse_result.extra_info
    if parse_result.release_group:
        if check_string:
            check_string = check_string + '-' + parse_result.release_group
        else:
            check_string = parse_result.release_group

    # if there's no info after the season info then assume it's fine
    if not check_string:
        return True

    # if any of the bad strings are in the name then say no
    for x in resultFilters + sickbeard.IGNORE_WORDS.split(','):
        x = x.strip()
        # BUGFIX: skip empty filter words. IGNORE_WORDS defaulting to ''
        # makes split(',') yield [''], and an empty word builds the pattern
        # '(^|[\W_])($|[\W_])' which matches almost every release name,
        # silently filtering out valid results.
        if not x:
            continue
        if re.search('(^|[\W_])' + x + '($|[\W_])', check_string, re.I):
            logger.log(u"Invalid scene release: " + name + " contains " + x + ", ignoring it", logger.DEBUG)
            return False

    return True
def sceneToNormalShowNames(name):
    """
    Takes a show name from a scene dirname and converts it to a more "human-readable" format.

    name: The show name to convert

    Returns: a list of all the possible "normal" names
    """

    if not name:
        return []

    name_list = [name]

    # use both "and" and "&" variants of the name.
    # BUGFIX: the original passed re.I as re.sub()'s 4th positional argument,
    # which is *count* (re.I == 2), silently capping substitutions at two
    # occurrences; the inline (?i) flag already makes it case-insensitive.
    new_name = re.sub('(?i)([\. ])and([\. ])', '\\1&\\2', name)
    if new_name not in name_list:
        name_list.append(new_name)

    # hoisted out of the loop: the country alternation never changes
    country_match_str = '|'.join(countryList.values())

    results = []

    for cur_name in name_list:
        # add brackets around the year
        results.append(re.sub('(\D)(\d{4})$', '\\1(\\2)', cur_name))

        # add brackets around the country
        results.append(re.sub('(?i)([. _-])(' + country_match_str + ')$', '\\1(\\2)', cur_name))

    results += name_list

    return list(set(results))
def makeSceneShowSearchStrings(show):
    """Return the scene-sanitized form of every known name for *show*."""
    # collect every alias first, then scenify each of them
    possible_names = allPossibleShowNames(show)
    return map(sanitizeSceneName, possible_names)
def makeSceneSeasonSearchString(show, segment, extraSearchType=None):
    """Build scene-style season search strings for *show*.

    show: a TVShow object
    segment: the season number, or a date segment for air-by-date shows
    extraSearchType: provider-specific search flavor; None means the
                     generic "Show.Name.SXX" style

    Returns: a list of search strings (one or more per known show name).
    """
    myDB = db.DBConnection()

    if show.air_by_date:
        # season counting is meaningless for air-by-date shows
        numseasons = 0

        # the search string for air by date shows is just
        seasonStrings = [segment]
    else:
        numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [show.tvdbid])
        numseasons = int(numseasonsSQlResult[0][0])

        seasonStrings = ["S%02d" % segment]

    showNames = set(makeSceneShowSearchStrings(show))

    toReturn = []

    # search each show name
    for curShow in showNames:
        # most providers all work the same way
        if not extraSearchType:
            # if there's only one season then we can just use the show name straight up
            if numseasons == 1:
                toReturn.append(curShow)
            # for providers that don't allow multiple searches in one request we only search for Sxx style stuff
            else:
                for cur_season in seasonStrings:
                    toReturn.append(curShow + "." + cur_season)

    return toReturn
def makeSceneSearchString(episode):
    """Build scene-style episode search strings for *episode*.

    Returns: a list of "ShowName.SxxEyy" / "ShowName.YxYY" strings (or
    "ShowName.YYYY-MM-DD" for air-by-date shows); for small single-season
    shows, just the bare show names.
    """
    myDB = db.DBConnection()

    numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [episode.show.tvdbid])
    numseasons = int(numseasonsSQlResult[0][0])
    numepisodesSQlResult = myDB.select("SELECT COUNT(episode) as numepisodes FROM tv_episodes WHERE showid = ? and season != 0", [episode.show.tvdbid])
    numepisodes = int(numepisodesSQlResult[0][0])

    # see if we should use dates instead of episodes
    # (fromordinal(1) is the sentinel for "no airdate")
    if episode.show.air_by_date and episode.airdate != datetime.date.fromordinal(1):
        epStrings = [str(episode.airdate)]
    else:
        epStrings = ["S%02iE%02i" % (int(episode.season), int(episode.episode)),
                     "%ix%02i" % (int(episode.season), int(episode.episode))]

    # for single-season shows just search for the show name -- if total ep count (exclude s0) is less than 11
    # due to the amount of qualities and releases, it is easy to go over the 50 result limit on rss feeds otherwise
    if numseasons == 1 and numepisodes < 11:
        epStrings = ['']

    showNames = set(makeSceneShowSearchStrings(episode.show))

    toReturn = []

    for curShow in showNames:
        for curEpString in epStrings:
            toReturn.append(curShow + '.' + curEpString)

    return toReturn
def isGoodResult(name, show, log=True):
    """
    Use an automatically-created regex to make sure the result actually is the show it claims to be

    name: the release name returned by a provider
    show: the TVShow object it should match
    log: whether to log each comparison

    Returns: True if any known show-name variant matches, False otherwise.
    """

    all_show_names = allPossibleShowNames(show)
    # NOTE: Python 2 semantics -- map() returns a list here
    showNames = map(sanitizeSceneName, all_show_names) + all_show_names

    for curName in set(showNames):
        # collapse re.escape()d separators (space/dot/dash) into a generic
        # \W+ so any scene separator style matches
        escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
        if show.startyear:
            escaped_name += "(?:\W+" + str(show.startyear) + ")?"
        # show name, optional year, then anything that looks like
        # season/episode, date, or part information
        curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+)'
        if log:
            logger.log(u"Checking if show " + name + " matches " + curRegex, logger.DEBUG)

        match = re.search(curRegex, name, re.I)

        if match:
            logger.log(u"Matched " + curRegex + " to " + name, logger.DEBUG)
            return True

    if log:
        logger.log(u"Provider gave result " + name + " but that doesn't seem like a valid result for " + show.name + " so I'm ignoring it")
    return False
def allPossibleShowNames(show):
    """
    Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name,
    country codes on the end, eg. "Show Name (AU)", and any scene exception names.

    show: a TVShow object that we should get the names of

    Returns: a list of all the possible show names
    """

    showNames = [show.name]
    showNames += [name for name in get_scene_exceptions(show.tvdbid)]

    # if we have a tvrage name then use it
    if show.tvrname != "" and show.tvrname is not None:
        showNames.append(show.tvrname)

    newShowNames = []
    # BUGFIX: copy before inverting. The original bound country_list directly
    # to the shared common.countryList dict and then update()d it, permanently
    # polluting the module-level mapping with reversed entries on every call.
    country_list = dict(countryList)
    country_list.update(dict(zip(countryList.values(), countryList.keys())))

    # if we have "Show Name Australia" or "Show Name (Australia)" this will add "Show Name (AU)" for
    # any countries defined in common.countryList
    # (and vice versa)
    for curName in set(showNames):
        if not curName:
            continue
        for curCountry in country_list:
            if curName.endswith(' ' + curCountry):
                newShowNames.append(curName.replace(' ' + curCountry, ' (' + country_list[curCountry] + ')'))
            elif curName.endswith(' (' + curCountry + ')'):
                newShowNames.append(curName.replace(' (' + curCountry + ')', ' (' + country_list[curCountry] + ')'))

    showNames += newShowNames
return showNames | unknown | codeparrot/codeparrot-clean | ||
from datetime import datetime
import pytest
from pandas.tseries.holiday import (
after_nearest_workday,
before_nearest_workday,
nearest_workday,
next_monday,
next_monday_or_tuesday,
next_workday,
previous_friday,
previous_workday,
sunday_to_monday,
weekend_to_monday,
)
# One fixed calendar week (Wed 2014-04-09 .. Wed 2014-04-16) shared by every
# observance-rule test below.
_WEDNESDAY = datetime(2014, 4, 9)
_THURSDAY = datetime(2014, 4, 10)
_FRIDAY = datetime(2014, 4, 11)
_SATURDAY = datetime(2014, 4, 12)
_SUNDAY = datetime(2014, 4, 13)
_MONDAY = datetime(2014, 4, 14)
_TUESDAY = datetime(2014, 4, 15)
_NEXT_WEDNESDAY = datetime(2014, 4, 16)
# Each test pins one holiday observance rule against the fixed 2014 week above.
@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_next_monday(day):
    assert next_monday(day) == _MONDAY


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_MONDAY, _TUESDAY)]
)
def test_next_monday_or_tuesday(day, expected):
    assert next_monday_or_tuesday(day) == expected


@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_previous_friday(day):
    assert previous_friday(day) == _FRIDAY


def test_sunday_to_monday():
    assert sunday_to_monday(_SUNDAY) == _MONDAY


# Saturday observes on the preceding Friday, Sunday on the following Monday.
@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_nearest_workday(day, expected):
    assert nearest_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_weekend_to_monday(day, expected):
    assert weekend_to_monday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    [
        (_WEDNESDAY, _THURSDAY),
        (_THURSDAY, _FRIDAY),
        (_SATURDAY, _MONDAY),
        (_SUNDAY, _MONDAY),
        (_MONDAY, _TUESDAY),
        (_TUESDAY, _NEXT_WEDNESDAY),  # WED is same week as TUE
    ],
)
def test_next_workday(day, expected):
    assert next_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _FRIDAY), (_TUESDAY, _MONDAY)]
)
def test_previous_workday(day, expected):
    assert previous_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    [
        (_THURSDAY, _WEDNESDAY),
        (_FRIDAY, _THURSDAY),
        (_SATURDAY, _THURSDAY),
        (_SUNDAY, _FRIDAY),
        (_MONDAY, _FRIDAY),  # last week Friday
        (_TUESDAY, _MONDAY),
        (_NEXT_WEDNESDAY, _TUESDAY),  # WED is same week as TUE
    ],
)
def test_before_nearest_workday(day, expected):
    assert before_nearest_workday(day) == expected
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_FRIDAY, _MONDAY)]
)
def test_after_nearest_workday(day, expected):
assert after_nearest_workday(day) == expected | python | github | https://github.com/pandas-dev/pandas | pandas/tests/tseries/holiday/test_observance.py |
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
    """Spec fixture: three methods with distinct signatures for autospec tests."""
    def one(self, a, b):
        pass
    def two(self):
        pass
    def three(self, a=None):
        pass
class AnyTest(unittest.TestCase):
    """mock.ANY must compare equal to anything, in either comparison order."""

    def test_any(self):
        self.assertEqual(ANY, object())

        mock = Mock()
        mock(ANY)
        mock.assert_called_with(ANY)

        mock = Mock()
        mock(foo=ANY)
        mock.assert_called_with(foo=ANY)

    def test_repr(self):
        self.assertEqual(repr(ANY), '<ANY>')
        self.assertEqual(str(ANY), '<ANY>')

    def test_any_and_datetime(self):
        mock = Mock()
        mock(datetime.now(), foo=datetime.now())

        mock.assert_called_with(ANY, foo=ANY)

    def test_any_mock_calls_comparison_order(self):
        mock = Mock()
        d = datetime.now()
        # Foo always answers "not equal", so equality must be driven by ANY
        # being on the left or right of the comparison
        class Foo(object):
            def __eq__(self, other):
                return False
            def __ne__(self, other):
                return True

        for d in datetime.now(), Foo():
            mock.reset_mock()

            mock(d, foo=d, bar=d)
            mock.method(d, zinga=d, alpha=d)
            mock().method(a1=d, z99=d)

            expected = [
                call(ANY, foo=ANY, bar=ANY),
                call.method(ANY, zinga=ANY, alpha=ANY),
                call(), call().method(a1=ANY, z99=ANY)
            ]
            self.assertEqual(expected, mock.mock_calls)
            self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
    """_Call / call(): construction, equality, repr, and call_list()."""

    def test_call_with_call(self):
        # an empty _Call equals every "empty" spelling, named or not
        kall = _Call()
        self.assertEqual(kall, _Call())
        self.assertEqual(kall, _Call(('',)))
        self.assertEqual(kall, _Call(((),)))
        self.assertEqual(kall, _Call(({},)))
        self.assertEqual(kall, _Call(('', ())))
        self.assertEqual(kall, _Call(('', {})))
        self.assertEqual(kall, _Call(('', (), {})))

        # names are ignored when comparing to an anonymous empty call
        self.assertEqual(kall, _Call(('foo',)))
        self.assertEqual(kall, _Call(('bar', ())))
        self.assertEqual(kall, _Call(('baz', {})))
        self.assertEqual(kall, _Call(('spam', (), {})))

        kall = _Call(((1, 2, 3),))
        self.assertEqual(kall, _Call(((1, 2, 3),)))
        self.assertEqual(kall, _Call(('', (1, 2, 3))))
        self.assertEqual(kall, _Call(((1, 2, 3), {})))
        self.assertEqual(kall, _Call(('', (1, 2, 3), {})))

        kall = _Call(((1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))

        kall = _Call(('foo', (1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))

        kall = _Call(({'a': 3},))
        self.assertEqual(kall, _Call(('', (), {'a': 3})))
        self.assertEqual(kall, _Call(('', {'a': 3})))
        self.assertEqual(kall, _Call(((), {'a': 3})))
        self.assertEqual(kall, _Call(({'a': 3},)))

    def test_empty__Call(self):
        args = _Call()

        self.assertEqual(args, ())
        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ((),))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo',(), {}))
        self.assertEqual(args, ('foo', {}))
        self.assertEqual(args, ({},))

    def test_named_empty_call(self):
        args = _Call(('foo', (), {}))

        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo',(), {}))
        self.assertEqual(args, ('foo', {}))

        # a *named* call is not equal to anonymous or differently-named forms
        self.assertNotEqual(args, ((),))
        self.assertNotEqual(args, ())
        self.assertNotEqual(args, ({},))
        self.assertNotEqual(args, ('bar',))
        self.assertNotEqual(args, ('bar', ()))
        self.assertNotEqual(args, ('bar', {}))

    def test_call_with_args(self):
        args = _Call(((1, 2, 3), {}))

        self.assertEqual(args, ((1, 2, 3),))
        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))
        self.assertEqual(args, ((1, 2, 3), {}))

    def test_named_call_with_args(self):
        args = _Call(('foo', (1, 2, 3), {}))

        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))

        self.assertNotEqual(args, ((1, 2, 3),))
        self.assertNotEqual(args, ((1, 2, 3), {}))

    def test_call_with_kwargs(self):
        args = _Call(((), dict(a=3, b=4)))

        self.assertEqual(args, (dict(a=3, b=4),))
        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
        self.assertEqual(args, ((), dict(a=3, b=4)))

    def test_named_call_with_kwargs(self):
        args = _Call(('foo', (), dict(a=3, b=4)))

        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))

        self.assertNotEqual(args, (dict(a=3, b=4),))
        self.assertNotEqual(args, ((), dict(a=3, b=4)))

    def test_call_with_args_call_empty_name(self):
        args = _Call(((1, 2, 3), {}))

        self.assertEqual(args, call(1, 2, 3))
        self.assertEqual(call(1, 2, 3), args)
        self.assertTrue(call(1, 2, 3) in [args])

    def test_call_ne(self):
        self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
        self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
        self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))

    def test_call_non_tuples(self):
        kall = _Call(((1, 2, 3),))
        # comparing against arbitrary non-tuple objects must not blow up
        for value in 1, None, self, int:
            self.assertNotEqual(kall, value)
            self.assertFalse(kall == value)

    def test_repr(self):
        self.assertEqual(repr(_Call()), 'call()')
        self.assertEqual(repr(_Call(('foo',))), 'call.foo()')

        self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
                         "call(1, 2, 3, a='b')")
        self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
                         "call.bar(1, 2, 3, a='b')")

        self.assertEqual(repr(call), 'call')
        self.assertEqual(str(call), 'call')

        self.assertEqual(repr(call()), 'call()')
        self.assertEqual(repr(call(1)), 'call(1)')
        self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")

        # intermediate args in a chained call are not shown in the repr
        self.assertEqual(repr(call().foo), 'call().foo')
        self.assertEqual(repr(call(1).foo.bar(a=3).bing),
                         'call().foo.bar().bing')
        self.assertEqual(
            repr(call().foo(1, 2, a=3)),
            "call().foo(1, 2, a=3)"
        )
        self.assertEqual(repr(call()()), "call()()")
        self.assertEqual(repr(call(1)(2)), "call()(2)")
        self.assertEqual(
            repr(call()().bar().baz.beep(1)),
            "call()().bar().baz.beep(1)"
        )

    def test_call(self):
        self.assertEqual(call(), ('', (), {}))
        self.assertEqual(call('foo', 'bar', one=3, two=4),
                         ('', ('foo', 'bar'), {'one': 3, 'two': 4}))

        mock = Mock()
        mock(1, 2, 3)
        mock(a=3, b=6)
        self.assertEqual(mock.call_args_list,
                         [call(1, 2, 3), call(a=3, b=6)])

    def test_attribute_call(self):
        self.assertEqual(call.foo(1), ('foo', (1,), {}))
        self.assertEqual(call.bar.baz(fish='eggs'),
                         ('bar.baz', (), {'fish': 'eggs'}))

        mock = Mock()
        mock.foo(1, 2 ,3)
        mock.bar.baz(a=3, b=6)
        self.assertEqual(mock.method_calls,
                         [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])

    def test_extended_call(self):
        result = call(1).foo(2).bar(3, a=4)
        self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))

        mock = MagicMock()
        mock(1, 2, a=3, b=4)
        self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
        self.assertNotEqual(mock.call_args, call(1, 2, 3))

        self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
        self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])

        mock = MagicMock()
        mock.foo(1).bar()().baz.beep(a=6)

        last_call = call.foo(1).bar()().baz.beep(a=6)
        self.assertEqual(mock.mock_calls[-1], last_call)
        self.assertEqual(mock.mock_calls, last_call.call_list())

    def test_call_list(self):
        # call_list() expands a chained call into every intermediate call
        mock = MagicMock()
        mock(1)
        self.assertEqual(call(1).call_list(), mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)
        self.assertEqual(call(1).method(2).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)(3)
        self.assertEqual(call(1).method(2)(3).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        int(mock(1).method(2)(3).foo.bar.baz(4)(5))
        kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
        self.assertEqual(kall.call_list(), mock.mock_calls)

    def test_call_any(self):
        self.assertEqual(call, ANY)

        m = MagicMock()
        int(m)
        self.assertEqual(m.mock_calls, [ANY])
        self.assertEqual([ANY], m.mock_calls)

    def test_two_args_call(self):
        # two=True builds a 2-tuple (args, kwargs) without a name slot
        args = _Call(((1, 2), {'a': 3}), two=True)
        self.assertEqual(len(args), 2)
        self.assertEqual(args[0], (1, 2))
        self.assertEqual(args[1], {'a': 3})

        other_args = _Call(((1, 2), {'a': 3}))
        self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
    """create_autospec(): spec'd mocks must mirror the spec's attributes,
    call signatures, descriptors and magic methods."""

    def _check_someclass_mock(self, mock):
        # shared assertions for any mock autospec'd from SomeClass:
        # unknown attributes raise, each method enforces its signature
        self.assertRaises(AttributeError, getattr, mock, 'foo')
        mock.one(1, 2)
        mock.one.assert_called_with(1, 2)
        self.assertRaises(AssertionError,
                          mock.one.assert_called_with, 3, 4)
        self.assertRaises(TypeError, mock.one, 1)

        mock.two()
        mock.two.assert_called_with()
        self.assertRaises(AssertionError,
                          mock.two.assert_called_with, 3)
        self.assertRaises(TypeError, mock.two, 1)

        mock.three()
        mock.three.assert_called_with()
        self.assertRaises(AssertionError,
                          mock.three.assert_called_with, 3)
        self.assertRaises(TypeError, mock.three, 3, 2)

        mock.three(1)
        mock.three.assert_called_with(1)

        mock.three(a=1)
        mock.three.assert_called_with(a=1)

    def test_basic(self):
        for spec in (SomeClass, SomeClass()):
            mock = create_autospec(spec)
            self._check_someclass_mock(mock)

    def test_create_autospec_return_value(self):
        def f():
            pass
        mock = create_autospec(f, return_value='foo')
        self.assertEqual(mock(), 'foo')

        class Foo(object):
            pass

        mock = create_autospec(Foo, return_value='foo')
        self.assertEqual(mock(), 'foo')

    def test_autospec_reset_mock(self):
        m = create_autospec(int)
        int(m)
        m.reset_mock()
        self.assertEqual(m.__int__.call_count, 0)

    def test_mocking_unbound_methods(self):
        class Foo(object):
            def foo(self, foo):
                pass
        p = patch.object(Foo, 'foo')
        mock_foo = p.start()
        Foo().foo(1)

        mock_foo.assert_called_with(1)

    def test_create_autospec_unbound_methods(self):
        # see mock issue 128
        # this is expected to fail until the issue is fixed
        return
        class Foo(object):
            def foo(self):
                pass

        klass = create_autospec(Foo)
        instance = klass()
        self.assertRaises(TypeError, instance.foo, 1)

        # Note: no type checking on the "self" parameter
        klass.foo(1)
        klass.foo.assert_called_with(1)
        self.assertRaises(TypeError, klass.foo)

    def test_create_autospec_keyword_arguments(self):
        class Foo(object):
            a = 3
        m = create_autospec(Foo, a='3')
        self.assertEqual(m.a, '3')

    def test_create_autospec_keyword_only_arguments(self):
        def foo(a, *, b=None):
            pass

        m = create_autospec(foo)
        m(1)
        m.assert_called_with(1)
        self.assertRaises(TypeError, m, 1, 2)

        m(2, b=3)
        m.assert_called_with(2, b=3)

    def test_function_as_instance_attribute(self):
        obj = SomeClass()
        def f(a):
            pass
        obj.f = f

        mock = create_autospec(obj)
        mock.f('bing')
        mock.f.assert_called_with('bing')

    def test_spec_as_list(self):
        # because spec as a list of strings in the mock constructor means
        # something very different we treat a list instance as the type.
        mock = create_autospec([])
        mock.append('foo')
        mock.append.assert_called_with('foo')

        self.assertRaises(AttributeError, getattr, mock, 'foo')

        class Foo(object):
            foo = []

        mock = create_autospec(Foo)
        mock.foo.append(3)
        mock.foo.append.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.foo, 'foo')

    def test_attributes(self):
        # spec'd attributes of the spec are themselves spec'd
        class Sub(SomeClass):
            attr = SomeClass()

        sub_mock = create_autospec(Sub)

        for mock in (sub_mock, sub_mock.attr):
            self._check_someclass_mock(mock)

    def test_builtin_functions_types(self):
        # we could replace builtin functions / methods with a function
        # with *args / **kwargs signature. Using the builtin method type
        # as a spec seems to work fairly well though.
        class BuiltinSubclass(list):
            def bar(self, arg):
                pass
            sorted = sorted
            attr = {}

        mock = create_autospec(BuiltinSubclass)
        mock.append(3)
        mock.append.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.append, 'foo')

        mock.bar('foo')
        mock.bar.assert_called_with('foo')
        self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
        self.assertRaises(AttributeError, getattr, mock.bar, 'foo')

        mock.sorted([1, 2])
        mock.sorted.assert_called_with([1, 2])
        self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')

        mock.attr.pop(3)
        mock.attr.pop.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.attr, 'foo')

    def test_method_calls(self):
        class Sub(SomeClass):
            attr = SomeClass()

        mock = create_autospec(Sub)
        mock.one(1, 2)
        mock.two()
        mock.three(3)

        expected = [call.one(1, 2), call.two(), call.three(3)]
        self.assertEqual(mock.method_calls, expected)

        mock.attr.one(1, 2)
        mock.attr.two()
        mock.attr.three(3)

        expected.extend(
            [call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
        )
        self.assertEqual(mock.method_calls, expected)

    def test_magic_methods(self):
        class BuiltinSubclass(list):
            attr = {}

        mock = create_autospec(BuiltinSubclass)
        self.assertEqual(list(mock), [])
        # int() is not supported by list specs
        self.assertRaises(TypeError, int, mock)
        self.assertRaises(TypeError, int, mock.attr)
        self.assertEqual(list(mock), [])

        self.assertIsInstance(mock['foo'], MagicMock)
        self.assertIsInstance(mock.attr['foo'], MagicMock)

    def test_spec_set(self):
        class Sub(SomeClass):
            attr = SomeClass()

        for spec in (Sub, Sub()):
            mock = create_autospec(spec, spec_set=True)
            self._check_someclass_mock(mock)

            # spec_set also forbids *setting* unknown attributes
            self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
            self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')

    def test_descriptors(self):
        class Foo(object):
            @classmethod
            def f(cls, a, b):
                pass
            @staticmethod
            def g(a, b):
                pass

        class Bar(Foo):
            pass

        class Baz(SomeClass, Bar):
            pass

        # classmethods/staticmethods must keep their signatures, including
        # when inherited (single and multiple inheritance)
        for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
            mock = create_autospec(spec)
            mock.f(1, 2)
            mock.f.assert_called_once_with(1, 2)

            mock.g(3, 4)
            mock.g.assert_called_once_with(3, 4)

    def test_recursive(self):
        class A(object):
            def a(self):
                pass
            foo = 'foo bar baz'
            bar = foo

        # self-referencing attribute must not recurse infinitely
        A.B = A
        mock = create_autospec(A)

        mock()
        self.assertFalse(mock.B.called)

        mock.a()
        mock.B.a()
        self.assertEqual(mock.method_calls, [call.a(), call.B.a()])

        # identical attributes on the spec become distinct mocks
        self.assertIs(A.foo, A.bar)
        self.assertIsNot(mock.foo, mock.bar)
        mock.foo.lower()
        self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)

    def test_spec_inheritance_for_classes(self):
        class Foo(object):
            def a(self):
                pass
            class Bar(object):
                def f(self):
                    pass

        class_mock = create_autospec(Foo)

        self.assertIsNot(class_mock, class_mock())

        for this_mock in class_mock, class_mock():
            this_mock.a()
            this_mock.a.assert_called_with()
            self.assertRaises(TypeError, this_mock.a, 'foo')
            self.assertRaises(AttributeError, getattr, this_mock, 'b')

        instance_mock = create_autospec(Foo())
        instance_mock.a()
        instance_mock.a.assert_called_with()
        self.assertRaises(TypeError, instance_mock.a, 'foo')
        self.assertRaises(AttributeError, getattr, instance_mock, 'b')

        # The return value isn't callable
        self.assertRaises(TypeError, instance_mock)

        instance_mock.Bar.f()
        instance_mock.Bar.f.assert_called_with()
        self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')

        instance_mock.Bar().f()
        instance_mock.Bar().f.assert_called_with()
        self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')

    def test_inherit(self):
        class Foo(object):
            a = 3

        Foo.Foo = Foo

        # class
        mock = create_autospec(Foo)
        instance = mock()
        self.assertRaises(AttributeError, getattr, instance, 'b')

        attr_instance = mock.Foo()
        self.assertRaises(AttributeError, getattr, attr_instance, 'b')

        # instance
        mock = create_autospec(Foo())
        self.assertRaises(AttributeError, getattr, mock, 'b')
        self.assertRaises(TypeError, mock)

        # attribute instance
        call_result = mock.Foo()
        self.assertRaises(AttributeError, getattr, call_result, 'b')

    def test_builtins(self):
        # used to fail with infinite recursion
        create_autospec(1)

        create_autospec(int)
        create_autospec('foo')
        create_autospec(str)
        create_autospec({})
        create_autospec(dict)
        create_autospec([])
        create_autospec(list)
        create_autospec(set())
        create_autospec(set)
        create_autospec(1.0)
        create_autospec(float)
        create_autospec(1j)
        create_autospec(complex)
        create_autospec(False)
        create_autospec(True)

    def test_function(self):
        def f(a, b):
            pass

        mock = create_autospec(f)
        self.assertRaises(TypeError, mock)
        mock(1, 2)
        mock.assert_called_with(1, 2)

        # attributes hung off a function are spec'd too
        f.f = f
        mock = create_autospec(f)
        self.assertRaises(TypeError, mock.f)
        mock.f(3, 4)
        mock.f.assert_called_with(3, 4)

    def test_skip_attributeerrors(self):
        # a descriptor that raises AttributeError on class access must not
        # break autospeccing of the rest of the class
        class Raiser(object):
            def __get__(self, obj, type=None):
                if obj is None:
                    raise AttributeError('Can only be accessed via an instance')

        class RaiserClass(object):
            raiser = Raiser()

            @staticmethod
            def existing(a, b):
                return a + b

        s = create_autospec(RaiserClass)
        # NOTE(review): the lambda takes a parameter, so assertRaises trips on
        # calling the lambda itself rather than on s.existing -- confirm intent
        self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
        s.existing(1, 2)
        self.assertRaises(AttributeError, lambda: s.nonexisting)

        # check we can fetch the raiser attribute and it has no spec
        obj = s.raiser
        obj.foo, obj.bar

    def test_signature_class(self):
        # calling the class mock enforces the __init__ signature
        class Foo(object):
            def __init__(self, a, b=3):
                pass

        mock = create_autospec(Foo)

        self.assertRaises(TypeError, mock)
        mock(1)
        mock.assert_called_once_with(1)

        mock(4, 5)
        mock.assert_called_with(4, 5)

    def test_class_with_no_init(self):
        # this used to raise an exception
        # due to trying to get a signature from object.__init__
        class Foo(object):
            pass
        create_autospec(Foo)

    def test_signature_callable(self):
        class Callable(object):
            def __init__(self):
                pass
            def __call__(self, a):
                pass

        # class spec: calling the mock uses __init__'s signature, calling the
        # returned "instance" uses __call__'s
        mock = create_autospec(Callable)
        mock()
        mock.assert_called_once_with()
        self.assertRaises(TypeError, mock, 'a')

        instance = mock()
        self.assertRaises(TypeError, instance)
        instance(a='a')
        instance.assert_called_once_with(a='a')
        instance('a')
        instance.assert_called_with('a')

        # instance spec: the mock itself uses __call__'s signature
        mock = create_autospec(Callable())
        mock(a='a')
        mock.assert_called_once_with(a='a')
        self.assertRaises(TypeError, mock)
        mock('a')
        mock.assert_called_with('a')

    def test_signature_noncallable(self):
        class NonCallable(object):
            def __init__(self):
                pass

        mock = create_autospec(NonCallable)
        instance = mock()
        mock.assert_called_once_with()
        self.assertRaises(TypeError, mock, 'a')
        self.assertRaises(TypeError, instance)
        self.assertRaises(TypeError, instance, 'a')

        mock = create_autospec(NonCallable())
        self.assertRaises(TypeError, mock)
        self.assertRaises(TypeError, mock, 'a')

    def test_create_autospec_none(self):
        # a None attribute on the spec becomes an unrestricted mock,
        # not a None
        class Foo(object):
            bar = None

        mock = create_autospec(Foo)
        none = mock.bar
        self.assertNotIsInstance(none, type(None))

        none.foo()
        none.foo.assert_called_once_with()

    def test_autospec_functions_with_self_in_odd_place(self):
        class Foo(object):
            def f(a, self):
                pass

        a = create_autospec(Foo)
        a.f(self=10)
        a.f.assert_called_with(self=10)

    def test_autospec_property(self):
        class Foo(object):
            @property
            def foo(self):
                return 3

        foo = create_autospec(Foo)
        mock_property = foo.foo

        # no spec on properties
        self.assertTrue(isinstance(mock_property, MagicMock))
        mock_property(1, 2, 3)
        mock_property.abc(4, 5, 6)
        mock_property.assert_called_once_with(1, 2, 3)
        mock_property.abc.assert_called_once_with(4, 5, 6)

    def test_autospec_slots(self):
        class Foo(object):
            __slots__ = ['a']

        foo = create_autospec(Foo)
        mock_slot = foo.a

        # no spec on slots
        mock_slot(1, 2, 3)
        mock_slot.abc(4, 5, 6)
        mock_slot.assert_called_once_with(1, 2, 3)
        mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
    """_CallList containment and str(), plus PropertyMock behaviour."""

    def test_args_list_contains_call_list(self):
        mock = Mock()
        self.assertIsInstance(mock.call_args_list, _CallList)

        mock(1, 2)
        mock(a=3)
        mock(3, 4)
        mock(b=6)

        for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
            self.assertTrue(kall in mock.call_args_list)

        # a *list* of calls is "in" call_args_list when it appears as a
        # contiguous subsequence
        calls = [call(a=3), call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(1, 2), call(a=3)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4), call(b=6)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)

        self.assertFalse(call('fish') in mock.call_args_list)
        self.assertFalse([call('fish')] in mock.call_args_list)

    def test_call_list_str(self):
        mock = Mock()
        mock(1, 2)
        mock.foo(a=3)
        mock.foo.bar().baz('fish', cat='dog')

        # one call per line, later lines indented by a single space
        expected = (
            "[call(1, 2),\n"
            " call.foo(a=3),\n"
            " call.foo.bar(),\n"
            " call.foo.bar().baz('fish', cat='dog')]"
        )
        self.assertEqual(str(mock.mock_calls), expected)

    def test_propertymock(self):
        p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
        mock = p.start()
        try:
            # reading the property records a plain call()
            SomeClass.one
            mock.assert_called_once_with()

            s = SomeClass()
            s.one
            mock.assert_called_with()
            self.assertEqual(mock.mock_calls, [call(), call()])

            # setting the property records a call with the assigned value
            s.one = 3
            self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
        finally:
            p.stop()

    def test_propertymock_returnvalue(self):
        m = MagicMock()
        p = PropertyMock()
        type(m).foo = p

        # the value returned from the property is an ordinary MagicMock,
        # not the PropertyMock itself
        returned = m.foo
        p.assert_called_once_with()
        self.assertIsInstance(returned, MagicMock)
        self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AffectedUserByGroup'
db.create_table(
'sentry_affecteduserbygroup', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'project',
self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])
), (
'group',
self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])
), ('ident', self.gf('django.db.models.fields.CharField')(max_length=200)),
('times_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0)), (
'last_seen', self.gf('django.db.models.fields.DateTimeField')(
default=datetime.datetime.now, db_index=True
)
), (
'first_seen', self.gf('django.db.models.fields.DateTimeField')(
default=datetime.datetime.now, db_index=True
)
),
)
)
db.send_create_signal('sentry', ['AffectedUserByGroup'])
# Adding unique constraint on 'AffectedUserByGroup', fields ['project', 'ident', 'group']
db.create_unique('sentry_affecteduserbygroup', ['project_id', 'ident', 'group_id'])
    def backwards(self, orm):
        """Reverse the migration: drop the unique constraint, then the table."""
        # Removing unique constraint on 'AffectedUserByGroup', fields ['project', 'ident', 'group']
        db.delete_unique('sentry_affecteduserbygroup', ['project_id', 'ident', 'group_id'])

        # Deleting model 'AffectedUserByGroup'
        db.delete_table('sentry_affecteduserbygroup')
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.affecteduserbygroup': {
'Meta': {
'unique_together': "(('project', 'ident', 'group'),)",
'object_name': 'AffectedUserByGroup'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filterkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'FilterKey'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'users_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {
'db_index': 'True'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'max_length': '50',
'unique': 'True',
'null': 'True'
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectcountbyminute': {
'Meta': {
'unique_together': "(('project', 'date'),)",
'object_name': 'ProjectCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {
'unique_together': "(('project', 'group'),)",
'object_name': 'SearchDocument'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_changed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
})
},
'sentry.searchtoken': {
'Meta': {
'unique_together': "(('document', 'field', 'token'),)",
'object_name': 'SearchToken'
},
'document': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'token_set'",
'to': "orm['sentry.SearchDocument']"
}
),
'field':
('django.db.models.fields.CharField', [], {
'default': "'text'",
'max_length': '64'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'token': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
import io
import os

from academicmarkdown import build
from libopensesame import misc
md = u"""
<div class='page-notification'>This page is generated automatically from item-specific help pages. These pages can be viewed in OpenSesame by clicking on the help icon in the top-right of the tab area.</div>
## Overview
%%--
toc:
mindepth: 2
exclude: [Overview]
--%%
## Commonly used items
%s
## Plug-ins
%s
"""
# Notice inserted before every plug-in help section (passed to helpify() as msg).
plugin_msg = u"\n<div class='page-notification'>This is a plug-in and may not be installed by default. For plug-in installation instructions, see <a href='/plug-ins/installation'>here</a>.</div>\n"
# Help files that must not be included in the generated page.
exclude_list = [u'general.md', u'variables.md', u'stdout.md', u'missing.md',
	u'auto_example.md', u'remote_logger.md', u'pool.md', u'video_player.md']
def collect(folder):

	"""
	Recursively collects a list of Markdown help files from a specified folder.

	Arguments:
	folder	--	The source folder.

	Returns:
	A sorted list of path names of help files.
	"""

	found = []
	for entry in os.listdir(folder):
		full_path = os.path.join(folder, entry)
		if os.path.isdir(full_path):
			# Descend into subfolders and merge their results.
			print('Entering %s' % full_path)
			found.extend(collect(full_path))
		elif entry.endswith(u'.md') and entry not in exclude_list:
			print('Adding %s' % full_path)
			found.append(full_path)
	return sorted(found)
def helpify(folder, msg=u''):

	"""
	Recursively builds a help page from Markdown help files in a source folder.

	Arguments:
	folder	--	The source folder.

	Keyword arguments:
	msg		--	An informative message inserted before each help file's
				contents. (The previous docstring said "after"; the message
				is in fact prepended.)

	Returns:
	A help page as a single Markdown (unicode) string.
	"""

	md = u''
	for path in collect(folder):
		# io.open() decodes the file as UTF-8 and -- unlike the previous bare
		# open(path).read().decode() call -- closes the file handle
		# deterministically, fixing a file-descriptor leak.
		with io.open(path, encoding=u'utf-8') as fd:
			contents = fd.read()
		_md = u'\n' + msg + contents + u'\n<hr />\n'
		# Demote every heading by two levels so item titles nest under the
		# page's own section headings.
		_md = _md.replace(u'\n#', u'\n###')
		md += _md
	return md
# Build the final page: fill the two %s slots with the generated help text,
# render it to HTML, and write the result as a site include.  The
# with-statement guarantees the output file is flushed and closed even if
# writing fails (the original left the handle dangling).
md = md % (helpify(u'help'), helpify(u'plugins', plugin_msg))
html = build.HTML(md, standalone=False)
with open('../osdoc/content/_includes/item-help', 'w') as fd:
	fd.write(html.encode('utf-8'))
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_PARENTVIRTUALCALLCHECK_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_PARENTVIRTUALCALLCHECK_H
#include "../ClangTidyCheck.h"
namespace clang::tidy::bugprone {
/// Finds calls to grand..-parent virtual methods instead of parent's.
///
/// For the user-facing documentation see:
/// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/parent-virtual-call.html
class ParentVirtualCallCheck : public ClangTidyCheck {
public:
  ParentVirtualCallCheck(StringRef Name, ClangTidyContext *Context)
      : ClangTidyCheck(Name, Context) {}
  // Virtual dispatch only exists in C++, so skip this check for other languages.
  bool isLanguageVersionSupported(const LangOptions &LangOpts) const override {
    return LangOpts.CPlusPlus;
  }
  // Matcher registration and the diagnostic logic live in the corresponding
  // .cpp file; only the ClangTidyCheck interface is declared here.
  void registerMatchers(ast_matchers::MatchFinder *Finder) override;
  void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
};
} // namespace clang::tidy::bugprone
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_PARENTVIRTUALCALLCHECK_H | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/bugprone/ParentVirtualCallCheck.h |
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
/**
* Provides APIs to create windows, communicate with other windows and manipulate the current window.
*
* #### Window events
*
* Events can be listened to using {@link Window.listen}:
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* getCurrentWindow().listen("my-window-event", ({ event, payload }) => { });
* ```
*
* @module
*/
import {
LogicalPosition,
LogicalSize,
PhysicalPosition,
PhysicalSize,
Position,
Size
} from './dpi'
import type { Event, EventName, EventCallback, UnlistenFn } from './event'
import {
TauriEvent,
// imported for documentation purposes
type EventTarget,
emit,
emitTo,
listen,
once
} from './event'
import { invoke } from './core'
import { WebviewWindow } from './webviewWindow'
import type { DragDropEvent } from './webview'
import { Image, transformImage } from './image'
/**
 * Allows you to retrieve information about a given monitor.
 *
 * @since 1.0.0
 */
export interface Monitor {
  /** Human-readable name of the monitor */
  name: string | null
  /** The monitor's resolution. */
  size: PhysicalSize
  /** The top-left corner position of the monitor relative to the larger full screen area. */
  position: PhysicalPosition
  /** The monitor's work area. */
  workArea: {
    /** Top-left corner position of the work area. */
    position: PhysicalPosition
    /** Size of the work area. */
    size: PhysicalSize
  }
  /** The scale factor that can be used to map physical pixels to logical pixels. */
  scaleFactor: number
}
/** System theme of a window. */
type Theme = 'light' | 'dark'
/** Title bar appearance variants. */
type TitleBarStyle = 'visible' | 'transparent' | 'overlay'
/** Compass direction of the edge or corner a manual resize is started from. */
type ResizeDirection =
  | 'East'
  | 'North'
  | 'NorthEast'
  | 'NorthWest'
  | 'South'
  | 'SouthEast'
  | 'SouthWest'
  | 'West'
/**
 * The payload for the `scaleChange` event.
 *
 * @since 1.0.2
 */
interface ScaleFactorChanged {
  /** The new window scale factor. */
  scaleFactor: number
  /** The new window size, in physical pixels. */
  size: PhysicalSize
}
/**
 * Attention type to request on a window.
 *
 * @since 1.0.0
 */
enum UserAttentionType {
  /**
   * #### Platform-specific
   * - **macOS:** Bounces the dock icon until the application is in focus.
   * - **Windows:** Flashes both the window and the taskbar button until the application is in focus.
   */
  Critical = 1,
  /**
   * #### Platform-specific
   * - **macOS:** Bounces the dock icon once.
   * - **Windows:** Flashes the taskbar button until the application is in focus.
   */
  Informational // implicitly 2 (auto-incremented from Critical = 1)
}
/** Wraps a `tauri://close-requested` event and lets a handler veto the close. */
class CloseRequestedEvent {
  /** Event name */
  event: EventName
  /** Event identifier used to unlisten */
  id: number
  private _preventDefault = false

  constructor(event: Event<unknown>) {
    const { event: name, id } = event
    this.event = name
    this.id = id
  }

  /** Marks the close request as handled so the window will not be closed. */
  preventDefault(): void {
    this._preventDefault = true
  }

  /** Whether {@linkcode CloseRequestedEvent.preventDefault} has been called. */
  isPreventDefault(): boolean {
    return this._preventDefault
  }
}
/** Cursor icons a window can display; names mirror the CSS cursor keywords. */
export type CursorIcon =
  | 'default'
  | 'crosshair'
  | 'hand'
  | 'arrow'
  | 'move'
  | 'text'
  | 'wait'
  | 'help'
  | 'progress'
  // something cannot be done
  | 'notAllowed'
  | 'contextMenu'
  | 'cell'
  | 'verticalText'
  | 'alias'
  | 'copy'
  | 'noDrop'
  // something can be grabbed
  | 'grab'
  // something is grabbed
  | 'grabbing'
  | 'allScroll'
  | 'zoomIn'
  | 'zoomOut'
  // edge is to be moved
  | 'eResize'
  | 'nResize'
  | 'neResize'
  | 'nwResize'
  | 'sResize'
  | 'seResize'
  | 'swResize'
  | 'wResize'
  | 'ewResize'
  | 'nsResize'
  | 'neswResize'
  | 'nwseResize'
  | 'colResize'
  | 'rowResize'
/** Taskbar/dock progress indicator status. */
export enum ProgressBarStatus {
  /**
   * Hide progress bar.
   */
  None = 'none',
  /**
   * Normal state.
   */
  Normal = 'normal',
  /**
   * Indeterminate state. **Treated as Normal on Linux and macOS**
   */
  Indeterminate = 'indeterminate',
  /**
   * Paused state. **Treated as Normal on Linux**
   */
  Paused = 'paused',
  /**
   * Error state. **Treated as Normal on Linux**
   */
  Error = 'error'
}
/**
 * Optional lower and upper bounds on a window's size.
 *
 * NOTE(review): units are presumably logical pixels and an `undefined` field
 * presumably leaves that bound unconstrained -- confirm against the backend.
 */
export interface WindowSizeConstraints {
  /** Minimum window width. */
  minWidth?: number
  /** Minimum window height. */
  minHeight?: number
  /** Maximum window width. */
  maxWidth?: number
  /** Maximum window height. */
  maxHeight?: number
}
/** Desired state of the taskbar/dock progress indicator. */
export interface ProgressBarState {
  /**
   * The progress bar status.
   */
  status?: ProgressBarStatus
  /**
   * The progress bar progress. This can be a value ranging from `0` to `100`.
   */
  progress?: number
}
/**
 * Get an instance of `Window` for the current window.
 *
 * @since 1.0.0
 */
function getCurrentWindow(): Window {
  // The current window's label is injected by the Tauri runtime.
  const label = window.__TAURI_INTERNALS__.metadata.currentWindow.label
  // @ts-expect-error `skip` is not defined in the public API but it is handled by the constructor
  return new Window(label, { skip: true })
}
/**
 * Gets a list of instances of `Window` for all available windows.
 *
 * @since 1.0.0
 */
async function getAllWindows(): Promise<Window[]> {
  const labels = await invoke<string[]>('plugin:window|get_all_windows')
  return labels.map(
    (label) =>
      new Window(label, {
        // @ts-expect-error `skip` is not defined in the public API but it is handled by the constructor
        skip: true
      })
  )
}
/** @ignore */
// Lifecycle events that are dispatched locally (via `this.listeners`)
// instead of going through the created window's event system.
const localTauriEvents = ['tauri://created', 'tauri://error']
/** @ignore */
// A window's unique identifier.
export type WindowLabel = string
/**
* Create new window or get a handle to an existing one.
*
* Windows are identified by a *label* a unique identifier that can be used to reference it later.
* It may only contain alphanumeric characters `a-zA-Z` plus the following special characters `-`, `/`, `:` and `_`.
*
* @example
* ```typescript
* import { Window } from "@tauri-apps/api/window"
*
* const appWindow = new Window('theUniqueLabel');
*
* appWindow.once('tauri://created', function () {
* // window successfully created
* });
* appWindow.once('tauri://error', function (e) {
* // an error happened creating the window
* });
*
* // emit an event to the backend
* await appWindow.emit("some-event", "data");
* // listen to an event from the backend
* const unlisten = await appWindow.listen("event-name", e => {});
* unlisten();
* ```
*
* @since 2.0.0
*/
class Window {
  /** The window label. It is a unique identifier for the window, can be used to reference it later. */
  label: WindowLabel
  /** Local event listeners, keyed by event name (used for the `tauri://created` / `tauri://error` lifecycle events). */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  listeners: Record<string, Array<EventCallback<any>>>
/**
* Creates a new Window.
* @example
* ```typescript
* import { Window } from '@tauri-apps/api/window';
* const appWindow = new Window('my-label');
* appWindow.once('tauri://created', function () {
* // window successfully created
* });
* appWindow.once('tauri://error', function (e) {
* // an error happened creating the window
* });
* ```
*
* @param label The unique window label. Must be alphanumeric: `a-zA-Z-/:_`.
* @returns The {@link Window} instance to communicate with the window.
*/
constructor(label: WindowLabel, options: WindowOptions = {}) {
this.label = label
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.listeners = Object.create(null)
// @ts-expect-error `skip` is not a public API so it is not defined in WindowOptions
if (!options?.skip) {
invoke('plugin:window|create', {
options: {
...options,
parent:
typeof options.parent === 'string'
? options.parent
: options.parent?.label,
label
}
})
.then(async () => this.emit('tauri://created'))
.catch(async (e: string) => this.emit('tauri://error', e))
}
}
/**
* Gets the Window associated with the given label.
* @example
* ```typescript
* import { Window } from '@tauri-apps/api/window';
* const mainWindow = Window.getByLabel('main');
* ```
*
* @param label The window label.
* @returns The Window instance to communicate with the window or null if the window doesn't exist.
*/
static async getByLabel(label: string): Promise<Window | null> {
return (await getAllWindows()).find((w) => w.label === label) ?? null
}
  /**
   * Get an instance of `Window` for the current window.
   *
   * @returns The {@link Window} handle for the calling window.
   */
  static getCurrent(): Window {
    return getCurrentWindow()
  }
  /**
   * Gets a list of instances of `Window` for all available windows.
   *
   * @returns Handles for every window known to the application.
   */
  static async getAll(): Promise<Window[]> {
    return getAllWindows()
  }
  /**
   * Gets the focused window.
   * @example
   * ```typescript
   * import { Window } from '@tauri-apps/api/window';
   * const focusedWindow = Window.getFocusedWindow();
   * ```
   *
   * @returns The Window instance or `null` if there is not any focused window.
   */
  static async getFocusedWindow(): Promise<Window | null> {
    for (const w of await getAllWindows()) {
      if (await w.isFocused()) {
        return w
      }
    }
    return null
  }
/**
* Listen to an emitted event on this window.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const unlisten = await getCurrentWindow().listen<string>('state-changed', (event) => {
* console.log(`Got error: ${payload}`);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @param event Event name. Must include only alphanumeric characters, `-`, `/`, `:` and `_`.
* @param handler Event handler.
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async listen<T>(
event: EventName,
handler: EventCallback<T>
): Promise<UnlistenFn> {
if (this._handleTauriEvent(event, handler)) {
return () => {
// eslint-disable-next-line security/detect-object-injection
const listeners = this.listeners[event]
listeners.splice(listeners.indexOf(handler), 1)
}
}
return listen(event, handler, {
target: { kind: 'Window', label: this.label }
})
}
/**
* Listen to an emitted event on this window only once.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const unlisten = await getCurrentWindow().once<null>('initialized', (event) => {
* console.log(`Window initialized!`);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @param event Event name. Must include only alphanumeric characters, `-`, `/`, `:` and `_`.
* @param handler Event handler.
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async once<T>(
event: EventName,
handler: EventCallback<T>
): Promise<UnlistenFn> {
if (this._handleTauriEvent(event, handler)) {
return () => {
// eslint-disable-next-line security/detect-object-injection
const listeners = this.listeners[event]
listeners.splice(listeners.indexOf(handler), 1)
}
}
return once(event, handler, {
target: { kind: 'Window', label: this.label }
})
}
/**
* Emits an event to all {@link EventTarget|targets}.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().emit('window-loaded', { loggedIn: true, token: 'authToken' });
* ```
*
* @param event Event name. Must include only alphanumeric characters, `-`, `/`, `:` and `_`.
* @param payload Event payload.
*/
async emit<T>(event: string, payload?: T): Promise<void> {
if (localTauriEvents.includes(event)) {
// eslint-disable-next-line
for (const handler of this.listeners[event] || []) {
handler({
event,
id: -1,
payload
})
}
return
}
return emit<T>(event, payload)
}
/**
* Emits an event to all {@link EventTarget|targets} matching the given target.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().emit('main', 'window-loaded', { loggedIn: true, token: 'authToken' });
* ```
* @param target Label of the target Window/Webview/WebviewWindow or raw {@link EventTarget} object.
* @param event Event name. Must include only alphanumeric characters, `-`, `/`, `:` and `_`.
* @param payload Event payload.
*/
async emitTo<T>(
target: string | EventTarget,
event: string,
payload?: T
): Promise<void> {
if (localTauriEvents.includes(event)) {
// eslint-disable-next-line security/detect-object-injection
for (const handler of this.listeners[event] || []) {
handler({
event,
id: -1,
payload
})
}
return
}
return emitTo<T>(target, event, payload)
}
/** @ignore */
_handleTauriEvent<T>(event: string, handler: EventCallback<T>): boolean {
if (localTauriEvents.includes(event)) {
if (!(event in this.listeners)) {
// eslint-disable-next-line
this.listeners[event] = [handler]
} else {
// eslint-disable-next-line
this.listeners[event].push(handler)
}
return true
}
return false
}
// Getters
/**
* The scale factor that can be used to map physical pixels to logical pixels.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const factor = await getCurrentWindow().scaleFactor();
* ```
*
* @returns The window's monitor scale factor.
*/
async scaleFactor(): Promise<number> {
return invoke('plugin:window|scale_factor', {
label: this.label
})
}
/**
* The position of the top-left hand corner of the window's client area relative to the top-left hand corner of the desktop.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const position = await getCurrentWindow().innerPosition();
* ```
*
* @returns The window's inner position.
*/
async innerPosition(): Promise<PhysicalPosition> {
return invoke<{ x: number; y: number }>('plugin:window|inner_position', {
label: this.label
}).then((p) => new PhysicalPosition(p))
}
/**
* The position of the top-left hand corner of the window relative to the top-left hand corner of the desktop.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const position = await getCurrentWindow().outerPosition();
* ```
*
* @returns The window's outer position.
*/
async outerPosition(): Promise<PhysicalPosition> {
return invoke<{ x: number; y: number }>('plugin:window|outer_position', {
label: this.label
}).then((p) => new PhysicalPosition(p))
}
/**
* The physical size of the window's client area.
* The client area is the content of the window, excluding the title bar and borders.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const size = await getCurrentWindow().innerSize();
* ```
*
* @returns The window's inner size.
*/
async innerSize(): Promise<PhysicalSize> {
return invoke<{ width: number; height: number }>(
'plugin:window|inner_size',
{
label: this.label
}
).then((s) => new PhysicalSize(s))
}
/**
* The physical size of the entire window.
* These dimensions include the title bar and borders. If you don't want that (and you usually don't), use inner_size instead.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const size = await getCurrentWindow().outerSize();
* ```
*
* @returns The window's outer size.
*/
async outerSize(): Promise<PhysicalSize> {
return invoke<{ width: number; height: number }>(
'plugin:window|outer_size',
{
label: this.label
}
).then((s) => new PhysicalSize(s))
}
/**
* Gets the window's current fullscreen state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const fullscreen = await getCurrentWindow().isFullscreen();
* ```
*
* @returns Whether the window is in fullscreen mode or not.
*/
async isFullscreen(): Promise<boolean> {
return invoke('plugin:window|is_fullscreen', {
label: this.label
})
}
/**
* Gets the window's current minimized state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const minimized = await getCurrentWindow().isMinimized();
* ```
*/
async isMinimized(): Promise<boolean> {
return invoke('plugin:window|is_minimized', {
label: this.label
})
}
/**
* Gets the window's current maximized state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const maximized = await getCurrentWindow().isMaximized();
* ```
*
* @returns Whether the window is maximized or not.
*/
async isMaximized(): Promise<boolean> {
return invoke('plugin:window|is_maximized', {
label: this.label
})
}
/**
* Gets the window's current focus state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const focused = await getCurrentWindow().isFocused();
* ```
*
* @returns Whether the window is focused or not.
*/
async isFocused(): Promise<boolean> {
return invoke('plugin:window|is_focused', {
label: this.label
})
}
/**
* Gets the window's current decorated state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const decorated = await getCurrentWindow().isDecorated();
* ```
*
* @returns Whether the window is decorated or not.
*/
async isDecorated(): Promise<boolean> {
return invoke('plugin:window|is_decorated', {
label: this.label
})
}
/**
* Gets the window's current resizable state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const resizable = await getCurrentWindow().isResizable();
* ```
*
* @returns Whether the window is resizable or not.
*/
async isResizable(): Promise<boolean> {
return invoke('plugin:window|is_resizable', {
label: this.label
})
}
/**
* Gets the window's native maximize button state.
*
* #### Platform-specific
*
* - **Linux / iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const maximizable = await getCurrentWindow().isMaximizable();
* ```
*
* @returns Whether the window's native maximize button is enabled or not.
*/
async isMaximizable(): Promise<boolean> {
return invoke('plugin:window|is_maximizable', {
label: this.label
})
}
/**
* Gets the window's native minimize button state.
*
* #### Platform-specific
*
* - **Linux / iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const minimizable = await getCurrentWindow().isMinimizable();
* ```
*
* @returns Whether the window's native minimize button is enabled or not.
*/
async isMinimizable(): Promise<boolean> {
return invoke('plugin:window|is_minimizable', {
label: this.label
})
}
/**
* Gets the window's native close button state.
*
* #### Platform-specific
*
* - **iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const closable = await getCurrentWindow().isClosable();
* ```
*
* @returns Whether the window's native close button is enabled or not.
*/
async isClosable(): Promise<boolean> {
return invoke('plugin:window|is_closable', {
label: this.label
})
}
/**
* Gets the window's current visible state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const visible = await getCurrentWindow().isVisible();
* ```
*
* @returns Whether the window is visible or not.
*/
async isVisible(): Promise<boolean> {
return invoke('plugin:window|is_visible', {
label: this.label
})
}
/**
* Gets the window's current title.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const title = await getCurrentWindow().title();
* ```
*/
async title(): Promise<string> {
return invoke('plugin:window|title', {
label: this.label
})
}
/**
* Gets the window's current theme.
*
* #### Platform-specific
*
* - **macOS:** Theme was introduced on macOS 10.14. Returns `light` on macOS 10.13 and below.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const theme = await getCurrentWindow().theme();
* ```
*
* @returns The window theme.
*/
async theme(): Promise<Theme | null> {
return invoke('plugin:window|theme', {
label: this.label
})
}
/**
* Whether the window is configured to be always on top of other windows or not.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* const alwaysOnTop = await getCurrentWindow().isAlwaysOnTop();
* ```
*
* @returns Whether the window is visible or not.
*/
async isAlwaysOnTop(): Promise<boolean> {
return invoke('plugin:window|is_always_on_top', {
label: this.label
})
}
// Setters
/**
* Centers the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().center();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async center(): Promise<void> {
return invoke('plugin:window|center', {
label: this.label
})
}
/**
* Requests user attention to the window, this has no effect if the application
* is already focused. How requesting for user attention manifests is platform dependent,
* see `UserAttentionType` for details.
*
* Providing `null` will unset the request for user attention. Unsetting the request for
* user attention might not be done automatically by the WM when the window receives input.
*
* #### Platform-specific
*
* - **macOS:** `null` has no effect.
* - **Linux:** Urgency levels have the same effect.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().requestUserAttention();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async requestUserAttention(
requestType: UserAttentionType | null
): Promise<void> {
let requestType_ = null
if (requestType) {
if (requestType === UserAttentionType.Critical) {
requestType_ = { type: 'Critical' }
} else {
requestType_ = { type: 'Informational' }
}
}
return invoke('plugin:window|request_user_attention', {
label: this.label,
value: requestType_
})
}
/**
* Updates the window resizable flag.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setResizable(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setResizable(resizable: boolean): Promise<void> {
return invoke('plugin:window|set_resizable', {
label: this.label,
value: resizable
})
}
/**
* Enable or disable the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setEnabled(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*
* @since 2.0.0
*/
async setEnabled(enabled: boolean): Promise<void> {
return invoke('plugin:window|set_enabled', {
label: this.label,
value: enabled
})
}
/**
* Whether the window is enabled or disabled.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setEnabled(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*
* @since 2.0.0
*/
async isEnabled(): Promise<boolean> {
return invoke('plugin:window|is_enabled', {
label: this.label
})
}
/**
* Sets whether the window's native maximize button is enabled or not.
* If resizable is set to false, this setting is ignored.
*
* #### Platform-specific
*
* - **macOS:** Disables the "zoom" button in the window titlebar, which is also used to enter fullscreen mode.
* - **Linux / iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setMaximizable(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setMaximizable(maximizable: boolean): Promise<void> {
return invoke('plugin:window|set_maximizable', {
label: this.label,
value: maximizable
})
}
/**
* Sets whether the window's native minimize button is enabled or not.
*
* #### Platform-specific
*
* - **Linux / iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setMinimizable(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setMinimizable(minimizable: boolean): Promise<void> {
return invoke('plugin:window|set_minimizable', {
label: this.label,
value: minimizable
})
}
/**
* Sets whether the window's native close button is enabled or not.
*
* #### Platform-specific
*
* - **Linux:** GTK+ will do its best to convince the window manager not to show a close button. Depending on the system, this function may not have any effect when called on a window that is already visible
* - **iOS / Android:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setClosable(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setClosable(closable: boolean): Promise<void> {
return invoke('plugin:window|set_closable', {
label: this.label,
value: closable
})
}
/**
* Sets the window title.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setTitle('Tauri');
* ```
*
* @param title The new title
* @returns A promise indicating the success or failure of the operation.
*/
async setTitle(title: string): Promise<void> {
return invoke('plugin:window|set_title', {
label: this.label,
value: title
})
}
/**
* Maximizes the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().maximize();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async maximize(): Promise<void> {
return invoke('plugin:window|maximize', {
label: this.label
})
}
/**
* Unmaximizes the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().unmaximize();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async unmaximize(): Promise<void> {
return invoke('plugin:window|unmaximize', {
label: this.label
})
}
/**
* Toggles the window maximized state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().toggleMaximize();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async toggleMaximize(): Promise<void> {
return invoke('plugin:window|toggle_maximize', {
label: this.label
})
}
/**
* Minimizes the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().minimize();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async minimize(): Promise<void> {
return invoke('plugin:window|minimize', {
label: this.label
})
}
/**
* Unminimizes the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().unminimize();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async unminimize(): Promise<void> {
return invoke('plugin:window|unminimize', {
label: this.label
})
}
/**
* Sets the window visibility to true.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().show();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async show(): Promise<void> {
return invoke('plugin:window|show', {
label: this.label
})
}
/**
* Sets the window visibility to false.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().hide();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async hide(): Promise<void> {
return invoke('plugin:window|hide', {
label: this.label
})
}
/**
* Closes the window.
*
* Note this emits a closeRequested event so you can intercept it. To force window close, use {@link Window.destroy}.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().close();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async close(): Promise<void> {
return invoke('plugin:window|close', {
label: this.label
})
}
/**
* Destroys the window. Behaves like {@link Window.close} but forces the window close instead of emitting a closeRequested event.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().destroy();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async destroy(): Promise<void> {
return invoke('plugin:window|destroy', {
label: this.label
})
}
/**
* Whether the window should have borders and bars.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setDecorations(false);
* ```
*
* @param decorations Whether the window should have borders and bars.
* @returns A promise indicating the success or failure of the operation.
*/
async setDecorations(decorations: boolean): Promise<void> {
return invoke('plugin:window|set_decorations', {
label: this.label,
value: decorations
})
}
/**
* Whether or not the window should have shadow.
*
* #### Platform-specific
*
* - **Windows:**
* - `false` has no effect on decorated window, shadows are always ON.
* - `true` will make undecorated window have a 1px white border,
* and on Windows 11, it will have a rounded corners.
* - **Linux:** Unsupported.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setShadow(false);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setShadow(enable: boolean): Promise<void> {
return invoke('plugin:window|set_shadow', {
label: this.label,
value: enable
})
}
/**
* Set window effects.
*/
async setEffects(effects: Effects): Promise<void> {
return invoke('plugin:window|set_effects', {
label: this.label,
value: effects
})
}
/**
* Clear any applied effects if possible.
*/
async clearEffects(): Promise<void> {
return invoke('plugin:window|set_effects', {
label: this.label,
value: null
})
}
/**
* Whether the window should always be on top of other windows.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setAlwaysOnTop(true);
* ```
*
* @param alwaysOnTop Whether the window should always be on top of other windows or not.
* @returns A promise indicating the success or failure of the operation.
*/
async setAlwaysOnTop(alwaysOnTop: boolean): Promise<void> {
return invoke('plugin:window|set_always_on_top', {
label: this.label,
value: alwaysOnTop
})
}
/**
* Whether the window should always be below other windows.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setAlwaysOnBottom(true);
* ```
*
* @param alwaysOnBottom Whether the window should always be below other windows or not.
* @returns A promise indicating the success or failure of the operation.
*/
async setAlwaysOnBottom(alwaysOnBottom: boolean): Promise<void> {
return invoke('plugin:window|set_always_on_bottom', {
label: this.label,
value: alwaysOnBottom
})
}
/**
* Prevents the window contents from being captured by other apps.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setContentProtected(true);
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setContentProtected(protected_: boolean): Promise<void> {
return invoke('plugin:window|set_content_protected', {
label: this.label,
value: protected_
})
}
/**
* Resizes the window with a new inner size.
* @example
* ```typescript
* import { getCurrentWindow, LogicalSize } from '@tauri-apps/api/window';
* await getCurrentWindow().setSize(new LogicalSize(600, 500));
* ```
*
* @param size The logical or physical inner size.
* @returns A promise indicating the success or failure of the operation.
*/
async setSize(size: LogicalSize | PhysicalSize | Size): Promise<void> {
return invoke('plugin:window|set_size', {
label: this.label,
value: size instanceof Size ? size : new Size(size)
})
}
/**
* Sets the window minimum inner size. If the `size` argument is not provided, the constraint is unset.
* @example
* ```typescript
* import { getCurrentWindow, PhysicalSize } from '@tauri-apps/api/window';
* await getCurrentWindow().setMinSize(new PhysicalSize(600, 500));
* ```
*
* @param size The logical or physical inner size, or `null` to unset the constraint.
* @returns A promise indicating the success or failure of the operation.
*/
async setMinSize(
size: LogicalSize | PhysicalSize | Size | null | undefined
): Promise<void> {
return invoke('plugin:window|set_min_size', {
label: this.label,
value: size instanceof Size ? size : size ? new Size(size) : null
})
}
/**
* Sets the window maximum inner size. If the `size` argument is undefined, the constraint is unset.
* @example
* ```typescript
* import { getCurrentWindow, LogicalSize } from '@tauri-apps/api/window';
* await getCurrentWindow().setMaxSize(new LogicalSize(600, 500));
* ```
*
* @param size The logical or physical inner size, or `null` to unset the constraint.
* @returns A promise indicating the success or failure of the operation.
*/
async setMaxSize(
size: LogicalSize | PhysicalSize | Size | null | undefined
): Promise<void> {
return invoke('plugin:window|set_max_size', {
label: this.label,
value: size instanceof Size ? size : size ? new Size(size) : null
})
}
/**
* Sets the window inner size constraints.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setSizeConstraints({ minWidth: 300 });
* ```
*
* @param constraints The logical or physical inner size, or `null` to unset the constraint.
* @returns A promise indicating the success or failure of the operation.
*/
async setSizeConstraints(
constraints: WindowSizeConstraints | null | undefined
): Promise<void> {
function logical(pixel?: number): { Logical: number } | null {
return pixel ? { Logical: pixel } : null
}
return invoke('plugin:window|set_size_constraints', {
label: this.label,
value: {
minWidth: logical(constraints?.minWidth),
minHeight: logical(constraints?.minHeight),
maxWidth: logical(constraints?.maxWidth),
maxHeight: logical(constraints?.maxHeight)
}
})
}
/**
* Sets the window outer position.
* @example
* ```typescript
* import { getCurrentWindow, LogicalPosition } from '@tauri-apps/api/window';
* await getCurrentWindow().setPosition(new LogicalPosition(600, 500));
* ```
*
* @param position The new position, in logical or physical pixels.
* @returns A promise indicating the success or failure of the operation.
*/
async setPosition(
position: LogicalPosition | PhysicalPosition | Position
): Promise<void> {
return invoke('plugin:window|set_position', {
label: this.label,
value: position instanceof Position ? position : new Position(position)
})
}
/**
* Sets the window fullscreen state.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setFullscreen(true);
* ```
*
* @param fullscreen Whether the window should go to fullscreen or not.
* @returns A promise indicating the success or failure of the operation.
*/
async setFullscreen(fullscreen: boolean): Promise<void> {
return invoke('plugin:window|set_fullscreen', {
label: this.label,
value: fullscreen
})
}
/**
* On macOS, Toggles a fullscreen mode that doesn’t require a new macOS space. Returns a boolean indicating whether the transition was successful (this won’t work if the window was already in the native fullscreen).
* This is how fullscreen used to work on macOS in versions before Lion. And allows the user to have a fullscreen window without using another space or taking control over the entire monitor.
*
* On other platforms, this is the same as {@link Window.setFullscreen}.
*
* @param fullscreen Whether the window should go to simple fullscreen or not.
* @returns A promise indicating the success or failure of the operation.
*/
async setSimpleFullscreen(fullscreen: boolean): Promise<void> {
return invoke('plugin:window|set_simple_fullscreen', {
label: this.label,
value: fullscreen
})
}
/**
* Bring the window to front and focus.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setFocus();
* ```
*
* @returns A promise indicating the success or failure of the operation.
*/
async setFocus(): Promise<void> {
return invoke('plugin:window|set_focus', {
label: this.label
})
}
/**
* Sets whether the window can be focused.
*
* #### Platform-specific
*
* - **macOS**: If the window is already focused, it is not possible to unfocus it after calling `set_focusable(false)`.
* In this case, you might consider calling {@link Window.setFocus} but it will move the window to the back i.e. at the bottom in terms of z-order.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setFocusable(true);
* ```
*
* @param focusable Whether the window can be focused.
* @returns A promise indicating the success or failure of the operation.
*/
async setFocusable(focusable: boolean): Promise<void> {
return invoke('plugin:window|set_focusable', {
label: this.label,
value: focusable
})
}
/**
* Sets the window icon.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setIcon('/tauri/awesome.png');
* ```
*
* Note that you may need the `image-ico` or `image-png` Cargo features to use this API.
* To enable it, change your Cargo.toml file:
* ```toml
* [dependencies]
* tauri = { version = "...", features = ["...", "image-png"] }
* ```
*
* @param icon Icon bytes or path to the icon file.
* @returns A promise indicating the success or failure of the operation.
*/
async setIcon(
icon: string | Image | Uint8Array | ArrayBuffer | number[]
): Promise<void> {
return invoke('plugin:window|set_icon', {
label: this.label,
value: transformImage(icon)
})
}
/**
* Whether the window icon should be hidden from the taskbar or not.
*
* #### Platform-specific
*
* - **macOS:** Unsupported.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setSkipTaskbar(true);
* ```
*
* @param skip true to hide window icon, false to show it.
* @returns A promise indicating the success or failure of the operation.
*/
async setSkipTaskbar(skip: boolean): Promise<void> {
return invoke('plugin:window|set_skip_taskbar', {
label: this.label,
value: skip
})
}
/**
* Grabs the cursor, preventing it from leaving the window.
*
* There's no guarantee that the cursor will be hidden. You should
* hide it by yourself if you want so.
*
* #### Platform-specific
*
* - **Linux:** Unsupported.
* - **macOS:** This locks the cursor in a fixed location, which looks visually awkward.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setCursorGrab(true);
* ```
*
* @param grab `true` to grab the cursor icon, `false` to release it.
* @returns A promise indicating the success or failure of the operation.
*/
async setCursorGrab(grab: boolean): Promise<void> {
return invoke('plugin:window|set_cursor_grab', {
label: this.label,
value: grab
})
}
/**
* Modifies the cursor's visibility.
*
* #### Platform-specific
*
* - **Windows:** The cursor is only hidden within the confines of the window.
* - **macOS:** The cursor is hidden as long as the window has input focus, even if the cursor is
* outside of the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setCursorVisible(false);
* ```
*
* @param visible If `false`, this will hide the cursor. If `true`, this will show the cursor.
* @returns A promise indicating the success or failure of the operation.
*/
async setCursorVisible(visible: boolean): Promise<void> {
return invoke('plugin:window|set_cursor_visible', {
label: this.label,
value: visible
})
}
/**
* Modifies the cursor icon of the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setCursorIcon('help');
* ```
*
* @param icon The new cursor icon.
* @returns A promise indicating the success or failure of the operation.
*/
async setCursorIcon(icon: CursorIcon): Promise<void> {
return invoke('plugin:window|set_cursor_icon', {
label: this.label,
value: icon
})
}
  /**
   * Sets the window background color.
   *
   * #### Platform-specific:
   *
   * - **Windows:** alpha channel is ignored.
   * - **iOS / Android:** Unsupported.
   *
   * @param color The new background color.
   * @returns A promise indicating the success or failure of the operation.
   *
   * @since 2.1.0
   */
  async setBackgroundColor(color: Color): Promise<void> {
    // NOTE(review): unlike every sibling setter in this class, this call does
    // not pass `label: this.label`, so it may not target this specific window —
    // confirm against the Rust `set_background_color` command signature.
    return invoke('plugin:window|set_background_color', { color })
  }
/**
* Changes the position of the cursor in window coordinates.
* @example
* ```typescript
* import { getCurrentWindow, LogicalPosition } from '@tauri-apps/api/window';
* await getCurrentWindow().setCursorPosition(new LogicalPosition(600, 300));
* ```
*
* @param position The new cursor position.
* @returns A promise indicating the success or failure of the operation.
*/
async setCursorPosition(
position: LogicalPosition | PhysicalPosition | Position
): Promise<void> {
return invoke('plugin:window|set_cursor_position', {
label: this.label,
value: position instanceof Position ? position : new Position(position)
})
}
/**
* Changes the cursor events behavior.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setIgnoreCursorEvents(true);
* ```
*
* @param ignore `true` to ignore the cursor events; `false` to process them as usual.
* @returns A promise indicating the success or failure of the operation.
*/
async setIgnoreCursorEvents(ignore: boolean): Promise<void> {
return invoke('plugin:window|set_ignore_cursor_events', {
label: this.label,
value: ignore
})
}
/**
* Starts dragging the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().startDragging();
* ```
*
* @return A promise indicating the success or failure of the operation.
*/
async startDragging(): Promise<void> {
return invoke('plugin:window|start_dragging', {
label: this.label
})
}
/**
* Starts resize-dragging the window.
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().startResizeDragging();
* ```
*
* @return A promise indicating the success or failure of the operation.
*/
async startResizeDragging(direction: ResizeDirection): Promise<void> {
return invoke('plugin:window|start_resize_dragging', {
label: this.label,
value: direction
})
}
/**
* Sets the badge count. It is app wide and not specific to this window.
*
* #### Platform-specific
*
* - **Windows**: Unsupported. Use @{linkcode Window.setOverlayIcon} instead.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setBadgeCount(5);
* ```
*
* @param count The badge count. Use `undefined` to remove the badge.
* @return A promise indicating the success or failure of the operation.
*/
async setBadgeCount(count?: number): Promise<void> {
return invoke('plugin:window|set_badge_count', {
label: this.label,
value: count
})
}
/**
* Sets the badge cont **macOS only**.
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setBadgeLabel("Hello");
* ```
*
* @param label The badge label. Use `undefined` to remove the badge.
* @return A promise indicating the success or failure of the operation.
*/
async setBadgeLabel(label?: string): Promise<void> {
return invoke('plugin:window|set_badge_label', {
label: this.label,
value: label
})
}
/**
* Sets the overlay icon. **Windows only**
* The overlay icon can be set for every window.
*
*
* Note that you may need the `image-ico` or `image-png` Cargo features to use this API.
* To enable it, change your Cargo.toml file:
*
* ```toml
* [dependencies]
* tauri = { version = "...", features = ["...", "image-png"] }
* ```
*
* @example
* ```typescript
* import { getCurrentWindow } from '@tauri-apps/api/window';
* await getCurrentWindow().setOverlayIcon("/tauri/awesome.png");
* ```
*
* @param icon Icon bytes or path to the icon file. Use `undefined` to remove the overlay icon.
* @return A promise indicating the success or failure of the operation.
*/
async setOverlayIcon(
icon?: string | Image | Uint8Array | ArrayBuffer | number[]
): Promise<void> {
return invoke('plugin:window|set_overlay_icon', {
label: this.label,
value: icon ? transformImage(icon) : undefined
})
}
/**
* Sets the taskbar progress state.
*
* #### Platform-specific
*
* - **Linux / macOS**: Progress bar is app-wide and not specific to this window.
* - **Linux**: Only supported desktop environments with `libunity` (e.g. GNOME).
*
* @example
* ```typescript
* import { getCurrentWindow, ProgressBarStatus } from '@tauri-apps/api/window';
* await getCurrentWindow().setProgressBar({
* status: ProgressBarStatus.Normal,
* progress: 50,
* });
* ```
*
* @return A promise indicating the success or failure of the operation.
*/
async setProgressBar(state: ProgressBarState): Promise<void> {
return invoke('plugin:window|set_progress_bar', {
label: this.label,
value: state
})
}
/**
* Sets whether the window should be visible on all workspaces or virtual desktops.
*
* #### Platform-specific
*
* - **Windows / iOS / Android:** Unsupported.
*
* @since 2.0.0
*/
async setVisibleOnAllWorkspaces(visible: boolean): Promise<void> {
return invoke('plugin:window|set_visible_on_all_workspaces', {
label: this.label,
value: visible
})
}
/**
* Sets the title bar style. **macOS only**.
*
* @since 2.0.0
*/
async setTitleBarStyle(style: TitleBarStyle): Promise<void> {
return invoke('plugin:window|set_title_bar_style', {
label: this.label,
value: style
})
}
/**
* Set window theme, pass in `null` or `undefined` to follow system theme
*
* #### Platform-specific
*
* - **Linux / macOS**: Theme is app-wide and not specific to this window.
* - **iOS / Android:** Unsupported.
*
* @since 2.0.0
*/
async setTheme(theme?: Theme | null): Promise<void> {
return invoke('plugin:window|set_theme', {
label: this.label,
value: theme
})
}
// Listeners
/**
* Listen to window resize.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* const unlisten = await getCurrentWindow().onResized(({ payload: size }) => {
* console.log('Window resized', size);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onResized(handler: EventCallback<PhysicalSize>): Promise<UnlistenFn> {
return this.listen<PhysicalSize>(TauriEvent.WINDOW_RESIZED, (e) => {
e.payload = new PhysicalSize(e.payload)
handler(e)
})
}
/**
* Listen to window move.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* const unlisten = await getCurrentWindow().onMoved(({ payload: position }) => {
* console.log('Window moved', position);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onMoved(handler: EventCallback<PhysicalPosition>): Promise<UnlistenFn> {
return this.listen<PhysicalPosition>(TauriEvent.WINDOW_MOVED, (e) => {
e.payload = new PhysicalPosition(e.payload)
handler(e)
})
}
/**
* Listen to window close requested. Emitted when the user requests to closes the window.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* import { confirm } from '@tauri-apps/api/dialog';
* const unlisten = await getCurrentWindow().onCloseRequested(async (event) => {
* const confirmed = await confirm('Are you sure?');
* if (!confirmed) {
* // user did not confirm closing the window; let's prevent it
* event.preventDefault();
* }
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onCloseRequested(
handler: (event: CloseRequestedEvent) => void | Promise<void>
): Promise<UnlistenFn> {
// eslint-disable-next-line @typescript-eslint/no-misused-promises
return this.listen(TauriEvent.WINDOW_CLOSE_REQUESTED, async (event) => {
const evt = new CloseRequestedEvent(event)
await handler(evt)
if (!evt.isPreventDefault()) {
await this.destroy()
}
})
}
  /**
   * Listen to a file drop event.
   * The listener is triggered when the user hovers the selected files on the webview,
   * drops the files or cancels the operation.
   *
   * The payload `type` discriminates the four phases: `enter`, `over`, `drop`
   * and `leave` (see the registrations below).
   *
   * @example
   * ```typescript
   * import { getCurrentWindow } from "@tauri-apps/api/window";
   * const unlisten = await getCurrentWindow().onDragDropEvent((event) => {
   *  if (event.payload.type === 'over') {
   *    console.log('User hovering', event.payload.position);
   *  } else if (event.payload.type === 'drop') {
   *    console.log('User dropped', event.payload.paths);
   *  } else {
   *    console.log('File drop cancelled');
   *  }
   * });
   *
   * // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
   * unlisten();
   * ```
   *
   * @returns A promise resolving to a function to unlisten to the event.
   * Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
   */
  async onDragDropEvent(
    handler: EventCallback<DragDropEvent>
  ): Promise<UnlistenFn> {
    // Shape of the raw IPC payload for enter/over/drop events.
    type DragPayload = { paths: string[]; position: PhysicalPosition }
    // Drag entered the window: forwards the hovered paths and cursor position.
    const unlistenDrag = await this.listen<DragPayload>(
      TauriEvent.DRAG_ENTER,
      (event) => {
        handler({
          ...event,
          payload: {
            type: 'enter',
            paths: event.payload.paths,
            position: new PhysicalPosition(event.payload.position)
          }
        })
      }
    )
    // Cursor moved while dragging over the window: position only, no paths.
    const unlistenDragOver = await this.listen<DragPayload>(
      TauriEvent.DRAG_OVER,
      (event) => {
        handler({
          ...event,
          payload: {
            type: 'over',
            position: new PhysicalPosition(event.payload.position)
          }
        })
      }
    )
    // Files were dropped onto the window.
    const unlistenDrop = await this.listen<DragPayload>(
      TauriEvent.DRAG_DROP,
      (event) => {
        handler({
          ...event,
          payload: {
            type: 'drop',
            paths: event.payload.paths,
            position: new PhysicalPosition(event.payload.position)
          }
        })
      }
    )
    // Drag left the window / operation cancelled: no payload data.
    const unlistenCancel = await this.listen<null>(
      TauriEvent.DRAG_LEAVE,
      (event) => {
        handler({ ...event, payload: { type: 'leave' } })
      }
    )
    // Single unlisten that tears down all four underlying listeners.
    return () => {
      unlistenDrag()
      unlistenDrop()
      unlistenDragOver()
      unlistenCancel()
    }
  }
/**
* Listen to window focus change.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* const unlisten = await getCurrentWindow().onFocusChanged(({ payload: focused }) => {
* console.log('Focus changed, window is focused? ' + focused);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onFocusChanged(handler: EventCallback<boolean>): Promise<UnlistenFn> {
const unlistenFocus = await this.listen<PhysicalPosition>(
TauriEvent.WINDOW_FOCUS,
(event) => {
handler({ ...event, payload: true })
}
)
const unlistenBlur = await this.listen<PhysicalPosition>(
TauriEvent.WINDOW_BLUR,
(event) => {
handler({ ...event, payload: false })
}
)
return () => {
unlistenFocus()
unlistenBlur()
}
}
/**
* Listen to window scale change. Emitted when the window's scale factor has changed.
* The following user actions can cause DPI changes:
* - Changing the display's resolution.
* - Changing the display's scale factor (e.g. in Control Panel on Windows).
* - Moving the window to a display with a different scale factor.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* const unlisten = await getCurrentWindow().onScaleChanged(({ payload }) => {
* console.log('Scale changed', payload.scaleFactor, payload.size);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onScaleChanged(
handler: EventCallback<ScaleFactorChanged>
): Promise<UnlistenFn> {
return this.listen<ScaleFactorChanged>(
TauriEvent.WINDOW_SCALE_FACTOR_CHANGED,
handler
)
}
/**
* Listen to the system theme change.
*
* @example
* ```typescript
* import { getCurrentWindow } from "@tauri-apps/api/window";
* const unlisten = await getCurrentWindow().onThemeChanged(({ payload: theme }) => {
* console.log('New theme: ' + theme);
* });
*
* // you need to call unlisten if your handler goes out of scope e.g. the component is unmounted
* unlisten();
* ```
*
* @returns A promise resolving to a function to unlisten to the event.
* Note that removing the listener is required if your listener goes out of scope e.g. the component is unmounted.
*/
async onThemeChanged(handler: EventCallback<Theme>): Promise<UnlistenFn> {
return this.listen<Theme>(TauriEvent.WINDOW_THEME_CHANGED, handler)
}
}
/**
 * An RGBA color. Each value has minimum of 0 and maximum of 255.
 *
 * It can be either a string `#ffffff`, an array of 3 or 4 elements or an object.
 *
 * @since 2.0.0
 */
type Color =
  // [red, green, blue]
  | [number, number, number]
  // [red, green, blue, alpha]
  | [number, number, number, number]
  // explicit channel object
  | { red: number; green: number; blue: number; alpha: number }
  // hex string, e.g. `#ffffff`
  | string
/**
 * Background throttling policy.
 *
 * Values are serialized to the backend as the lowercase strings below.
 *
 * @since 2.0.0
 */
enum BackgroundThrottlingPolicy {
  /** Disable background throttling. */
  Disabled = 'disabled',
  /** Throttle the webview while it is in the background. */
  Throttle = 'throttle',
  /** Suspend the webview while it is in the background. */
  Suspend = 'suspend'
}
/**
 * The scrollbar style to use in the webview.
 *
 * ## Platform-specific
 *
 * **Windows**: This option must be given the same value for all webviews.
 *
 * @since 2.8.0
 */
enum ScrollBarStyle {
  /**
   * The default scrollbar style for the webview.
   */
  Default = 'default',
  /**
   * Fluent UI style overlay scrollbars. **Windows Only**
   *
   * Requires WebView2 Runtime version 125.0.2535.41 or higher, does nothing on older versions,
   * see https://learn.microsoft.com/en-us/microsoft-edge/webview2/release-notes/?tabs=dotnetcsharp#10253541
   */
  FluentOverlay = 'fluentOverlay'
}
/**
 * Platform-specific window effects.
 *
 * Each variant's doc comment notes the platform (and minimum OS version)
 * it applies to.
 *
 * @since 2.0.0
 */
enum Effect {
  /**
   * A default material appropriate for the view's effectiveAppearance. **macOS 10.14-**
   *
   * @deprecated since macOS 10.14. You should instead choose an appropriate semantic material.
   */
  AppearanceBased = 'appearanceBased',
  /**
   * **macOS 10.14-**
   *
   * @deprecated since macOS 10.14. Use a semantic material instead.
   */
  Light = 'light',
  /**
   * **macOS 10.14-**
   *
   * @deprecated since macOS 10.14. Use a semantic material instead.
   */
  Dark = 'dark',
  /**
   * **macOS 10.14-**
   *
   * @deprecated since macOS 10.14. Use a semantic material instead.
   */
  MediumLight = 'mediumLight',
  /**
   * **macOS 10.14-**
   *
   * @deprecated since macOS 10.14. Use a semantic material instead.
   */
  UltraDark = 'ultraDark',
  /**
   * **macOS 10.10+**
   */
  Titlebar = 'titlebar',
  /**
   * **macOS 10.10+**
   */
  Selection = 'selection',
  /**
   * **macOS 10.11+**
   */
  Menu = 'menu',
  /**
   * **macOS 10.11+**
   */
  Popover = 'popover',
  /**
   * **macOS 10.11+**
   */
  Sidebar = 'sidebar',
  /**
   * **macOS 10.14+**
   */
  HeaderView = 'headerView',
  /**
   * **macOS 10.14+**
   */
  Sheet = 'sheet',
  /**
   * **macOS 10.14+**
   */
  WindowBackground = 'windowBackground',
  /**
   * **macOS 10.14+**
   */
  HudWindow = 'hudWindow',
  /**
   * **macOS 10.14+**
   */
  FullScreenUI = 'fullScreenUI',
  /**
   * **macOS 10.14+**
   */
  Tooltip = 'tooltip',
  /**
   * **macOS 10.14+**
   */
  ContentBackground = 'contentBackground',
  /**
   * **macOS 10.14+**
   */
  UnderWindowBackground = 'underWindowBackground',
  /**
   * **macOS 10.14+**
   */
  UnderPageBackground = 'underPageBackground',
  /**
   * **Windows 11 Only**
   */
  Mica = 'mica',
  /**
   * **Windows 7/10/11(22H1) Only**
   *
   * #### Notes
   *
   * This effect has bad performance when resizing/dragging the window on Windows 11 build 22621.
   */
  Blur = 'blur',
  /**
   * **Windows 10/11**
   *
   * #### Notes
   *
   * This effect has bad performance when resizing/dragging the window on Windows 10 v1903+ and Windows 11 build 22000.
   */
  Acrylic = 'acrylic',
  /**
   * Tabbed effect that matches the system dark preference **Windows 11 Only**
   */
  Tabbed = 'tabbed',
  /**
   * Tabbed effect with dark mode but only if dark mode is enabled on the system **Windows 11 Only**
   */
  TabbedDark = 'tabbedDark',
  /**
   * Tabbed effect with light mode **Windows 11 Only**
   */
  TabbedLight = 'tabbedLight'
}
/**
 * Window effect state. **macOS only**
 *
 * Controls when the visual effect material is rendered as active.
 *
 * @see https://developer.apple.com/documentation/appkit/nsvisualeffectview/state
 *
 * @since 2.0.0
 */
enum EffectState {
  /**
   * Make window effect state follow the window's active state **macOS only**
   */
  FollowsWindowActiveState = 'followsWindowActiveState',
  /**
   * Make window effect state always active **macOS only**
   */
  Active = 'active',
  /**
   * Make window effect state always inactive **macOS only**
   */
  Inactive = 'inactive'
}
/** The window effects configuration object.
 *
 * @since 2.0.0
 */
interface Effects {
  /**
   * List of Window effects to apply to the Window.
   * Conflicting effects will apply the first one and ignore the rest.
   */
  effects: Effect[]
  /**
   * Window effect state **macOS Only**
   */
  state?: EffectState
  /**
   * Window effect corner radius **macOS Only**
   */
  radius?: number
  /**
   * Window effect color. Affects {@link Effect.Blur} and {@link Effect.Acrylic} only
   * on Windows 10 v1903+. Doesn't have any effect on Windows 7 or Windows 11.
   */
  color?: Color
}
/**
 * Additional margin to keep between the window and the work-area edges
 * when the `preventOverflow` window option is enabled.
 */
interface PreventOverflowMargin {
  /** Horizontal margin. */
  width: number
  /** Vertical margin. */
  height: number
}
/**
 * Configuration for the window to create.
 *
 * @since 1.0.0
 */
interface WindowOptions {
  /** Show window in the center of the screen. */
  center?: boolean
  /** The initial horizontal position in logical pixels. Only applies if `y` is also set. */
  x?: number
  /** The initial vertical position in logical pixels. Only applies if `x` is also set. */
  y?: number
  /** The initial width in logical pixels. */
  width?: number
  /** The initial height in logical pixels. */
  height?: number
  /** The minimum width in logical pixels. Only applies if `minHeight` is also set. */
  minWidth?: number
  /** The minimum height in logical pixels. Only applies if `minWidth` is also set. */
  minHeight?: number
  /** The maximum width in logical pixels. Only applies if `maxHeight` is also set. */
  maxWidth?: number
  /** The maximum height in logical pixels. Only applies if `maxWidth` is also set. */
  maxHeight?: number
  /**
   * Prevent the window from overflowing the working area (e.g. monitor size - taskbar size)
   * on creation, which means the window size will be limited to `monitor size - taskbar size`
   *
   * Can either be set to `true` or to a {@link PreventOverflowMargin} object to set an additional margin
   * that should be considered to determine the working area
   * (in this case the window size will be limited to `monitor size - taskbar size - margin`)
   *
   * **NOTE**: The overflow check is only performed on window creation, resizes can still overflow
   *
   * #### Platform-specific
   *
   * - **iOS / Android:** Unsupported.
   */
  preventOverflow?: boolean | PreventOverflowMargin
  /** Whether the window is resizable or not. */
  resizable?: boolean
  /** Window title. */
  title?: string
  /** Whether the window is in fullscreen mode or not. */
  fullscreen?: boolean
  /** Whether the window will be initially focused or not. */
  focus?: boolean
  /** Whether the window can be focused or not. */
  focusable?: boolean
  /**
   * Whether the window is transparent or not.
   * Note that on `macOS` this requires the `macos-private-api` feature flag, enabled under `tauri.conf.json > app > macOSPrivateApi`.
   * WARNING: Using private APIs on `macOS` prevents your application from being accepted to the `App Store`.
   */
  transparent?: boolean
  /** Whether the window should be maximized upon creation or not. */
  maximized?: boolean
  /** Whether the window should be immediately visible upon creation or not. */
  visible?: boolean
  /** Whether the window should have borders and bars or not. */
  decorations?: boolean
  /** Whether the window should always be on top of other windows or not. */
  alwaysOnTop?: boolean
  /** Whether the window should always be below other windows. */
  alwaysOnBottom?: boolean
  /** Prevents the window contents from being captured by other apps. */
  contentProtected?: boolean
  /** Whether or not the window icon should be added to the taskbar. */
  skipTaskbar?: boolean
  /**
   *  Whether or not the window has shadow.
   *
   * #### Platform-specific
   *
   * - **Windows:**
   *   - `false` has no effect on decorated window, shadows are always ON.
   *   - `true` will make undecorated window have a 1px white border,
   * and on Windows 11, it will have a rounded corners.
   * - **Linux:** Unsupported.
   *
   * @since 2.0.0
   */
  shadow?: boolean
  /**
   * The initial window theme. Defaults to the system theme.
   *
   * Only implemented on Windows and macOS 10.14+.
   */
  theme?: Theme
  /**
   * The style of the macOS title bar.
   */
  titleBarStyle?: TitleBarStyle
  /**
   * The position of the window controls on macOS.
   *
   * Requires `titleBarStyle: 'overlay'` and `decorations: true`.
   *
   * @since 2.4.0
   */
  trafficLightPosition?: LogicalPosition
  /**
   * If `true`, sets the window title to be hidden on macOS.
   */
  hiddenTitle?: boolean
  /**
   * Defines the window [tabbing identifier](https://developer.apple.com/documentation/appkit/nswindow/1644704-tabbingidentifier) on macOS.
   *
   * Windows with the same tabbing identifier will be grouped together.
   * If the tabbing identifier is not set, automatic tabbing will be disabled.
   */
  tabbingIdentifier?: string
  /**
   * Whether the window's native maximize button is enabled or not. Defaults to `true`.
   */
  maximizable?: boolean
  /**
   * Whether the window's native minimize button is enabled or not. Defaults to `true`.
   */
  minimizable?: boolean
  /**
   * Whether the window's native close button is enabled or not. Defaults to `true`.
   */
  closable?: boolean
  /**
   * Sets a parent to the window to be created. Can be either a {@linkcode Window} or a label of the window.
   *
   * #### Platform-specific
   *
   * - **Windows**: This sets the passed parent as an owner window to the window to be created.
   * From [MSDN owned windows docs](https://docs.microsoft.com/en-us/windows/win32/winmsg/window-features#owned-windows):
   *     - An owned window is always above its owner in the z-order.
   *     - The system automatically destroys an owned window when its owner is destroyed.
   *     - An owned window is hidden when its owner is minimized.
   * - **Linux**: This makes the new window transient for parent, see <https://docs.gtk.org/gtk3/method.Window.set_transient_for.html>
   * - **macOS**: This adds the window as a child of parent, see <https://developer.apple.com/documentation/appkit/nswindow/1419152-addchildwindow?language=objc>
   */
  parent?: Window | WebviewWindow | string
  /** Whether the window should be visible on all workspaces or virtual desktops.
   *
   * #### Platform-specific
   *
   * - **Windows / iOS / Android:** Unsupported.
   *
   * @since 2.0.0
   */
  visibleOnAllWorkspaces?: boolean
  /**
   * Window effects.
   *
   * Requires the window to be transparent.
   *
   * #### Platform-specific:
   *
   * - **Windows**: If using decorations or shadows, you may want to try this workaround <https://github.com/tauri-apps/tao/issues/72#issuecomment-975607891>
   * - **Linux**: Unsupported
   */
  windowEffects?: Effects
  /**
   * Set the window background color.
   *
   * #### Platform-specific:
   *
   * - **Android / iOS:** Unsupported.
   * - **Windows**: alpha channel is ignored.
   *
   * @since 2.1.0
   */
  backgroundColor?: Color
  /** Change the default background throttling behaviour.
   *
   * ## Platform-specific
   *
   * - **Linux / Windows / Android**: Unsupported. Workarounds like a pending WebLock transaction might suffice.
   * - **iOS**: Supported since version 17.0+.
   * - **macOS**: Supported since version 14.0+.
   *
   * see https://github.com/tauri-apps/tauri/issues/5250#issuecomment-2569380578
   *
   * @since 2.3.0
   */
  backgroundThrottling?: BackgroundThrottlingPolicy
  /**
   * Whether we should disable JavaScript code execution on the webview or not.
   */
  javascriptDisabled?: boolean
  /**
   * on macOS and iOS there is a link preview on long pressing links, this is enabled by default.
   * see https://docs.rs/objc2-web-kit/latest/objc2_web_kit/struct.WKWebView.html#method.allowsLinkPreview
   */
  allowLinkPreview?: boolean
  /**
   * Allows disabling the input accessory view on iOS.
   *
   * The accessory view is the view that appears above the keyboard when a text input element is focused.
   * It usually displays a view with "Done", "Next" buttons.
   */
  disableInputAccessoryView?: boolean
  /**
   * Specifies the native scrollbar style to use with the webview.
   * CSS styles that modify the scrollbar are applied on top of the native appearance configured here.
   *
   * Defaults to `default`, which is the browser default.
   *
   * ## Platform-specific
   *
   * - **Windows**:
   *   - `fluentOverlay` requires WebView2 Runtime version 125.0.2535.41 or higher, and does nothing
   *     on older versions.
   *   - This option must be given the same value for all webviews.
   * - **Linux / Android / iOS / macOS**: Unsupported. Only supports `Default` and performs no operation.
   */
  scrollBarStyle?: ScrollBarStyle
}
/**
 * Re-hydrates a plain monitor object received over IPC into a `Monitor`
 * whose position/size fields are real `PhysicalPosition`/`PhysicalSize`
 * instances. Passes `null` through unchanged.
 */
function mapMonitor(m: Monitor | null): Monitor | null {
  if (m === null) {
    return null
  }
  return {
    name: m.name,
    scaleFactor: m.scaleFactor,
    position: new PhysicalPosition(m.position),
    size: new PhysicalSize(m.size),
    workArea: {
      position: new PhysicalPosition(m.workArea.position),
      size: new PhysicalSize(m.workArea.size)
    }
  }
}
/**
* Returns the monitor on which the window currently resides.
* Returns `null` if current monitor can't be detected.
* @example
* ```typescript
* import { currentMonitor } from '@tauri-apps/api/window';
* const monitor = await currentMonitor();
* ```
*
* @since 1.0.0
*/
async function currentMonitor(): Promise<Monitor | null> {
return invoke<Monitor | null>('plugin:window|current_monitor').then(
mapMonitor
)
}
/**
* Returns the primary monitor of the system.
* Returns `null` if it can't identify any monitor as a primary one.
* @example
* ```typescript
* import { primaryMonitor } from '@tauri-apps/api/window';
* const monitor = await primaryMonitor();
* ```
*
* @since 1.0.0
*/
async function primaryMonitor(): Promise<Monitor | null> {
return invoke<Monitor | null>('plugin:window|primary_monitor').then(
mapMonitor
)
}
/**
* Returns the monitor that contains the given point. Returns `null` if can't find any.
* @example
* ```typescript
* import { monitorFromPoint } from '@tauri-apps/api/window';
* const monitor = await monitorFromPoint(100.0, 200.0);
* ```
*
* @since 1.0.0
*/
async function monitorFromPoint(x: number, y: number): Promise<Monitor | null> {
return invoke<Monitor | null>('plugin:window|monitor_from_point', {
x,
y
}).then(mapMonitor)
}
/**
* Returns the list of all the monitors available on the system.
* @example
* ```typescript
* import { availableMonitors } from '@tauri-apps/api/window';
* const monitors = await availableMonitors();
* ```
*
* @since 1.0.0
*/
async function availableMonitors(): Promise<Monitor[]> {
return invoke<Monitor[]>('plugin:window|available_monitors').then(
(ms) => ms.map(mapMonitor) as Monitor[]
)
}
/**
 * Get the cursor position relative to the top-left hand corner of the desktop.
 *
 * Note that the top-left hand corner of the desktop is not necessarily the same as the screen.
 * If the user uses a desktop with multiple monitors,
 * the top-left hand corner of the desktop is the top-left hand corner of the main monitor on Windows and macOS
 * or the top-left of the leftmost monitor on X11.
 *
 * The coordinates can be negative if the top-left hand corner of the window is outside of the visible screen region.
 */
async function cursorPosition(): Promise<PhysicalPosition> {
  const pos = await invoke<PhysicalPosition>('plugin:window|cursor_position')
  return new PhysicalPosition(pos)
}
// Runtime exports: classes, factory helpers and enums usable as values.
export {
  Window,
  CloseRequestedEvent,
  getCurrentWindow,
  getAllWindows,
  LogicalSize,
  PhysicalSize,
  LogicalPosition,
  PhysicalPosition,
  UserAttentionType,
  Effect,
  EffectState,
  currentMonitor,
  monitorFromPoint,
  primaryMonitor,
  availableMonitors,
  cursorPosition
}
// Type-only exports: erased at compile time, usable in annotations only.
export type {
  Effects,
  Theme,
  TitleBarStyle,
  ScaleFactorChanged,
  WindowOptions,
  Color,
  BackgroundThrottlingPolicy,
  DragDropEvent,
  ScrollBarStyle
}
# flake8: NOQA
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logging configuration (formatters, handlers..)
Note:
-----
This file doe's not include the actual loggers.
The loggers are configured in the config.yaml file
in order to expose them to cli users.
"""
# Dict-schema logging configuration (formatters + handlers) consumed by
# ``logging.config.dictConfig``. The loggers themselves live in config.yaml
# (so CLI users can tweak them) and reference these building blocks by name.
LOGGER = {
    "version": 1,
    "formatters": {
        # Timestamped, levelled format for the rotating log file.
        "file": {
            "format": "%(asctime)s [%(levelname)s] %(message)s"
        },
        # Bare messages for console output.
        "console": {
            "format": "%(message)s"
        }
    },
    "handlers": {
        "file": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "file",
            # RotatingFileHandler expects integers here. The previous string
            # values ("5000000" / "20") broke size comparison in
            # shouldRollover, silently disabling log rotation.
            # Rotate at ~5 MB, keeping 20 backup files.
            "maxBytes": 5000000,
            "backupCount": 20
        },
        "console": {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
            "formatter": "console"
        }
    }
}
import { assert, describe, it } from 'vitest';
import { parseCss } from 'svelte/compiler';
// Behavioral coverage for the standalone `parseCss` entry point: root node
// shape, rule/at-rule kinds, selector decomposition, positions and BOM
// handling.
describe('parseCss', () => {
	it('parses a simple rule', () => {
		const ast = parseCss('div { color: red; }');
		assert.equal(ast.type, 'StyleSheetFile');
		assert.equal(ast.children.length, 1);
		assert.equal(ast.children[0].type, 'Rule');
	});

	it('parses at-rules', () => {
		const ast = parseCss('@media (min-width: 800px) { div { color: red; } }');
		assert.equal(ast.children.length, 1);
		assert.equal(ast.children[0].type, 'Atrule');
		if (ast.children[0].type === 'Atrule') {
			assert.equal(ast.children[0].name, 'media');
		}
	});

	it('parses @import', () => {
		const ast = parseCss("@import 'foo.css';");
		assert.equal(ast.children.length, 1);
		assert.equal(ast.children[0].type, 'Atrule');
		if (ast.children[0].type === 'Atrule') {
			assert.equal(ast.children[0].name, 'import');
			// block-less at-rule: no `{ ... }` body
			assert.equal(ast.children[0].block, null);
		}
	});

	it('parses multiple rules', () => {
		const ast = parseCss('div { color: red; } span { color: blue; }');
		assert.equal(ast.children.length, 2);
	});

	it('has correct start/end positions', () => {
		const ast = parseCss('div { color: red; }');
		assert.equal(ast.start, 0);
		assert.equal(ast.end, 19);
	});

	it('strips BOM', () => {
		// positions are relative to the source with the BOM removed
		const ast = parseCss('\uFEFFdiv { color: red; }');
		assert.equal(ast.start, 0);
		assert.equal(ast.end, 19);
	});

	it('parses nested rules', () => {
		const ast = parseCss('div { color: red; span { color: blue; } }');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		assert.equal(rule.type, 'Rule');
		if (rule.type === 'Rule') {
			assert.equal(rule.block.children.length, 2); // declaration + nested rule
		}
	});

	it('parses empty stylesheet', () => {
		const ast = parseCss('');
		assert.equal(ast.type, 'StyleSheetFile');
		assert.equal(ast.children.length, 0);
		assert.equal(ast.start, 0);
		assert.equal(ast.end, 0);
	});

	it('parses whitespace-only stylesheet', () => {
		const ast = parseCss('   \n\t  ');
		assert.equal(ast.children.length, 0);
	});

	it('parses comments', () => {
		const ast = parseCss('/* comment */ div { color: red; }');
		assert.equal(ast.children.length, 1);
		assert.equal(ast.children[0].type, 'Rule');
	});

	it('parses complex selectors', () => {
		const ast = parseCss('div > span + p ~ a { color: red; }');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		if (rule.type === 'Rule') {
			assert.equal(rule.prelude.type, 'SelectorList');
			assert.equal(rule.prelude.children.length, 1);
			// div > span + p ~ a has 4 relative selectors
			assert.equal(rule.prelude.children[0].children.length, 4);
		}
	});

	it('parses pseudo-classes and pseudo-elements', () => {
		const ast = parseCss('div:hover::before { color: red; }');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		if (rule.type === 'Rule') {
			const selectors = rule.prelude.children[0].children[0].selectors;
			assert.equal(selectors.length, 3); // div, :hover, ::before
			assert.equal(selectors[0].type, 'TypeSelector');
			assert.equal(selectors[1].type, 'PseudoClassSelector');
			assert.equal(selectors[2].type, 'PseudoElementSelector');
		}
	});

	it('parses @keyframes', () => {
		const ast = parseCss('@keyframes fade { from { opacity: 0; } to { opacity: 1; } }');
		assert.equal(ast.children.length, 1);
		assert.equal(ast.children[0].type, 'Atrule');
		if (ast.children[0].type === 'Atrule') {
			assert.equal(ast.children[0].name, 'keyframes');
			assert.notEqual(ast.children[0].block, null);
		}
	});

	it('parses class and id selectors', () => {
		const ast = parseCss('.foo#bar { color: red; }');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		if (rule.type === 'Rule') {
			const selectors = rule.prelude.children[0].children[0].selectors;
			assert.equal(selectors.length, 2);
			assert.equal(selectors[0].type, 'ClassSelector');
			assert.equal(selectors[1].type, 'IdSelector');
		}
	});

	it('parses attribute selectors', () => {
		const ast = parseCss('[data-foo="bar"] { color: red; }');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		if (rule.type === 'Rule') {
			const selectors = rule.prelude.children[0].children[0].selectors;
			assert.equal(selectors.length, 1);
			assert.equal(selectors[0].type, 'AttributeSelector');
			if (selectors[0].type === 'AttributeSelector') {
				assert.equal(selectors[0].name, 'data-foo');
				assert.equal(selectors[0].value, 'bar');
			}
		}
	});

	it('parses escaped characters', () => {
		// the escaped quote inside url(...) must not terminate the value
		const ast = parseCss("div { background: url('./example.png?\\''); }");
		assert.equal(ast.type, 'StyleSheetFile');
		assert.equal(ast.children.length, 1);
		const rule = ast.children[0];
		assert.equal(rule.type, 'Rule');
		if (rule.type === 'Rule') {
			const declaration = rule.block.children[0];
			assert.equal(declaration.type, 'Declaration');
			if (declaration.type === 'Declaration') {
				assert.equal(declaration.value, "url('./example.png?\\'')");
			}
		}
	});
});
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
from ducktape.mark.resource import cluster
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
class JmxTool(JmxMixin, KafkaPathResolverMixin):
"""
Simple helper class for using the JmxTool directly instead of as a mix-in
"""
def __init__(self, text_context, *args, **kwargs):
JmxMixin.__init__(self, num_nodes=1, *args, **kwargs)
self.context = text_context
@property
def logger(self):
return self.context.logger
class FetchFromFollowerTest(ProduceConsumeValidateTest):
RACK_AWARE_REPLICA_SELECTOR = "org.apache.kafka.common.replica.RackAwareReplicaSelector"
METADATA_MAX_AGE_MS = 3000
def __init__(self, test_context):
super(FetchFromFollowerTest, self).__init__(test_context=test_context)
self.jmx_tool = JmxTool(test_context, jmx_poll_ms=100)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context,
num_nodes=3,
zk=self.zk,
topics={
self.topic: {
"partitions": 1,
"replication-factor": 3,
"configs": {"min.insync.replicas": 1}},
},
server_prop_overides=[
["replica.selector.class", self.RACK_AWARE_REPLICA_SELECTOR]
],
per_node_server_prop_overrides={
1: [("broker.rack", "rack-a")],
2: [("broker.rack", "rack-b")],
3: [("broker.rack", "rack-c")]
})
self.producer_throughput = 1000
self.num_producers = 1
self.num_consumers = 1
def min_cluster_size(self):
return super(FetchFromFollowerTest, self).min_cluster_size() + self.num_producers * 2 + self.num_consumers * 2
def setUp(self):
self.zk.start()
self.kafka.start()
@cluster(num_nodes=9)
def test_consumer_preferred_read_replica(self):
"""
This test starts up brokers with "broker.rack" and "replica.selector.class" configurations set. The replica
selector is set to the rack-aware implementation. One of the brokers has a different rack than the other two.
We then use a console consumer with the "client.rack" set to the same value as the differing broker. After
producing some records, we verify that the client has been informed of the preferred replica and that all the
records are properly consumed.
"""
# Find the leader, configure consumer to be on a different rack
leader_node = self.kafka.leader(self.topic, 0)
leader_idx = self.kafka.idx(leader_node)
non_leader_idx = 2 if leader_idx != 2 else 1
non_leader_rack = "rack-b" if leader_idx != 2 else "rack-a"
self.logger.debug("Leader %d %s" % (leader_idx, leader_node))
self.logger.debug("Non-Leader %d %s" % (non_leader_idx, non_leader_rack))
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, self.topic,
throughput=self.producer_throughput)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
client_id="console-consumer", group_id="test-consumer-group-1",
consumer_timeout_ms=60000, message_validator=is_int,
consumer_properties={"client.rack": non_leader_rack, "metadata.max.age.ms": self.METADATA_MAX_AGE_MS})
# Start up and let some data get produced
self.start_producer_and_consumer()
time.sleep(self.METADATA_MAX_AGE_MS * 2. / 1000)
consumer_node = self.consumer.nodes[0]
consumer_idx = self.consumer.idx(consumer_node)
read_replica_attribute = "preferred-read-replica"
read_replica_mbean = "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=%s,topic=%s,partition=%d" % \
("console-consumer", self.topic, 0)
self.jmx_tool.jmx_object_names = [read_replica_mbean]
self.jmx_tool.jmx_attributes = [read_replica_attribute]
self.jmx_tool.start_jmx_tool(consumer_idx, consumer_node)
# Wait for at least one interval of "metadata.max.age.ms"
time.sleep(self.METADATA_MAX_AGE_MS * 2. / 1000)
# Read the JMX output
self.jmx_tool.read_jmx_output(consumer_idx, consumer_node)
all_captured_preferred_read_replicas = defaultdict(int)
self.logger.debug(self.jmx_tool.jmx_stats)
for ts, data in self.jmx_tool.jmx_stats[0].items():
for k, v in data.items():
if k.endswith(read_replica_attribute):
all_captured_preferred_read_replicas[int(v)] += 1
self.logger.debug("Saw the following preferred read replicas %s",
dict(all_captured_preferred_read_replicas.items()))
assert all_captured_preferred_read_replicas[non_leader_idx] > 0, \
"Expected to see broker %d (%s) as a preferred replica" % (non_leader_idx, non_leader_rack)
# Validate consumed messages
self.stop_producer_and_consumer()
self.validate() | unknown | codeparrot/codeparrot-clean | ||
"""these are live tests - you need to have the env vars AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY set and you will need to have (at this time) an EC2
instance, running or stopped, in the us-east-1 region
TODO: setup a mock server (e.g. http://www.mbtest.org/ or http://www.mock-server.com/)
as an alternative to live testing.
"""
from nose.tools import *
import os
import sys
import pickle
from rxaws.source.region import *
from rxaws.source.ec2instance import *
def test_region_source():
# Region should return list of dict where dict has a key 'RegionName'
test_source((RegionSource(), 'RegionName', sys._getframe().f_code.co_name))
# put more tests here if needed
def test_ec2instance_source():
# Ec2Instance should return list of dict where dict has a key 'InstanceId'
test_source((Ec2InstanceSource(), 'InstanceId', sys._getframe().f_code.co_name))
# put more tests here if needed
@nottest
def test_source(spec_tuple):
"""
helper function to test concrete source classes
:param spec_tuple: [0] class under test
[1] the key in the returned dict we expect
[2] this function's name
:return:
"""
# a source object should return a list of dict where the dict has
# an expected key
result = spec_tuple[0].execute()[0]
assert spec_tuple[1] in result, 'key %s does not exist' % spec_tuple[1]
# if this test passed, emit the test data we can use it for test fixtures!!
emit_fixture(spec_tuple)
def emit_fixture(spec_tuple):
# if the env var EMIT_FIXTURES is set, then we want to emit the test data
# we can use it for mock injection in other tests!!!
if not os.getenv('EMIT_FIXTURES'):
return
# emit all the element of the source object to the fixtures directory
fixture_file_name = os.getcwd() + '/tests/fixtures/source/' + spec_tuple[2] + '.ser'
pickle.dump([elem for elem in spec_tuple[0].execute()], open(fixture_file_name, "wb")) | unknown | codeparrot/codeparrot-clean | ||
"""
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate,
OFTDateTime,
OFTInteger,
OFTInteger64,
OFTReal,
OFTString,
OFTTime,
)
def mapping(data_source, geom_name="geom", layer_key=0, multi_geom=False):
"""
Given a DataSource, generate a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, str):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError(
"Data source parameter must be a string or a DataSource object."
)
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == "_":
mfield += "field"
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print model definition to stout
or put this in a Python script and use to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
__str__() method (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants to specify
give specific fields to have null, then a list/tuple of OGR field
names may be used.
Note: Call the _ogrinspect() helper to do the heavy lifting.
"""
return "\n".join(_ogrinspect(*args, **kwargs))
def _ogrinspect(
data_source,
model_name,
geom_name="geom",
layer_key=0,
srid=None,
multi_geom=False,
name_field=None,
imports=True,
decimal=False,
blank=False,
null=False,
):
"""
Helper routine for `ogrinspect` that generates GeoDjango models
corresponding to the given data source. See the `ogrinspect` docstring for
more details.
"""
# Getting the DataSource
if isinstance(data_source, str):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError(
"Data source parameter must be a string or a DataSource object."
)
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append("null=True")
if field_name.lower() in blank_fields:
kwlist.append("blank=True")
if kwlist:
return ", " + ", ".join(kwlist)
else:
return ""
# For those wishing to disable the imports.
if imports:
yield "# This is an auto-generated Django model module created by ogrinspect."
yield "from django.contrib.gis.db import models"
yield ""
yield ""
yield "class %s(models.Model):" % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types
):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == "_":
mfield += "field"
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield (
" %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)"
) % (
mfield,
width,
precision,
kwargs_str,
)
else:
yield " %s = models.FloatField(%s)" % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield " %s = models.IntegerField(%s)" % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield " %s = models.BigIntegerField(%s)" % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield " %s = models.CharField(max_length=%s%s)" % (
mfield,
width,
kwargs_str,
)
elif field_type is OFTDate:
yield " %s = models.DateField(%s)" % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield " %s = models.DateTimeField(%s)" % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield " %s = models.TimeField(%s)" % (mfield, kwargs_str[2:])
else:
raise TypeError("Unknown field type %s in %s" % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = "srid=-1"
else:
srid = layer.srs.srid
if srid is None:
srid_str = "srid=-1"
elif srid == 4326:
# WGS84 is already the default.
srid_str = ""
else:
srid_str = "srid=%s" % srid
else:
srid_str = "srid=%s" % srid
yield " %s = models.%s(%s)" % (geom_name, geom_field, srid_str)
if name_field:
yield ""
yield " def __str__(self): return self.%s" % name_field | python | github | https://github.com/django/django | django/contrib/gis/utils/ogrinspect.py |
#!/usr/bin/env python
import os
import Queue
import sys
import traceback
import time
sys.path += ['plugins'] # so 'import hook' works without duplication
sys.path += ['lib']
os.chdir(sys.path[0] or '.') # do stuff relative to the install directory
class Bot(object):
def __init__(self):
self.conns = {}
self.persist_dir = os.path.abspath('persist')
if not os.path.exists(self.persist_dir):
os.mkdir(self.persist_dir)
bot = Bot()
print 'Loading plugins'
# bootstrap the reloader
eval(compile(open(os.path.join('core', 'reload.py'), 'U').read(),
os.path.join('core', 'reload.py'), 'exec'))
reload(init=True)
print 'Connecting to IRC'
try:
config()
if not hasattr(bot, 'config'):
exit()
except Exception, e:
print 'ERROR: malformed config file:', e
traceback.print_exc()
sys.exit()
print 'Running main loop'
while True:
reload() # these functions only do things
config() # if changes have occured
for conn in bot.conns.itervalues():
try:
out = conn.out.get_nowait()
main(conn, out)
except Queue.Empty:
pass
while all(conn.out.empty() for conn in bot.conns.itervalues()):
time.sleep(.1) | unknown | codeparrot/codeparrot-clean | ||
# encoding: utf-8
from __future__ import absolute_import, division, print_function
from struct import Struct
from .exceptions import UnexpectedEndOfFileError
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
class StreamReader(object):
"""
Wraps a file-like object to provide access to structured data from a
binary file. Byte-order is configurable. *base_offset* is added to any
base value provided to calculate actual location for reads.
"""
def __init__(self, stream, byte_order, base_offset=0):
super(StreamReader, self).__init__()
self._stream = stream
self._byte_order = (
LITTLE_ENDIAN if byte_order == LITTLE_ENDIAN else BIG_ENDIAN
)
self._base_offset = base_offset
def read(self, count):
"""
Allow pass-through read() call
"""
return self._stream.read(count)
def read_byte(self, base, offset=0):
"""
Return the int value of the byte at the file position defined by
self._base_offset + *base* + *offset*. If *base* is None, the byte is
read from the current position in the stream.
"""
fmt = 'B'
return self._read_int(fmt, base, offset)
def read_long(self, base, offset=0):
"""
Return the int value of the four bytes at the file position defined by
self._base_offset + *base* + *offset*. If *base* is None, the long is
read from the current position in the stream. The endian setting of
this instance is used to interpret the byte layout of the long.
"""
fmt = '<L' if self._byte_order is LITTLE_ENDIAN else '>L'
return self._read_int(fmt, base, offset)
def read_short(self, base, offset=0):
"""
Return the int value of the two bytes at the file position determined
by *base* and *offset*, similarly to ``read_long()`` above.
"""
fmt = b'<H' if self._byte_order is LITTLE_ENDIAN else b'>H'
return self._read_int(fmt, base, offset)
def read_str(self, char_count, base, offset=0):
"""
Return a string containing the *char_count* bytes at the file
position determined by self._base_offset + *base* + *offset*.
"""
def str_struct(char_count):
format_ = '%ds' % char_count
return Struct(format_)
struct = str_struct(char_count)
chars = self._unpack_item(struct, base, offset)
unicode_str = chars.decode('UTF-8')
return unicode_str
def seek(self, base, offset=0):
location = self._base_offset + base + offset
self._stream.seek(location)
def tell(self):
"""
Allow pass-through tell() call
"""
return self._stream.tell()
def _read_bytes(self, byte_count, base, offset):
self.seek(base, offset)
bytes_ = self._stream.read(byte_count)
if len(bytes_) < byte_count:
raise UnexpectedEndOfFileError
return bytes_
def _read_int(self, fmt, base, offset):
struct = Struct(fmt)
return self._unpack_item(struct, base, offset)
def _unpack_item(self, struct, base, offset):
bytes_ = self._read_bytes(struct.size, base, offset)
return struct.unpack(bytes_)[0] | unknown | codeparrot/codeparrot-clean | ||
function Test() {
const obj = {
21: 'dimaMachina',
};
// Destructuring assignment
const {21: myVar} = obj;
return (
<div>
{obj[21]}
{myVar}
</div>
);
}
export const FIXTURE_ENTRYPOINT = {
fn: Test,
params: [{}],
}; | javascript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/numeric-literal-as-object-property-key.js |
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
from copy import copy as shallowcopy
from functools import partial
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.module_utils.six import iteritems, string_types, with_metaclass
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.parsing.dataloader import DataLoader
from ansible.utils.display import Display
from ansible.utils.sentinel import Sentinel
from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
display = Display()
def _generic_g(prop_name, self):
    # Plain getter used for simple (non-inherited, non-computed) field
    # attributes: read the raw stored value, substituting the declared
    # default when the attribute was never explicitly set.
    attrs = self._attributes
    if prop_name not in attrs:
        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
    value = attrs[prop_name]
    # Sentinel marks "never set"; fall back to the per-instance default
    return self._attr_defaults[prop_name] if value is Sentinel else value
def _generic_g_method(prop_name, self):
try:
if self._squashed:
return self._attributes[prop_name]
method = "_get_attr_%s" % prop_name
return getattr(self, method)()
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
def _generic_g_parent(prop_name, self):
    # Getter for attributes that may inherit their value from a parent
    # object. Squashed or finalized objects short-circuit to their own
    # stored value; otherwise the parent chain is consulted first.
    try:
        if not (self._squashed or self._finalized):
            try:
                value = self._get_parent_attribute(prop_name)
            except AttributeError:
                # no parent lookup available; use the locally stored value
                value = self._attributes[prop_name]
        else:
            value = self._attributes[prop_name]
    except KeyError:
        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
    # Sentinel marks "never set"; substitute the declared default
    return self._attr_defaults[prop_name] if value is Sentinel else value
def _generic_s(prop_name, self, value):
self._attributes[prop_name] = value
def _generic_d(prop_name, self):
del self._attributes[prop_name]
class BaseMeta(type):
    """
    Metaclass for the Base object, which is used to construct the class
    attributes based on the FieldAttributes available.

    For every Attribute declared on the class (or any of its ancestors),
    a property is installed that dispatches to one of the generic
    getter/setter/deleter functions above, and bookkeeping dictionaries
    (_valid_attrs, _attributes, _attr_defaults, _alias_attrs) are
    populated for use by the load/validate/copy machinery.
    """
    def __new__(cls, name, parents, dct):
        def _create_attrs(src_dict, dst_dict):
            '''
            Helper method which creates the attributes based on those in the
            source dictionary of attributes. This also populates the other
            attributes used to keep track of these attributes and via the
            getter/setter/deleter methods.
            '''
            # snapshot the keys, since dst_dict may be mutated below while
            # src_dict and dst_dict can be the same object
            keys = list(src_dict.keys())
            for attr_name in keys:
                value = src_dict[attr_name]
                if isinstance(value, Attribute):
                    # FieldAttributes are conventionally declared with a
                    # leading underscore; the public property drops it
                    if attr_name.startswith('_'):
                        attr_name = attr_name[1:]
                    # here we selectively assign the getter based on a few
                    # things, such as whether we have a _get_attr_<name>
                    # method, or if the attribute is marked as not inheriting
                    # its value from a parent object
                    method = "_get_attr_%s" % attr_name
                    if method in src_dict or method in dst_dict:
                        getter = partial(_generic_g_method, attr_name)
                    elif ('_get_parent_attribute' in dst_dict or '_get_parent_attribute' in src_dict) and value.inherit:
                        getter = partial(_generic_g_parent, attr_name)
                    else:
                        getter = partial(_generic_g, attr_name)
                    # setter and deleter are always the generic dict-backed ones
                    setter = partial(_generic_s, attr_name)
                    deleter = partial(_generic_d, attr_name)
                    dst_dict[attr_name] = property(getter, setter, deleter)
                    # bookkeeping: known attrs, Sentinel "unset" marker, defaults
                    dst_dict['_valid_attrs'][attr_name] = value
                    dst_dict['_attributes'][attr_name] = Sentinel
                    dst_dict['_attr_defaults'][attr_name] = value.default
                    # an alias gets its own property but is recorded as
                    # mapping back to the canonical attribute name
                    if value.alias is not None:
                        dst_dict[value.alias] = property(getter, setter, deleter)
                        dst_dict['_valid_attrs'][value.alias] = value
                        dst_dict['_alias_attrs'][value.alias] = attr_name
        def _process_parents(parents, dst_dict):
            '''
            Helper method which creates attributes from all parent objects
            recursively on through grandparent objects
            '''
            for parent in parents:
                if hasattr(parent, '__dict__'):
                    _create_attrs(parent.__dict__, dst_dict)
                    # merge so the child's entries take precedence over the
                    # parent's when recursing further up the MRO
                    new_dst_dict = parent.__dict__.copy()
                    new_dst_dict.update(dst_dict)
                    _process_parents(parent.__bases__, new_dst_dict)
        # create some additional class attributes
        dct['_attributes'] = {}
        dct['_attr_defaults'] = {}
        dct['_valid_attrs'] = {}
        dct['_alias_attrs'] = {}
        # now create the attributes based on the FieldAttributes
        # available, including from parent (and grandparent) objects
        _create_attrs(dct, dct)
        _process_parents(parents, dct)
        return super(BaseMeta, cls).__new__(cls, name, parents, dct)
class FieldAttributeBase(with_metaclass(BaseMeta, object)):
def __init__(self):
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
self._loader = None
self._variable_manager = None
# other internal params
self._validated = False
self._squashed = False
self._finalized = False
# every object gets a random uuid:
self._uuid = get_unique_id()
# we create a copy of the attributes here due to the fact that
# it was initialized as a class param in the meta class, so we
# need a unique object here (all members contained within are
# unique already).
self._attributes = self.__class__._attributes.copy()
self._attr_defaults = self.__class__._attr_defaults.copy()
for key, value in self._attr_defaults.items():
if callable(value):
self._attr_defaults[key] = value()
# and init vars, avoid using defaults in field declaration as it lives across plays
self.vars = dict()
def dump_me(self, depth=0):
''' this is never called from production code, it is here to be used when debugging as a 'complex print' '''
if depth == 0:
display.debug("DUMPING OBJECT ------------------------------------------------------")
display.debug("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
if hasattr(self, '_parent') and self._parent:
self._parent.dump_me(depth + 2)
dep_chain = self._parent.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.dump_me(depth + 2)
if hasattr(self, '_play') and self._play:
self._play.dump_me(depth + 2)
def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
return ds
    def load_data(self, ds, variable_manager=None, loader=None):
        '''
        Walk the input datastructure and assign any values found to the
        matching field attributes on this object.

        :arg ds: the raw datastructure (e.g. parsed YAML) to load from;
            must not be None
        :kwarg variable_manager: variable manager used later for templating
        :kwarg loader: DataLoader to use; a new one is created if omitted
        :returns: self, after attributes are populated and validate() has run
        :raises AnsibleAssertionError: if ds is None
        '''
        if ds is None:
            raise AnsibleAssertionError('ds (%s) should not be None but it is.' % ds)
        # cache the datastructure internally
        setattr(self, '_ds', ds)
        # the variable manager class is used to manage and merge variables
        # down to a single dictionary for reference in templating, etc.
        self._variable_manager = variable_manager
        # the data loader class is used to parse data from strings and files
        if loader is not None:
            self._loader = loader
        else:
            self._loader = DataLoader()
        # call the preprocess_data() function to massage the data into
        # something we can more easily parse, and then call the validation
        # function on it to ensure there are no incorrect key values
        ds = self.preprocess_data(ds)
        self._validate_attributes(ds)
        # Walk all attributes in the class. We sort them based on their priority
        # so that certain fields can be loaded before others, if they are dependent.
        for name, attr in sorted(iteritems(self._valid_attrs), key=operator.itemgetter(1)):
            # copy the value over unless a _load_field method is defined
            target_name = name
            if name in self._alias_attrs:
                # aliases store their value under the canonical attribute name
                target_name = self._alias_attrs[name]
            if name in ds:
                # a _load_<name> method, when present, transforms the raw value
                method = getattr(self, '_load_%s' % name, None)
                if method:
                    self._attributes[target_name] = method(name, ds[name])
                else:
                    self._attributes[target_name] = ds[name]
        # run early, non-critical validation
        self.validate()
        # return the constructed object
        return self
def get_ds(self):
try:
return getattr(self, '_ds')
except AttributeError:
return None
def get_loader(self):
return self._loader
def get_variable_manager(self):
return self._variable_manager
def _post_validate_debugger(self, attr, value, templar):
value = templar.template(value)
valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
if value and isinstance(value, string_types) and value not in valid_values:
raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
return value
def _validate_attributes(self, ds):
'''
Ensures that there are no keys in the datastructure which do
not map to attributes for this object.
'''
valid_attrs = frozenset(self._valid_attrs.keys())
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
def validate(self, all_vars=None):
''' validation that is done at parse time, not load time '''
all_vars = {} if all_vars is None else all_vars
if not self._validated:
# walk all fields in the object
for (name, attribute) in iteritems(self._valid_attrs):
if name in self._alias_attrs:
name = self._alias_attrs[name]
# run validator only if present
method = getattr(self, '_validate_%s' % name, None)
if method:
method(attribute, name, getattr(self, name))
else:
# and make sure the attribute is of the type it should be
value = self._attributes[name]
if value is not None:
if attribute.isa == 'string' and isinstance(value, (list, dict)):
raise AnsibleParserError(
"The field '%s' is supposed to be a string type,"
" however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
)
self._validated = True
def squash(self):
'''
Evaluates all attributes and sets them to the evaluated version,
so that all future accesses of attributes do not need to evaluate
parent attributes.
'''
if not self._squashed:
for name in self._valid_attrs.keys():
self._attributes[name] = getattr(self, name)
self._squashed = True
def copy(self):
'''
Create a copy of this object and return it.
'''
new_me = self.__class__()
for name in self._valid_attrs.keys():
if name in self._alias_attrs:
continue
new_me._attributes[name] = shallowcopy(self._attributes[name])
new_me._attr_defaults[name] = shallowcopy(self._attr_defaults[name])
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
new_me._validated = self._validated
new_me._finalized = self._finalized
new_me._uuid = self._uuid
# if the ds value was set on the object, copy it to the new copy too
if hasattr(self, '_ds'):
new_me._ds = self._ds
return new_me
    def get_validated_value(self, name, attribute, value, templar):
        '''
        Coerce *value* to the type declared by *attribute.isa* and return
        the result.

        :arg name: the attribute name (used only in error messages)
        :arg attribute: the FieldAttribute declaration, whose `isa` selects
            the coercion branch below
        :arg value: the raw value to coerce
        :arg templar: templar passed through to post_validate() for
            'class'-typed attributes
        :raises AnsibleParserError: for invalid list contents
        :raises TypeError: for invalid 'dict' or 'class' values, or if a
            numeric conversion fails
        '''
        if attribute.isa == 'string':
            value = to_text(value)
        elif attribute.isa == 'int':
            value = int(value)
        elif attribute.isa == 'float':
            value = float(value)
        elif attribute.isa == 'bool':
            # strict=True rejects values that are not recognized booleans
            value = boolean(value, strict=True)
        elif attribute.isa == 'percent':
            # special value, which may be an integer or float
            # with an optional '%' at the end
            if isinstance(value, string_types) and '%' in value:
                value = value.replace('%', '')
            value = float(value)
        elif attribute.isa == 'list':
            # None becomes an empty list; a scalar is wrapped in a list
            if value is None:
                value = []
            elif not isinstance(value, list):
                value = [value]
            # optionally enforce the element type declared by listof
            if attribute.listof is not None:
                for item in value:
                    if not isinstance(item, attribute.listof):
                        raise AnsibleParserError("the field '%s' should be a list of %s, "
                                                 "but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                    elif attribute.required and attribute.listof == string_types:
                        if item is None or item.strip() == "":
                            raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
        elif attribute.isa == 'set':
            if value is None:
                value = set()
            elif not isinstance(value, (list, set)):
                if isinstance(value, string_types):
                    value = value.split(',')
                else:
                    # Making a list like this handles strings of
                    # text and bytes properly
                    value = [value]
            if not isinstance(value, set):
                value = set(value)
        elif attribute.isa == 'dict':
            if value is None:
                value = dict()
            elif not isinstance(value, dict):
                raise TypeError("%s is not a dictionary" % value)
        elif attribute.isa == 'class':
            # value must already be an instance of the declared class type;
            # its own post-validation is run here
            if not isinstance(value, attribute.class_type):
                raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
            value.post_validate(templar=templar)
        return value
def post_validate(self, templar):
    '''
    we can't tell that everything is of the right type until we have
    all the variables.  Run basic types (from isa) as well as
    any _post_validate_<foo> functions.

    Templates each non-static field attribute, coerces it to its declared
    type via get_validated_value(), and assigns the result back onto self.
    Sets self._finalized when all attributes have been processed.

    :arg templar: Templar used to render template expressions and to
        detect the special 'omit' placeholder value.
    :raises AnsibleParserError: for missing required fields, failed type
        coercion, or undefined variables (when the templar is configured
        to fail on them).
    '''

    # save the omit value for later checking
    omit_value = templar.available_variables.get('omit')

    for (name, attribute) in iteritems(self._valid_attrs):
        if attribute.static:
            value = getattr(self, name)

            # we don't template 'vars' but allow template as values for later use
            if name not in ('vars',) and templar.is_template(value):
                display.warning('"%s" is not templatable, but we found: %s, '
                                'it will not be templated and will be used "as is".' % (name, value))
            continue

        if getattr(self, name) is None:
            if not attribute.required:
                continue
            else:
                raise AnsibleParserError("the field '%s' is required but was not set" % name)
        elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
            # Intermediate objects like Play() won't have their fields validated by
            # default, as their values are often inherited by other objects and validated
            # later, so we don't want them to fail out early
            continue

        try:
            # Run the post-validator if present. These methods are responsible for
            # using the given templar to template the values, if required.
            method = getattr(self, '_post_validate_%s' % name, None)
            if method:
                value = method(attribute, getattr(self, name), templar)
            elif attribute.isa == 'class':
                value = getattr(self, name)
            else:
                # if the attribute contains a variable, template it now
                value = templar.template(getattr(self, name))

            # if this evaluated to the omit value, set the value back to
            # the default specified in the FieldAttribute and move on
            if omit_value is not None and value == omit_value:
                if callable(attribute.default):
                    setattr(self, name, attribute.default())
                else:
                    setattr(self, name, attribute.default)
                continue

            # and make sure the attribute is of the type it should be
            if value is not None:
                value = self.get_validated_value(name, attribute, value, templar)

            # and assign the massaged value back to the attribute field
            setattr(self, name, value)
        except (TypeError, ValueError) as e:
            value = getattr(self, name)
            # BUGFIX: the two adjacent string literals previously joined with
            # no space ("...an %s.The error was"), producing a garbled message.
            raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. "
                                     "The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            if templar._fail_on_undefined_errors and name != 'name':
                if name == 'args':
                    msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
                else:
                    msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
                raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)

    self._finalized = True
def _load_vars(self, attr, ds):
    '''
    Vars in a play can be specified either as a dictionary directly, or
    as a list of dictionaries. If the latter, this method will turn the
    list into a single dictionary, merging left-to-right (later entries
    win on key conflicts).

    :arg attr: the attribute being loaded (unused here, kept for the
        loader-dispatch signature).
    :arg ds: the raw datastructure from the play: dict, list of dicts,
        or None.
    :returns: a single dict of variables combined with self.vars.
    :raises AnsibleParserError: on a malformed structure or an invalid
        variable name.
    '''

    def _validate_variable_keys(ds):
        # Every variable name must be a legal identifier.
        for key in ds:
            if not isidentifier(key):
                raise TypeError("'%s' is not a valid variable name" % key)

    try:
        if isinstance(ds, dict):
            _validate_variable_keys(ds)
            return combine_vars(self.vars, ds)
        elif isinstance(ds, list):
            all_vars = self.vars
            for item in ds:
                if not isinstance(item, dict):
                    raise ValueError
                _validate_variable_keys(item)
                all_vars = combine_vars(all_vars, item)
            return all_vars
        elif ds is None:
            return {}
        else:
            raise ValueError
    except ValueError as e:
        raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__,
                                 obj=ds, orig_exc=e)
    except TypeError as e:
        raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
def _extend_value(self, value, new_value, prepend=False):
    '''
    Will extend the value given with new_value (and will turn both
    into lists if they are not so already).

    Note: despite appearances this does NOT fully deduplicate — the
    itertools.groupby pass only collapses *consecutive* duplicates, and
    None entries are dropped from the result.

    :arg prepend: when True, new_value items come before the existing ones.
    '''
    if not isinstance(value, list):
        value = [value]
    if not isinstance(new_value, list):
        new_value = [new_value]

    # Due to where _extend_value may run for some attributes
    # it is possible to end up with Sentinel in the list of values
    # ensure we strip them
    value = [v for v in value if v is not Sentinel]
    new_value = [v for v in new_value if v is not Sentinel]

    if prepend:
        combined = new_value + value
    else:
        combined = value + new_value

    # groupby collapses runs of equal adjacent items; None is filtered out.
    return [i for i, _ in itertools.groupby(combined) if i is not None]
def dump_attrs(self):
    '''
    Dumps all attributes to a dictionary

    Nested playbook objects (isa == 'class') are recursively serialized;
    every other attribute is stored as-is.
    '''
    dumped = {}
    for name, attribute in iteritems(self._valid_attrs):
        current = getattr(self, name)
        is_serializable_obj = attribute.isa == 'class' and hasattr(current, 'serialize')
        dumped[name] = current.serialize() if is_serializable_obj else current
    return dumped
def from_attrs(self, attrs):
    '''
    Loads attributes from a dictionary (the inverse of dump_attrs).

    Keys that are not valid field attributes are silently ignored.
    Values for 'class' attributes are deserialized into a fresh instance
    of the declared class type.
    '''
    for (attr, value) in iteritems(attrs):
        if attr in self._valid_attrs:
            attribute = self._valid_attrs[attr]
            if attribute.isa == 'class' and isinstance(value, dict):
                # Rebuild the nested playbook object from its serialized form.
                obj = attribute.class_type()
                obj.deserialize(value)
                setattr(self, attr, obj)
            else:
                setattr(self, attr, value)
def serialize(self):
    '''
    Serializes the object derived from the base object into
    a dictionary of values. This only serializes the field
    attributes for the object, so this may need to be overridden
    for any classes which wish to add additional items not stored
    as field attributes.

    The internal uuid/finalized/squashed bookkeeping values are
    included alongside the field attributes.
    '''
    data = self.dump_attrs()
    data.update(
        uuid=self._uuid,
        finalized=self._finalized,
        squashed=self._squashed,
    )
    return data
def deserialize(self, data):
    '''
    Given a dictionary of values, load up the field attributes for
    this object. As with serialize(), if there are any non-field
    attribute data members, this method will need to be overridden
    and extended.

    Attributes missing from ``data`` are reset to their declared
    defaults; the uuid/finalized/squashed bookkeeping values are
    restored as well.

    :raises AnsibleAssertionError: if ``data`` is not a dict.
    '''

    if not isinstance(data, dict):
        raise AnsibleAssertionError('data (%s) should be a dict but is a %s' % (data, type(data)))

    for (name, attribute) in iteritems(self._valid_attrs):
        if name in data:
            setattr(self, name, data[name])
        else:
            # Callable defaults (e.g. list/dict factories) must be invoked
            # per-instance to avoid sharing mutable state.
            if callable(attribute.default):
                setattr(self, name, attribute.default())
            else:
                setattr(self, name, attribute.default)

    # restore the UUID field
    setattr(self, '_uuid', data.get('uuid'))
    self._finalized = data.get('finalized', False)
    self._squashed = data.get('squashed', False)
class Base(FieldAttributeBase):
    """Field attributes shared by the playbook object hierarchy.

    Each FieldAttribute below is exposed (without the leading underscore)
    as a settable property on subclasses — presumably via FieldAttributeBase
    metaclass machinery not visible in this chunk; confirm before relying
    on that detail.
    """

    _name = FieldAttribute(isa='string', default='', always_post_validate=True, inherit=False)

    # connection/transport
    _connection = FieldAttribute(isa='string', default=context.cliargs_deferred_get('connection'))
    _port = FieldAttribute(isa='int')
    _remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))

    # variables
    _vars = FieldAttribute(isa='dict', priority=100, inherit=False, static=True)

    # module default params
    _module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)

    # flags and misc. settings
    _environment = FieldAttribute(isa='list', extend=True, prepend=True)
    _no_log = FieldAttribute(isa='bool')
    _run_once = FieldAttribute(isa='bool')
    _ignore_errors = FieldAttribute(isa='bool')
    _ignore_unreachable = FieldAttribute(isa='bool')
    _check_mode = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('check'))
    _diff = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('diff'))
    _any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
    _throttle = FieldAttribute(isa='int', default=0)
    _timeout = FieldAttribute(isa='int', default=C.TASK_TIMEOUT)

    # explicitly invoke a debugger on tasks
    _debugger = FieldAttribute(isa='string')

    # Privilege escalation
    _become = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('become'))
    _become_method = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_method'))
    _become_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_user'))
    _become_flags = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_flags'))
    _become_exe = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_exe'))

    # used to hold sudo/su stuff
    DEPRECATED_ATTRIBUTES = []
import time
from fabric.api import run, execute, env
# --- Deployment configuration for the Fabric tasks below ---
environment = "production"
env.use_ssh_config = True
env.hosts = ["shaaaaa"]  # host alias, resolved via ~/.ssh/config

branch = "master"
repo = "git@github.com:konklone/shaaaaaaaaaaaaa.git"

username = "shaaaaa"
# Capistrano-style layout: ~/<user>/<user>/{shared,versions,current}
home = "/home/%s/%s" % (username, username)
shared_path = "%s/shared" % home
versions_path = "%s/versions" % home
# Each deploy goes into a fresh timestamped directory under versions/.
version_path = "%s/%s" % (versions_path, time.strftime("%Y%m%d%H%M%S"))
current_path = "%s/current" % home

logs = "/home/%s" % username

# How many old version directories cleanup() keeps.
keep = 5
def checkout():
    """Clone the deploy branch into a fresh timestamped version directory."""
    command = 'git clone -q -b %s %s %s' % (branch, repo, version_path)
    run(command)
def dependencies():
    """Install node dependencies inside the newly checked-out version."""
    command = 'cd %s && npm install' % version_path
    run(command)
# TODO: why did I do this? (cp instead of ln)
def make_current():
    """Promote the new version directory to be the live 'current' copy.

    NOTE(review): copies the tree instead of symlinking (the commented-out
    line below is the symlink variant); reason unknown — see TODO above.
    """
    # run('rm -f %s && ln -s %s %s' % (current_path, version_path, current_path))
    run('rm -rf %s && cp -r %s %s' % (current_path, version_path, current_path))
def cleanup():
    """Delete old version directories, keeping only the newest `keep`."""
    existing = run("ls -x %s" % versions_path).split()
    for stale in existing[:-keep]:
        run("rm -rf %s/%s" % (versions_path, stale))
## can be run on their own
def start():
    """Start two forever-managed app instances (args 3000 and 3001).

    NOTE(review): the numbers are passed as positional args to app.js
    ('app.js 3000'), not via the '-p' flag used in the commented-out
    line — presumably app.js reads its port from argv; confirm before
    changing.
    """
    # run("cd %s && NODE_ENV=%s forever -l %s/forever.log -a start app.js -p 3000" % (current_path, environment, logs))
    run(("cd %s && " +
         "NODE_ENV=%s forever -l %s/forever.log -a start app.js 3000 && " +
         "NODE_ENV=%s forever -l %s/forever.log -a start app.js 3001") %
        (current_path, environment, logs, environment, logs)
        )
def stop():
    """Stop the forever-managed app.js processes on the remote host."""
    run("forever stop app.js")
def restart():
    """Restart the forever-managed app.js processes on the remote host."""
    run("forever restart app.js")
def deploy():
    """Run the full deploy pipeline in order: checkout, install deps,
    promote to current, restart the app, then prune old versions."""
    for step in (checkout, dependencies, make_current, restart, cleanup):
        execute(step)
#### Note: this error code is no longer emitted by the compiler.
An imported type or module cannot share its name with another type or
submodule defined in the importing module.
```compile_fail
use foo::Bar; // error
type Bar = u32;
mod foo {
pub mod Bar { }
}
fn main() {}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0256.md |
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_TRACE_VIEWER_TRACE_EVENTS_TO_JSON_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_TRACE_VIEWER_TRACE_EVENTS_TO_JSON_H_
#include "xprof/convert/trace_viewer/trace_events_to_json.h" // from @org_xprof // IWYU pragma: export
#endif // TENSORFLOW_CORE_PROFILER_CONVERT_TRACE_VIEWER_TRACE_EVENTS_TO_JSON_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/profiler/convert/trace_viewer/trace_events_to_json.h |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.google;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Maps.immutableEnumMap;
import static com.google.common.collect.testing.Helpers.mapEntry;
import static java.util.Arrays.asList;
import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Ordering;
import com.google.common.collect.testing.AnEnum;
import com.google.common.collect.testing.SampleElements;
import com.google.common.collect.testing.TestEnumMapGenerator;
import com.google.common.collect.testing.TestListGenerator;
import com.google.common.collect.testing.TestMapGenerator;
import com.google.common.collect.testing.TestStringListGenerator;
import com.google.common.collect.testing.TestStringMapGenerator;
import com.google.common.collect.testing.TestUnhashableCollectionGenerator;
import com.google.common.collect.testing.UnhashableObject;
import java.util.Collection;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.jspecify.annotations.NullMarked;
/**
* Generators of different types of map and related collections, such as keys, entries and values.
*
* @author Hayward Chan
*/
@GwtCompatible
@NullMarked
public class MapGenerators {
  /** Builds an {@code ImmutableMap} entry-by-entry via the builder. */
  public static class ImmutableMapGenerator extends TestStringMapGenerator {
    @Override
    protected Map<String, String> create(Entry<String, String>[] entries) {
      ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
      for (Entry<String, String> entry : entries) {
        checkNotNull(entry);
        builder.put(entry.getKey(), entry.getValue());
      }
      return builder.buildOrThrow();
    }
  }

  /** Builds an {@code ImmutableMap} via {@code copyOf} of a mutable map. */
  public static class ImmutableMapCopyOfGenerator extends TestStringMapGenerator {
    @Override
    protected Map<String, String> create(Entry<String, String>[] entries) {
      // LinkedHashMap preserves insertion order for the copy.
      Map<String, String> builder = new LinkedHashMap<>();
      for (Entry<String, String> entry : entries) {
        builder.put(entry.getKey(), entry.getValue());
      }
      return ImmutableMap.copyOf(builder);
    }
  }

  /** Builds an {@code ImmutableMap} via {@code copyOf} of an entry collection. */
  public static class ImmutableMapCopyOfEntriesGenerator extends TestStringMapGenerator {
    @Override
    protected Map<String, String> create(Entry<String, String>[] entries) {
      return ImmutableMap.copyOf(asList(entries));
    }
  }

  /** Produces the values view of an {@code ImmutableMap} holding unhashable values. */
  public static class ImmutableMapUnhashableValuesGenerator
      extends TestUnhashableCollectionGenerator<Collection<UnhashableObject>> {

    @Override
    public Collection<UnhashableObject> create(UnhashableObject[] elements) {
      ImmutableMap.Builder<Integer, UnhashableObject> builder = ImmutableMap.builder();
      int key = 1;
      for (UnhashableObject value : elements) {
        builder.put(key++, value);
      }
      return builder.buildOrThrow().values();
    }
  }

  /** Produces the key list ({@code keySet().asList()}) of an {@code ImmutableMap}. */
  public static class ImmutableMapKeyListGenerator extends TestStringListGenerator {
    @Override
    public List<String> create(String[] elements) {
      ImmutableMap.Builder<String, Integer> builder = ImmutableMap.builder();
      for (int i = 0; i < elements.length; i++) {
        builder.put(elements[i], i);
      }
      return builder.buildOrThrow().keySet().asList();
    }
  }

  /** Produces the value list ({@code values().asList()}) of an {@code ImmutableMap}. */
  public static class ImmutableMapValueListGenerator extends TestStringListGenerator {
    @Override
    public List<String> create(String[] elements) {
      ImmutableMap.Builder<Integer, String> builder = ImmutableMap.builder();
      for (int i = 0; i < elements.length; i++) {
        builder.put(i, elements[i]);
      }
      return builder.buildOrThrow().values().asList();
    }
  }

  /** Produces the entry list ({@code entrySet().asList()}) of an {@code ImmutableMap}. */
  public static class ImmutableMapEntryListGenerator
      implements TestListGenerator<Entry<String, Integer>> {

    @Override
    public SampleElements<Entry<String, Integer>> samples() {
      return new SampleElements<>(
          mapEntry("foo", 5),
          mapEntry("bar", 3),
          mapEntry("baz", 17),
          mapEntry("quux", 1),
          mapEntry("toaster", -2));
    }

    @SuppressWarnings("unchecked")
    @Override
    public Entry<String, Integer>[] createArray(int length) {
      return (Entry<String, Integer>[]) new Entry<?, ?>[length];
    }

    @Override
    public Iterable<Entry<String, Integer>> order(List<Entry<String, Integer>> insertionOrder) {
      return insertionOrder;
    }

    @Override
    public List<Entry<String, Integer>> create(Object... elements) {
      ImmutableMap.Builder<String, Integer> builder = ImmutableMap.builder();
      for (Object o : elements) {
        @SuppressWarnings("unchecked")
        Entry<String, Integer> entry = (Entry<String, Integer>) checkNotNull(o);
        builder.put(entry);
      }
      return builder.buildOrThrow().entrySet().asList();
    }
  }

  /** Builds an immutable enum map via {@code Maps.immutableEnumMap}. */
  public static class ImmutableEnumMapGenerator extends TestEnumMapGenerator {
    @Override
    protected Map<AnEnum, String> create(Entry<AnEnum, String>[] entries) {
      Map<AnEnum, String> map = new HashMap<>();
      for (Entry<AnEnum, String> entry : entries) {
        checkNotNull(entry);
        map.put(entry.getKey(), entry.getValue());
      }
      return immutableEnumMap(map);
    }
  }

  /** Builds an {@code ImmutableMap} via {@code copyOf} of an {@code EnumMap}. */
  public static class ImmutableMapCopyOfEnumMapGenerator extends TestEnumMapGenerator {
    @Override
    protected Map<AnEnum, String> create(Entry<AnEnum, String>[] entries) {
      EnumMap<AnEnum, String> map = new EnumMap<>(AnEnum.class);
      for (Entry<AnEnum, String> entry : entries) {
        map.put(entry.getKey(), entry.getValue());
      }
      return ImmutableMap.copyOf(map);
    }

    @Override
    public Iterable<Entry<AnEnum, String>> order(List<Entry<AnEnum, String>> insertionOrder) {
      // EnumMap iterates in natural enum order, so the expected order
      // is the insertion order sorted by key.
      return new Ordering<Entry<AnEnum, String>>() {
        @Override
        public int compare(Entry<AnEnum, String> left, Entry<AnEnum, String> right) {
          return left.getKey().compareTo(right.getKey());
        }
      }.sortedCopy(insertionOrder);
    }
  }

  /** Produces {@code asMultimap().asMap()} of an {@code ImmutableMap}, mapping each key to a singleton set. */
  public static class ImmutableMapValuesAsSingletonSetGenerator
      implements TestMapGenerator<String, Collection<Integer>> {

    @Override
    public SampleElements<Entry<String, Collection<Integer>>> samples() {
      return new SampleElements<>(
          mapEntry("one", ImmutableSet.of(10000)),
          mapEntry("two", ImmutableSet.of(-2000)),
          mapEntry("three", ImmutableSet.of(300)),
          mapEntry("four", ImmutableSet.of(-40)),
          mapEntry("five", ImmutableSet.of(5)));
    }

    @Override
    public Map<String, Collection<Integer>> create(Object... elements) {
      ImmutableMap.Builder<String, Integer> builder = ImmutableMap.builder();
      // assumes that each set is a singleton or less (as is done for the samples)
      for (Object elem : elements) {
        @SuppressWarnings("unchecked") // safe by generator contract
        Entry<String, Collection<Integer>> entry = (Entry<String, Collection<Integer>>) elem;
        Integer value = getOnlyElement(entry.getValue());
        builder.put(entry.getKey(), value);
      }
      return builder.buildOrThrow().asMultimap().asMap();
    }

    @Override
    @SuppressWarnings({"unchecked", "rawtypes"}) // needed for arrays
    public Entry<String, Collection<Integer>>[] createArray(int length) {
      return new Entry[length];
    }

    @Override
    public Iterable<Entry<String, Collection<Integer>>> order(
        List<Entry<String, Collection<Integer>>> insertionOrder) {
      return insertionOrder;
    }

    @Override
    public String[] createKeyArray(int length) {
      return new String[length];
    }

    @Override
    @SuppressWarnings({"unchecked", "rawtypes"}) // needed for arrays
    public Collection<Integer>[] createValueArray(int length) {
      return new ImmutableSet[length];
    }
  }

  /**
   * Useless constructor for a class of static utility methods.
   *
   * @deprecated Do not instantiate this utility class.
   */
  @Deprecated
  public MapGenerators() {}
}
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
MAX_TAG_LENGTH = 60
@base.NovaObjectRegistry.register
class Tag(base.NovaObject):
    """A single tag attached to a resource (keyed by resource_id + tag)."""

    # Version 1.0: Initial version
    # Version 1.1: Added method exists()
    VERSION = '1.1'

    fields = {
        'resource_id': fields.StringField(),
        'tag': fields.StringField(),
    }

    @staticmethod
    def _from_db_object(context, tag, db_tag):
        """Populate a Tag object in place from a DB row and return it."""
        for key in tag.fields:
            setattr(tag, key, db_tag[key])
        # Freshly-loaded fields should not count as modified.
        tag.obj_reset_changes()
        tag._context = context
        return tag

    @base.remotable
    def create(self):
        """Persist this tag and refresh the object from the DB row."""
        db_tag = db.instance_tag_add(self._context, self.resource_id, self.tag)
        self._from_db_object(self._context, self, db_tag)

    @base.remotable_classmethod
    def destroy(cls, context, resource_id, name):
        """Delete the named tag from the given resource."""
        db.instance_tag_delete(context, resource_id, name)

    @base.remotable_classmethod
    def exists(cls, context, resource_id, name):
        """Return True if the named tag exists on the given resource."""
        return db.instance_tag_exists(context, resource_id, name)
@base.NovaObjectRegistry.register
class TagList(base.ObjectListBase, base.NovaObject):
    """A list of Tag objects belonging to a single resource."""

    # Version 1.0: Initial version
    # Version 1.1: Tag <= version 1.1
    VERSION = '1.1'

    fields = {
        'objects': fields.ListOfObjectsField('Tag'),
    }

    @base.remotable_classmethod
    def get_by_resource_id(cls, context, resource_id):
        """Return all tags currently attached to the given resource."""
        db_tags = db.instance_tag_get_by_instance_uuid(context, resource_id)
        return base.obj_make_list(context, cls(), objects.Tag, db_tags)

    @base.remotable_classmethod
    def create(cls, context, resource_id, tags):
        """Replace the resource's tag set with ``tags`` and return the result."""
        db_tags = db.instance_tag_set(context, resource_id, tags)
        return base.obj_make_list(context, cls(), objects.Tag, db_tags)

    @base.remotable_classmethod
    def destroy(cls, context, resource_id):
        """Delete all tags from the given resource."""
        db.instance_tag_delete_all(context, resource_id)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to clip weights.
This is useful in the original formulation of the Wasserstein loss, which
requires that the discriminator be K-Lipschitz. See
https://arxiv.org/pdf/1701.07875 for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer
__all__ = [
'clip_variables',
'clip_discriminator_weights',
]
def clip_discriminator_weights(optimizer, model, weight_clip):
  """Modifies an optimizer so it clips weights to a certain value.

  Thin wrapper around `clip_variables` that selects the discriminator
  variables from the GANModel; argument validation happens there.

  Args:
    optimizer: An optimizer to perform variable weight clipping.
    model: A GANModel namedtuple.
    weight_clip: Positive python float to clip discriminator weights. Used to
      enforce a K-lipschitz condition, which is useful for some GAN training
      schemes (ex WGAN: https://arxiv.org/pdf/1701.07875).

  Returns:
    An optimizer to perform weight clipping after updates.

  Raises:
    ValueError: If `weight_clip` is less than 0.
  """
  return clip_variables(optimizer, model.discriminator_variables, weight_clip)
def clip_variables(optimizer, variables, weight_clip):
  """Modifies an optimizer so it clips weights to a certain value.

  Args:
    optimizer: An optimizer to perform variable weight clipping.
    variables: A list of TensorFlow variables.
    weight_clip: Positive python float to clip discriminator weights. Used to
      enforce a K-lipschitz condition, which is useful for some GAN training
      schemes (ex WGAN: https://arxiv.org/pdf/1701.07875).

  Returns:
    An optimizer to perform weight clipping after updates.

  Raises:
    ValueError: If `weight_clip` is less than 0.
  """
  if weight_clip < 0:
    # BUGFIX: the message must be %-interpolated here. Previously the value
    # was passed as a second (logging-style) argument to ValueError, so the
    # '%s' placeholder was never filled in.
    raise ValueError(
        '`discriminator_weight_clip` must be positive. Instead, was %s' %
        weight_clip)
  return variable_clipping_optimizer.VariableClippingOptimizer(
      opt=optimizer,
      # Do no reduction, so clipping happens per-value.
      vars_to_clip_dims={var: [] for var in variables},
      max_norm=weight_clip,
      use_locking=True,
      colocate_clip_ops_with_vars=True)
"""
Higher order functions built on the BlockStructureManager to interact with a django cache.
"""
from django.core.cache import cache
from xmodule.modulestore.django import modulestore
from .manager import BlockStructureManager
def get_course_in_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.get_collected function that returns the block
    structure in the cache for the given course_key.

    Returns:
        BlockStructureBlockData - The collected block structure,
            starting at root_block_usage_key.
    """
    manager = get_block_structure_manager(course_key)
    return manager.get_collected()
def update_course_in_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.updated_collected function that updates the block
    structure in the cache for the given course_key.
    """
    manager = get_block_structure_manager(course_key)
    return manager.update_collected_if_needed()
def clear_course_from_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.clear_block_cache function that clears the block
    structure from the cache for the given course_key.

    Note: See Note in get_course_blocks. Even after MA-1604 is
    implemented, this implementation should still be valid since the
    entire block structure of the course is cached, even though
    arbitrary access to an intermediate block will be supported.
    """
    manager = get_block_structure_manager(course_key)
    manager.clear()
def get_block_structure_manager(course_key):
    """
    Returns the manager for managing Block Structures for the given course.
    """
    store = modulestore()
    usage_key = store.make_course_usage_key(course_key)
    return BlockStructureManager(usage_key, store, get_cache())
def get_cache():
    """
    Returns the storage for caching Block Structures.

    Currently this is the default django cache backend.
    """
    return cache
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A vtctld webdriver test."""
import logging
import os
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import unittest
from vtproto import vttest_pb2
from vttest import environment as vttest_environment
from vttest import local_database
import environment
import utils
def setUpModule():
    """Module setup: optionally start a virtual X display for the browser.

    When the --xvfb flag is set, starts an Xvfb server on display :15 so
    the webdriver browser can run without a real display; otherwise the
    local DISPLAY is used as-is.
    """
    try:
        if utils.options.xvfb:
            try:
                # This will be killed automatically by utils.kill_sub_processes()
                utils.run_bg(['Xvfb', ':15', '-ac'])
                os.environ['DISPLAY'] = ':15'
            except OSError as err:
                # Despite running in background, utils.run_bg() will throw immediately
                # if the Xvfb binary is not found.
                logging.error(
                    "Can't start Xvfb (will try local DISPLAY instead): %s", err)
    except:
        # Clean up any partially-started processes before re-raising.
        tearDownModule()
        raise
def tearDownModule():
    """Module teardown: remove temp files and kill child processes,
    unless --skip-teardown was requested."""
    utils.required_teardown()
    if not utils.options.skip_teardown:
        utils.remove_tmp_files()
        utils.kill_sub_processes()
class TestVtctldWeb(unittest.TestCase):
WEBDRIVER_TIMEOUT_S = 10
@classmethod
def setUpClass(cls):
    """Set up two keyspaces: one unsharded, one with two shards.

    Starts a local vttest database with the described topology, creates
    the shared webdriver, and records the vtctld address for the tests.
    """
    topology = vttest_pb2.VTTestTopology()
    topology.cells.append('test')
    topology.cells.append('test2')
    keyspace = topology.keyspaces.add(name='test_keyspace')
    keyspace.replica_count = 2
    keyspace.rdonly_count = 2
    keyspace.shards.add(name='-80')
    keyspace.shards.add(name='80-')
    keyspace2 = topology.keyspaces.add(name='test_keyspace2')
    keyspace2.shards.add(name='0')
    keyspace2.replica_count = 2
    keyspace2.rdonly_count = 1

    cls.driver = environment.create_webdriver()

    # Reserve a port block for the local test database.
    port = environment.reserve_ports(1)
    vttest_environment.base_port = port
    environment.reset_mysql_flavor()

    cls.db = local_database.LocalDatabase(
        topology,
        os.path.join(environment.vttop, 'test/vttest_schema'),
        False, None,
        web_dir=os.path.join(environment.vttop, 'web/vtctld'),
        default_schema_dir=os.path.join(
            environment.vttop, 'test/vttest_schema/default'),
        web_dir2=os.path.join(environment.vttop, 'web/vtctld2/app'))
    cls.db.setup()
    cls.vtctld_addr = 'http://localhost:%d' % cls.db.config()['port']
    utils.pause('Paused test after vtcombo was started.\n'
                'For manual testing, connect to vtctld: %s' % cls.vtctld_addr)
@classmethod
def tearDownClass(cls):
    """Tear down the vttest database and quit the shared webdriver."""
    cls.db.teardown()
    cls.driver.quit()
def _get_keyspaces(self):
    """Get list of all present keyspaces."""
    content = self.driver.find_element_by_id('content')
    # TODO(thompsonja) find better way to get keyspace name
    keyspaces = content.find_elements_by_tag_name('md-card')
    # The keyspace name is the h2 title of each card.
    return [ks.find_element_by_tag_name('h2').text for ks in keyspaces]
def _get_keyspace_element(self, keyspace_name):
    """Get a specific keyspace element given a keyspace name.

    Waits (up to WEBDRIVER_TIMEOUT_S) for the card to become visible
    before returning it.
    """
    element_id = '%s-card' % keyspace_name
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, element_id)))
    return self.driver.find_element_by_id(element_id)
def _get_shards(self, keyspace_name):
    """Return the shard names listed for the given keyspace."""
    element_id = '%s-shards-list' % keyspace_name
    return self.driver.find_element_by_id(element_id).text.split('\n')
def _get_serving_shards(self, keyspace_name):
    """Return the serving shard names listed for the given keyspace."""
    element_id = '%s-serving-list' % keyspace_name
    return self.driver.find_element_by_id(element_id).text.split('\n')
def _get_inactive_shards(self, keyspace_name):
    """Return the inactive shard names listed for the given keyspace."""
    element_id = '%s-inactive-list' % keyspace_name
    return self.driver.find_element_by_id(element_id).text.split('\n')
def _get_shard_element(self, keyspace_name, shard_name):
    """Return the link element for a shard inside its keyspace card."""
    keyspace_card = self._get_keyspace_element(keyspace_name)
    return keyspace_card.find_element_by_link_text(shard_name)
def _get_tablet_names(self):
    """Return a dict mapping tablet alias -> tablet type, parsed from the
    md-card toolbar titles (formatted as 'alias (type)')."""
    cards = self.driver.find_element_by_id('tablets').find_elements_by_tag_name(
        'md-card')
    tablets = {}
    for card in cards:
        title = card.find_element_by_tag_name('md-toolbar').text.split('\n')[0]
        alias, tablet_type = title.split(' ')[0], title.split(' ')[1]
        # Strip the surrounding parentheses from the type.
        tablets[alias] = tablet_type[1:-1]
    return tablets
def _get_shard_record_keyspace_shard(self):
    """Return the 'keyspace/shard' label text from the shard record page,
    waiting for the element to become visible first."""
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, 'keyspace-shard')))
    return self.driver.find_element_by_id('keyspace-shard').text
def _get_shard_record_master_tablet(self):
    """Return the master tablet alias shown on the shard record page."""
    return self.driver.find_element_by_id('master-tablet').text
def _check_tablet_types(self, tablet_types, expected_counts):
for expected_type, count in expected_counts.iteritems():
self.assertEquals(count,
len([x for x in tablet_types if x == expected_type]))
def _check_shard_overview(
    self, keyspace_name, shard_name, expected_tablet_types):
    """Navigate into a shard, verify its record and tablet counts, and
    navigate back.

    Checks that the shard record shows the right keyspace/shard, that
    the master tablet is listed as type 'master', and that the tablet
    type counts match expected_tablet_types.
    """
    logging.info('Checking %s/%s', keyspace_name, shard_name)
    self._get_shard_element(keyspace_name, shard_name).click()
    self.assertEquals(self._get_shard_record_keyspace_shard(),
                      '%s/%s' % (keyspace_name, shard_name))
    master = self._get_shard_record_master_tablet()
    logging.info('master tablet is %s', master)
    shard_tablets = self._get_tablet_names()
    self.assertEquals(shard_tablets[master], 'master')
    self._check_tablet_types(shard_tablets.values(), expected_tablet_types)
    # Return to the previous page so subsequent checks start fresh.
    self.driver.back()
def _get_dropdown_options(self, group):
    """Return the option labels of the given status-page dropdown."""
    status = self.driver.find_element_by_tag_name('vt-status')
    options = status.find_element_by_id(group).find_elements_by_tag_name(
        'option')
    return [option.text for option in options]
def _get_dropdown_selection(self, group):
status_content = self.driver.find_element_by_tag_name('vt-status')
dropdown = status_content.find_element_by_id(group)
return dropdown.find_element_by_tag_name('label').text
def _change_dropdown_option(self, dropdown_id, dropdown_value):
status_content = self.driver.find_element_by_tag_name('vt-status')
dropdown = status_content.find_element_by_id(dropdown_id)
dropdown.click()
options = dropdown.find_elements_by_tag_name('li')
for op in options:
if op.text == dropdown_value:
logging.info('dropdown %s: option %s clicked', dropdown_id, op.text)
op.click()
break
def _check_dropdowns(self, keyspaces, selected_keyspace, cells, selected_cell,
types, selected_type, metrics, selected_metric):
"""Checking that all dropdowns have the correct options and selection."""
keyspace_options = self._get_dropdown_options('keyspace')
keyspace_selected = self._get_dropdown_selection('keyspace')
logging.info('Keyspace options: %s Keyspace selected: %s',
', '.join(keyspace_options), keyspace_selected)
self.assertListEqual(keyspaces, keyspace_options)
self.assertEqual(selected_keyspace, keyspace_selected)
cell_options = self._get_dropdown_options('cell')
cell_selected = self._get_dropdown_selection('cell')
logging.info('Cell options: %s Cell Selected: %s',
', '.join(cell_options), cell_selected)
self.assertListEqual(cells, cell_options)
self.assertEqual(selected_cell, cell_selected)
type_options = self._get_dropdown_options('type')
type_selected = self._get_dropdown_selection('type')
logging.info('Type options: %s Type Selected: %s',
', '.join(cell_options), cell_selected)
self.assertListEqual(types, type_options)
self.assertEqual(selected_type, type_selected)
metric_options = self._get_dropdown_options('metric')
metric_selected = self._get_dropdown_selection('metric')
logging.info('metric options: %s metric Selected: %s',
', '.join(metric_options), metric_selected)
self.assertListEqual(metrics, metric_options)
self.assertEqual(selected_metric, metric_selected)
  def _check_heatmaps(self, selected_keyspace):
    """Checking that the view has the correct number of heatmaps drawn."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    keyspaces = status_content.find_elements_by_tag_name('vt-heatmap')
    logging.info('Number of keyspaces found: %d', len(keyspaces))
    if selected_keyspace == 'all':
      # One heatmap per real keyspace; the -1 accounts for the dropdown's
      # extra 'all' entry.
      available_keyspaces = self._get_dropdown_options('keyspace')
      self.assertEqual(len(keyspaces), len(available_keyspaces)-1)
      for ks in keyspaces:
        heading = ks.find_element_by_id('keyspaceName')
        logging.info('Keyspace name: %s', heading.text)
        # Each heatmap must contain an element whose id is the keyspace name.
        try:
          ks.find_element_by_id(heading.text)
        except NoSuchElementException:
          self.fail('Cannot get keyspace')
        self.assertIn(heading.text, available_keyspaces)
    else:
      # A specific keyspace is selected: exactly one heatmap, named after it.
      self.assertEquals(len(keyspaces), 1)
      heading = keyspaces[0].find_element_by_id('keyspaceName')
      logging.info('Keyspace name: %s', heading.text)
      try:
        keyspaces[0].find_element_by_id(heading.text)
      except NoSuchElementException:
        self.fail('Cannot get keyspace')
      self.assertEquals(heading.text, selected_keyspace)

  def _check_new_view(
      self, keyspaces, selected_keyspace, cells, selected_cell, types,
      selected_type, metrics, selected_metric):
    """Checking the dropdowns and heatmaps for each newly routed view."""
    logging.info('Testing realtime stats view')
    self._check_dropdowns(keyspaces, selected_keyspace, cells, selected_cell,
                          types, selected_type, metrics, selected_metric)
    self._check_heatmaps(selected_keyspace)
  # Navigation helpers: drive the browser to a specific page and wait for a
  # known element to become visible before returning.

  def _navigate_to_dashboard(self):
    """Load the new (app2) vtctld dashboard and wait for it to render."""
    logging.info('Fetching main vtctld page: %s', self.vtctld_addr)
    self.driver.get('%s/app2' % self.vtctld_addr)
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, 'test_keyspace')))

  def _navigate_to_keyspace_view(self):
    """From the dashboard, open the first keyspace card."""
    self._navigate_to_dashboard()
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    keyspace_cards = dashboard_content.find_elements_by_class_name('vt-card')
    self.assertEqual(2, len(keyspace_cards))
    first_keyspace_card = keyspace_cards[0]
    shard_stats = first_keyspace_card.find_element_by_tag_name('md-list')
    shard_stats.click()
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.CLASS_NAME, 'vt-card')))

  def _navigate_to_shard_view(self):
    """From the keyspace view, open the first serving shard."""
    self._navigate_to_keyspace_view()
    keyspace_content = self.driver.find_element_by_tag_name('vt-keyspace-view')
    shard_cards = keyspace_content.find_elements_by_class_name(
        'vt-serving-shard')
    self.assertEqual(2, len(shard_cards))
    first_shard_card = shard_cards[0]
    first_shard_card.click()
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, '1')))
  # Element getters: scrape data out of the currently displayed page.

  def _get_dashboard_keyspaces(self):
    """Get list of all present keyspaces."""
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-dashboard')))
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    return [ks.text for ks in
            dashboard_content.find_elements_by_class_name('vt-keyspace-card')]

  def _get_dashboard_shards(self):
    """Get list of all present shards."""
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-dashboard')))
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    return [sh.text for sh in
            dashboard_content.find_elements_by_class_name('vt-shard-stats')]

  def _get_keyspace_shards(self):
    """Return the serving shard names shown on the keyspace view."""
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-keyspace-view')))
    keyspace_content = self.driver.find_element_by_tag_name('vt-keyspace-view')
    return [sh.text for sh in
            keyspace_content.find_elements_by_class_name('vt-serving-shard')]

  def _get_shard_tablets(self):
    """Return ([tablet types], [tablet uids]) from the shard view table."""
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-shard-view')))
    shard_content = self.driver.find_element_by_tag_name('vt-shard-view')
    # Ignore Header row.
    tablet_types = []
    tablet_uids = []
    table_rows = shard_content.find_elements_by_tag_name('tr')[1:]
    for row in table_rows:
      columns = row.find_elements_by_tag_name('td')
      # Column 1 appears to hold the tablet type and column 3 its uid --
      # TODO confirm against the vt-shard-view template.
      tablet_types.append(
          columns[1].find_element_by_class_name('ui-cell-data').text)
      tablet_uids.append(
          columns[3].find_element_by_class_name('ui-cell-data').text)
    return (tablet_types, tablet_uids)

  def _get_first_option(self, dashboard_content):
    """Open the dashboard menu and return its first menu item."""
    dashboard_menu = dashboard_content.find_element_by_class_name('vt-menu')
    dashboard_menu.click()
    first_option = dashboard_content.find_element_by_class_name(
        'ui-menuitem-text')
    return first_option

  def _get_dialog_cmd(self, dialog):
    """Return the command tokens displayed in a dialog's vt-sheet elements."""
    dialog_command = [
        cmd.text for cmd in dialog.find_elements_by_class_name('vt-sheet')]
    return dialog_command

  def _toggle_dialog_checkbox(self, dialog, index):
    """Click the index-th checkbox inside a dialog."""
    ping_tablets_checkbox = dialog.find_elements_by_class_name(
        'md-checkbox-inner-container')[index]
    ping_tablets_checkbox.click()

  def _get_validate_resp(self, dialog):
    """Click the dialog's action button and return the response text."""
    validate = dialog.find_element_by_id('vt-action')
    validate.click()
    validate_response = dialog.find_element_by_class_name('vt-resp').text
    return validate_response

  def _close_dialog(self, dialog):
    """Dismiss a dialog via its dismiss button."""
    dismiss = dialog.find_element_by_id('vt-dismiss')
    dismiss.click()
  def test_old_keyspace_overview(self):
    """Verify keyspaces and serving/inactive shards on the old (app) UI."""
    logging.info('Testing old keyspace overview')
    logging.info('Fetching main vtctld page: %s', self.vtctld_addr)
    self.driver.get(self.vtctld_addr + '/app')
    keyspace_names = self._get_keyspaces()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    self.assertListEqual(['test_keyspace', 'test_keyspace2'], keyspace_names)
    test_keyspace_serving_shards = self._get_serving_shards('test_keyspace')
    logging.info(
        'Serving Shards in test_keyspace: %s', ', '.join(
            test_keyspace_serving_shards))
    self.assertListEqual(test_keyspace_serving_shards, ['-80', '80-'])
    test_keyspace2_serving_shards = self._get_serving_shards('test_keyspace2')
    logging.info(
        'Serving Shards in test_keyspace2: %s', ', '.join(
            test_keyspace2_serving_shards))
    self.assertListEqual(test_keyspace2_serving_shards, ['0'])
    # Neither keyspace has inactive shards, so the inactive-list element
    # should be absent entirely.
    with self.assertRaises(NoSuchElementException):
      self._get_inactive_shards('test_keyspace')
    logging.info(
        'Inactive Shards in test_keyspace: %s', ', '.join([]))
    with self.assertRaises(NoSuchElementException):
      self._get_inactive_shards('test_keyspace2')
    logging.info(
        'Inactive Shards in test_keyspace2: %s', ', '.join([]))

  def test_old_shard_overview(self):
    """Verify per-shard tablet composition on the old (app) UI."""
    logging.info('Testing old shard overview')
    logging.info('Fetching main vtctld page: %s', self.vtctld_addr)
    self.driver.get(self.vtctld_addr + '/app')
    self._check_shard_overview(
        'test_keyspace', '-80', {'master': 1, 'replica': 1, 'rdonly': 2})
    self._check_shard_overview(
        'test_keyspace', '80-', {'master': 1, 'replica': 1, 'rdonly': 2})
    self._check_shard_overview(
        'test_keyspace2', '0', {'master': 1, 'replica': 1, 'rdonly': 1})

  def test_dashboard(self):
    """Verify keyspace cards and their shard counts on the new dashboard."""
    logging.info('Testing dashboard view')
    self._navigate_to_dashboard()
    keyspace_names = self._get_dashboard_keyspaces()
    shard_names = self._get_dashboard_shards()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    logging.info('Shards: %s', ', '.join(shard_names))
    self.assertListEqual(['test_keyspace', 'test_keyspace2'], keyspace_names)
    self.assertListEqual(['2 Shards', '1 Shards'], shard_names)

  def test_dashboard_validate(self):
    """Exercise the dashboard's Validate dialog end to end."""
    self._navigate_to_dashboard()
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    first_menu_option = self._get_first_option(dashboard_content)
    logging.info('First option of Dashboard menu: %s', first_menu_option.text)
    self.assertEqual('Validate', first_menu_option.text)
    first_menu_option.click()
    dialog = dashboard_content.find_element_by_tag_name('vt-dialog')
    dialog_command = self._get_dialog_cmd(dialog)
    logging.info('Validate command: %s', ', '.join(dialog_command))
    self.assertEqual(2, len(dialog_command))
    self.assertListEqual(['Validate', '-ping-tablets=false'], dialog_command)
    # Validate Dialog Checkbox is working
    self._toggle_dialog_checkbox(dialog, 0)
    dialog_command = self._get_dialog_cmd(dialog)
    logging.info('Validate command: %s', ', '.join(dialog_command))
    self.assertEqual(2, len(dialog_command))
    self.assertEqual('-ping-tablets', dialog_command[1])
    # Validate succeeded
    validate_response = self._get_validate_resp(dialog)
    logging.info('Validate command response: %s', validate_response)
    self._close_dialog(dialog)
  def test_create_keyspace(self):
    """Create a keyspace through the dialog, verify it, then delete it."""
    self._navigate_to_dashboard()
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    dialog = dashboard_content.find_element_by_tag_name('vt-dialog')
    # Create Keyspace Dialog command responds to name.
    dashboard_menu = dashboard_content.find_element_by_class_name('vt-menu')
    dashboard_menu.click()
    dashboard_menu_options = (
        dashboard_content.find_elements_by_class_name('ui-menuitem-text'))
    new_keyspace_option = [
        x for x in dashboard_menu_options if x.text == 'New'][0]
    new_keyspace_option.click()
    input_fields = [md_input.find_element_by_tag_name('input') for md_input in
                    dialog.find_elements_by_tag_name('md-input')]
    keyspace_name_field = input_fields[0]
    sharding_col_name_field = input_fields[1]
    keyspace_name_field.send_keys('test_keyspace3')
    dialog_command = [
        cmd.text for cmd in dialog.find_elements_by_class_name('vt-sheet')]
    logging.info('Create keyspace command: %s', ', '.join(dialog_command))
    self.assertEqual(3, len(dialog_command))
    self.assertListEqual(['CreateKeyspace', '-force=false', 'test_keyspace3'],
                         dialog_command)
    # Create Keyspace autopopulates sharding_column type
    sharding_col_name_field.send_keys('test_id')
    dialog_command = [
        cmd.text for cmd in dialog.find_elements_by_class_name('vt-sheet')]
    logging.info('Create keyspace command: %s', ', '.join(dialog_command))
    self.assertEqual(5, len(dialog_command))
    self.assertListEqual(['CreateKeyspace', '-sharding_column_name=test_id',
                          '-sharding_column_type=UINT64', '-force=false',
                          'test_keyspace3'],
                         dialog_command)
    # Dropdown works: picking the second entry switches the column type.
    dropdown = dialog.find_element_by_tag_name('p-dropdown')
    dropdown.click()
    options = dropdown.find_elements_by_tag_name('li')
    options[1].click()
    dialog_command = [
        cmd.text for cmd in dialog.find_elements_by_class_name('vt-sheet')]
    logging.info('Create keyspace command: %s', ', '.join(dialog_command))
    self.assertEqual(5, len(dialog_command))
    self.assertListEqual(['CreateKeyspace', '-sharding_column_name=test_id',
                          '-sharding_column_type=BYTES', '-force=false',
                          'test_keyspace3'],
                         dialog_command)
    # Run the command, dismiss the dialog, and check the new keyspace shows.
    create = dialog.find_element_by_id('vt-action')
    create.click()
    dismiss = dialog.find_element_by_id('vt-dismiss')
    dismiss.click()
    keyspace_names = self._get_dashboard_keyspaces()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    self.assertListEqual(
        ['test_keyspace', 'test_keyspace2', 'test_keyspace3'], keyspace_names)
    # Clean up: delete test_keyspace3 again via its card menu.
    test_keyspace3 = dashboard_content.find_elements_by_class_name('vt-card')[2]
    test_keyspace3.find_element_by_class_name('vt-menu').click()
    options = test_keyspace3.find_elements_by_tag_name('li')
    delete = [x for x in options if x.text == 'Delete'][0]
    delete.click()
    delete = dialog.find_element_by_id('vt-action')
    delete.click()
    dismiss = dialog.find_element_by_id('vt-dismiss')
    dismiss.click()
    keyspace_names = self._get_dashboard_keyspaces()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    self.assertListEqual(['test_keyspace', 'test_keyspace2'], keyspace_names)
  def test_keyspace_view(self):
    """Verify the shard list shown on the keyspace view."""
    # NOTE(review): the view is navigated to twice; the first call looks
    # redundant -- confirm whether the repeated navigation is intentional.
    self._navigate_to_keyspace_view()
    logging.info('Navigating to keyspace view')
    self._navigate_to_keyspace_view()
    logging.info('Testing keyspace view')
    shard_names = self._get_keyspace_shards()
    logging.info('Shards in first keyspace: %s', ', '.join(shard_names))
    self.assertListEqual(['-80', '80-'], shard_names)

  def test_shard_view(self):
    """Verify tablet types and uids shown on the shard view."""
    # NOTE(review): same double navigation as in test_keyspace_view above.
    self._navigate_to_shard_view()
    logging.info('Navigating to shard view')
    self._navigate_to_shard_view()
    logging.info('Testing shard view')
    tablet_types, tablet_uids = self._get_shard_tablets()
    logging.info('Tablets types in first shard in first keyspace: %s',
                 ', '.join(tablet_types))
    logging.info('Tablets uids in first shard in first keyspace: %s',
                 ', '.join(tablet_uids))
    # Set comparison: row order in the table is not asserted here.
    self.assertSetEqual(
        set(['master', 'replica', 'rdonly', 'rdonly', 'replica', 'replica',
             'rdonly', 'rdonly']), set(tablet_types))
    self.assertSetEqual(
        set(['1', '2', '3', '4', '5', '6', '7', '8']), set(tablet_uids))
  def test_realtime_stats(self):
    """Walk the realtime-stats view through a series of dropdown changes."""
    logging.info('Testing realtime stats view')
    # Navigate to the status page from initial app.
    # TODO(thompsonja): Fix this once direct navigation works (going to status
    # page directly should display correctly)
    self.driver.get('%s/app2' % self.vtctld_addr)
    status_button = self.driver.find_element_by_partial_link_text('Status')
    status_button.click()
    wait = WebDriverWait(self.driver, self.WEBDRIVER_TIMEOUT_S)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-status')))
    # Each tuple: (dropdown to change, value to pick, then the expected
    # selected keyspace, cell and tablet type after the change).  The first
    # entry checks the initial state without touching any dropdown.
    test_cases = [
        (None, None, 'all', 'all', 'all'),
        ('type', 'REPLICA', 'all', 'all', 'REPLICA'),
        ('cell', 'test2', 'all', 'test2', 'REPLICA'),
        ('keyspace', 'test_keyspace', 'test_keyspace', 'test2', 'REPLICA'),
        ('cell', 'all', 'test_keyspace', 'all', 'REPLICA'),
        ('type', 'all', 'test_keyspace', 'all', 'all'),
        ('cell', 'test2', 'test_keyspace', 'test2', 'all'),
        ('keyspace', 'all', 'all', 'test2', 'all'),
    ]
    for (dropdown_id, dropdown_val, keyspace, cell, tablet_type) in test_cases:
      logging.info('Routing to new %s-%s-%s view', keyspace, cell, tablet_type)
      if dropdown_id and dropdown_val:
        self._change_dropdown_option(dropdown_id, dropdown_val)
      # Cell test2 gets a shorter type dropdown -- presumably because it
      # hosts no MASTER tablets; confirm against the test topology.
      tablet_type_options = ['all', 'MASTER', 'REPLICA', 'RDONLY']
      if cell == 'test2':
        tablet_type_options = ['all', 'REPLICA', 'RDONLY']
      self._check_new_view(keyspaces=['all', 'test_keyspace', 'test_keyspace2'],
                           selected_keyspace=keyspace,
                           cells=['all', 'test', 'test2'],
                           selected_cell=cell,
                           types=tablet_type_options,
                           selected_type=tablet_type,
                           metrics=['lag', 'qps', 'health'],
                           selected_metric='health'
                          )
def add_test_options(parser):
  """Register test-specific command-line options (optparse style)."""
  parser.add_option(
      '--no-xvfb', action='store_false', dest='xvfb', default=True,
      help='Use local DISPLAY instead of headless Xvfb mode.')


if __name__ == '__main__':
  utils.main(test_options=add_test_options)
# -*- encoding: utf-8 -*-
from __future__ import division
try:
import cPickle as pickle
except ImportError:
import pickle
import sys
import random
import os
import re
import datetime
# TODO: add memorization of the previously generated tweet corpus
class MarkovChain(object):
    """A first-order Markov chain over words, persisted with pickle.

    The transition table ``self.db`` maps a word to a dict of successor
    words and their probabilities.  The empty string '' is used as a
    sentinel for both the start and the end of a sentence.
    """

    def __init__(self, db_file_path=None):
        """Load the word database from db_file_path, or start empty.

        When no path is given, a timestamped file under ./db is used
        (the directory is created if needed).
        """
        self.db_file_path = db_file_path
        if not db_file_path:
            directory = "db"
            filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            if not os.path.exists(directory):
                os.makedirs(directory)
            self.db_file_path = os.path.join(directory, filename)
        try:
            with open(self.db_file_path, 'rb') as dbfile:
                self.db = pickle.load(dbfile)
        except IOError:
            sys.stdout.write('Database file not found, using empty database')
            self.db = {}
        except ValueError:
            sys.stdout.write('Database corrupt or unreadable, using empty database')
            self.db = {}

    def generate_database(self, text_sample, sentence_sep='[.!?\n]'):
        """Generate word probability database from raw content string.

        Splits text_sample into sentences on the sentence_sep regex,
        counts word transitions (with '' marking sentence boundaries),
        normalizes the counts to probabilities and persists the result.

        Returns True if the database was written to disk successfully.
        """
        # '' is the special symbol for the beginning/end of a sentence.
        self.db = {"": {"": 0.0}}
        for line in self._word_iter(text_sample, sentence_sep):
            words = line.strip().split()
            if not words:
                continue
            # The first word follows a sentence start.
            self.db[""][words[0]] = self.db[""].get(words[0], 0.0) + 1
            # Count each in-sentence transition.
            for cur_word, next_word in zip(words, words[1:]):
                followers = self.db.setdefault(cur_word, {})
                followers[next_word] = followers.get(next_word, 0.0) + 1
            # The last word precedes a sentence end.
            followers = self.db.setdefault(words[-1], {})
            followers[""] = followers.get("", 0.0) + 1
        # Normalize the parametrized counts into probabilities.
        for word in self.db:
            wordsum = sum(self.db[word].values())
            if wordsum != 0:
                for nextword in self.db[word]:
                    self.db[word][nextword] /= wordsum
        # Now we dump the db to disk.
        return self.dumpdb()

    def dumpdb(self):
        """Persist self.db to self.db_file_path; return True on success."""
        try:
            with open(self.db_file_path, 'wb') as dbfile:
                pickle.dump(self.db, dbfile)
            # It looks like db was written successfully.
            return True
        except IOError:
            sys.stderr.write('Database file could not be written')
            return False

    def generate_string(self):
        """Generate a "sentence" with the database of known text."""
        return self._accumulate_with_seed('')

    def generate_string_with_seed(self, seed):
        """Generate a "sentence" continuing from the given seed words."""
        # The generated continuation only depends on the last seed word.
        words = seed.split()
        if words and words[-1] in self.db:
            prefix = words[:-1]
            generated = self._accumulate_with_seed(words[-1])
            if prefix:
                return ' '.join(prefix) + ' ' + generated
            return generated
        # Unknown (or empty) seed: fall back to a freshly generated sentence
        # appended after the seed text.
        sep = ' ' if seed else ''
        return seed + sep + self.generate_string()

    @staticmethod
    def _word_iter(text, separator='.'):
        """Yield the non-empty 'sentences' of text, split on the separator regex."""
        exp = re.compile(separator)
        pos = 0
        for occ in exp.finditer(text):
            sub = text[pos:occ.start()].strip()
            if sub:
                yield sub
            pos = occ.start() + 1
        if pos < len(text):
            # Take care of any trailing text after the last separator.
            sub = text[pos:].strip()
            if sub:
                yield sub

    def _accumulate_with_seed(self, seed):
        """Build a sentence word by word, starting from seed ('' for none)."""
        next_word = self._next_word(seed)
        sentence = [seed] if seed else []
        while next_word:
            sentence.append(next_word)
            next_word = self._next_word(next_word)
        return ' '.join(sentence)

    def _next_word(self, lastword):
        """Sample a successor of lastword; '' means the sentence ends.

        Words with no known successors (e.g. when the database is empty
        or lastword was never seen) yield '' instead of raising.
        """
        # .get() keeps an empty or partial database from raising KeyError
        # (the previous version indexed self.db[lastword] directly).
        probmap = self.db.get(lastword, {})
        sample = random.random()
        # Track the highest-probability word: rounding errors might make
        # the sampling loop fall through without choosing anything.
        maxprob = 0.0
        maxprobword = ""
        for candidate, prob in probmap.items():
            if prob > maxprob:
                maxprob = prob
                maxprobword = candidate
            if sample > prob:
                sample -= prob
            else:
                return candidate
        return maxprobword
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for skin services."""
__author__ = 'Sean Lip'
from core.domain import skins_services
from core.tests import test_utils
class DefaultSkinsUnitTests(test_utils.GenericTestBase):
    """Tests for the default skins."""

    def test_get_all_skin_ids(self):
        # Sorting makes the comparison independent of registry order.
        skin_ids = skins_services.Registry.get_all_skin_ids()
        self.assertEqual(sorted(skin_ids), ['conversation_v1', 'snapshots_v1'])

    def test_default_skins_are_present(self):
        conversation_skin = skins_services.Registry.get_skin_templates(
            ['conversation_v1'])
        for expected_fragment in ('conversation.css', 'skins/Conversation'):
            self.assertIn(expected_fragment, conversation_skin)

        snapshots_skin = skins_services.Registry.get_skin_templates(
            ['snapshots_v1'])
        self.assertIn('skins/Snapshots', snapshots_skin)

        both_skins = skins_services.Registry.get_skin_templates(
            ['conversation_v1', 'snapshots_v1'])
        for expected_fragment in ('skins/Conversation', 'skins/Snapshots'):
            self.assertIn(expected_fragment, both_skins)

    def test_nonexistent_skins_raise_error(self):
        with self.assertRaises(Exception):
            skins_services.Registry.get_skin_templates(
                ['conversation_v1', 'nonexistent'])
#!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to get X events from Record.
This is designed to read events from X windows for keyboard and mouse
events.
"""
__author__ = 'Scott Kirkwood (scott+keymon@forusers.com)'
from Xlib import display
from Xlib import X
from Xlib import XK
from Xlib.ext import record
from Xlib.protocol import rq
import locale
import sys
import time
import threading
import collections
class XEvent(object):
  """An event, mimics edev.py events."""

  def __init__(self, atype, scancode, code, value):
    # Stored under private names; read access goes through the
    # properties below, keeping the object effectively read-only.
    self._event_type = atype
    self._event_scancode = scancode
    self._event_code = code
    self._event_value = value

  def get_type(self):
    """Get the type of event."""
    return self._event_type
  type = property(get_type)

  def get_scancode(self):
    """Get the scancode if any."""
    return self._event_scancode
  scancode = property(get_scancode)

  def get_code(self):
    """Get the code string."""
    return self._event_code
  code = property(get_code)

  def get_value(self):
    """Get the value 0 for up, 1 for down, etc."""
    return self._event_value
  value = property(get_value)

  def __str__(self):
    return 'type:{0} scancode:{1} code:{2} value:{3}'.format(
        self._event_type, self._event_scancode, self._event_code,
        self._event_value)
class XEvents(threading.Thread):
  """A thread to queue up X window events from RECORD extension."""

  # X button numbers -> evdev-style code names.  Buttons 4/5 both map to
  # REL_WHEEL; _handle_mouse turns button 5 into a negative wheel value.
  _butn_to_code = {
      1: 'BTN_LEFT', 2: 'BTN_MIDDLE', 3: 'BTN_RIGHT',
      4: 'REL_WHEEL', 5: 'REL_WHEEL', 6: 'REL_LEFT', 7: 'REL_RIGHT'}

  def __init__(self):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.setName('Xlib-thread')
    self._listening = False
    # Two connections: record_display is consumed by the RECORD context,
    # local_display stays usable for ordinary requests (keysym lookups,
    # disabling the context from another thread).
    self.record_display = display.Display()
    self.local_display = display.Display()
    self.ctx = None
    # Unknown keysyms map to 'KEY_DUNNO' instead of raising.
    self.keycode_to_symbol = collections.defaultdict(lambda: 'KEY_DUNNO')
    self._setup_lookup()
    self.events = []  # each of type XEvent

  def run(self):
    """Standard run method for threading."""
    self.start_listening()

  def _setup_lookup(self):
    """Setup the keysym -> 'KEY_*' name lookup table."""
    # set locale to default C locale, see Issue 77.
    # Use setlocale(None) to get current locale instead of getlocale.
    # See Issue 125 and http://bugs.python.org/issue1699853.
    OLD_CTYPE = locale.setlocale(locale.LC_CTYPE, None)
    locale.setlocale(locale.LC_CTYPE, 'C')
    # Derive names from the Xlib XK_* keysym constants.
    for name in dir(XK):
      if name[:3] == "XK_":
        code = getattr(XK, name)
        self.keycode_to_symbol[code] = 'KEY_' + name[3:].upper()
    locale.setlocale(locale.LC_CTYPE, OLD_CTYPE)
    # Manual entries for keysyms the XK module does not provide.
    self.keycode_to_symbol[65027] = 'KEY_ISO_LEVEL3_SHIFT'
    self.keycode_to_symbol[269025062] = 'KEY_BACK'
    self.keycode_to_symbol[269025063] = 'KEY_FORWARD'
    self.keycode_to_symbol[16777215] = 'KEY_CAPS_LOCK'
    self.keycode_to_symbol[269025067] = 'KEY_WAKEUP'
    # Multimedia keys
    self.keycode_to_symbol[269025042] = 'KEY_AUDIOMUTE'
    self.keycode_to_symbol[269025041] = 'KEY_AUDIOLOWERVOLUME'
    self.keycode_to_symbol[269025043] = 'KEY_AUDIORAISEVOLUME'
    self.keycode_to_symbol[269025047] = 'KEY_AUDIONEXT'
    self.keycode_to_symbol[269025044] = 'KEY_AUDIOPLAY'
    self.keycode_to_symbol[269025046] = 'KEY_AUDIOPREV'
    self.keycode_to_symbol[269025045] = 'KEY_AUDIOSTOP'
    # Turkish / F layout
    self.keycode_to_symbol[699] = 'KEY_GBREVE'  # scancode = 26 / 18
    self.keycode_to_symbol[697] = 'KEY_IDOTLESS'  # scancode = 23 / 19
    self.keycode_to_symbol[442] = 'KEY_SCEDILLA'  # scancode = 39 / 40

  def next_event(self):
    """Returns the next event in queue, or None if none."""
    if self.events:
      return self.events.pop(0)
    return None

  def start_listening(self):
    """Start listening to RECORD extension and queuing events.

    Blocks inside record_enable_context until the context is disabled
    from another thread via stop_listening().
    """
    if not self.record_display.has_extension("RECORD"):
      print "RECORD extension not found"
      sys.exit(1)
    self._listening = True
    self.ctx = self.record_display.record_create_context(
        0,
        [record.AllClients],
        [{
            'core_requests': (0, 0),
            'core_replies': (0, 0),
            'ext_requests': (0, 0, 0, 0),
            'ext_replies': (0, 0, 0, 0),
            'delivered_events': (0, 0),
            'device_events': (X.KeyPress, X.MotionNotify),  # why only two, it's a range?
            'errors': (0, 0),
            'client_started': False,
            'client_died': False,
        }])
    self.record_display.record_enable_context(self.ctx, self._handler)
    # Don't understand this, how can we free the context yet still use it in Stop?
    self.record_display.record_free_context(self.ctx)
    self.record_display.close()

  def stop_listening(self):
    """Stop listening to events."""
    if not self._listening:
      return
    # Disabling the context makes record_enable_context (blocking in this
    # thread's run()) return, which lets start_listening() finish.
    self.local_display.record_disable_context(self.ctx)
    self.local_display.flush()
    self.local_display.close()
    self._listening = False
    self.join(0.05)

  def listening(self):
    """Are you listening?"""
    return self._listening

  def _handler(self, reply):
    """Handle a raw RECORD reply, converting it into queued XEvents."""
    if reply.category != record.FromServer:
      return
    if reply.client_swapped:
      return
    data = reply.data
    # A single reply may carry several events; parse them one by one.
    while len(data):
      event, data = rq.EventField(None).parse_binary_value(
          data, self.record_display.display, None, None)
      if event.type == X.ButtonPress:
        self._handle_mouse(event, 1)
      elif event.type == X.ButtonRelease:
        self._handle_mouse(event, 0)
      elif event.type == X.KeyPress:
        self._handle_key(event, 1)
      elif event.type == X.KeyRelease:
        self._handle_key(event, 0)
      elif event.type == X.MotionNotify:
        self._handle_mouse(event, 2)
      else:
        print event

  def _handle_mouse(self, event, value):
    """Add a mouse event to events.

    Params:
      event: the event info
      value: 2=motion, 1=down, 0=up
    """
    if value == 2:
      self.events.append(XEvent('EV_MOV',
          0, 0, (event.root_x, event.root_y)))
    elif event.detail in [4, 5]:
      # Buttons 4/5 are the wheel: report as one relative axis with
      # value +1 (button 4) or -1 (button 5).
      if event.detail == 5:
        value = -1
      else:
        value = 1
      self.events.append(XEvent('EV_REL',
          0, XEvents._butn_to_code.get(event.detail, 'BTN_%d' % event.detail), value))
    else:
      self.events.append(XEvent('EV_KEY',
          0, XEvents._butn_to_code.get(event.detail, 'BTN_%d' % event.detail), value))

  def _handle_key(self, event, value):
    """Add key event to events.

    Params:
      event: the event info
      value: 1=down, 0=up
    """
    keysym = self.local_display.keycode_to_keysym(event.detail, 0)
    if keysym not in self.keycode_to_symbol:
      print 'Missing code for %d = %d' % (event.detail - 8, keysym)
    # The -8 converts the X keycode to an evdev-style scancode -- this
    # matches the offset used in the warning above; confirm for exotic
    # input setups.
    self.events.append(XEvent('EV_KEY', event.detail - 8, self.keycode_to_symbol[keysym], value))
def _run_test():
"""Run a test or debug session."""
events = XEvents()
events.start()
while not events.listening():
time.sleep(1)
print 'Waiting for initializing...'
print 'Press ESCape to quit'
try:
while events.listening():
try:
evt = events.next_event()
except KeyboardInterrupt:
print 'User interrupted'
events.stop_listening()
if evt:
print evt
if evt.code == 'KEY_ESCAPE':
events.stop_listening()
finally:
events.stop_listening()
if __name__ == '__main__':
_run_test() | unknown | codeparrot/codeparrot-clean | ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Private companion to fp_control_utils.hpp: selects, at preprocessing
// time, which FP-denormals-control implementation this build gets.
#ifndef OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP
#define OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP

#include "fp_control_utils.hpp"

#if OPENCV_SUPPORTS_FP_DENORMALS_HINT == 0
// disabled
#elif defined(OPENCV_IMPL_FP_HINTS)
// custom
#elif defined(OPENCV_IMPL_FP_HINTS_X86)
// custom
#elif defined(__SSE__) || defined(__SSE2__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
// SSE is available: default to the x86 implementation (xmmintrin.h
// provides the MXCSR intrinsics it relies on).
#include <xmmintrin.h>
#define OPENCV_IMPL_FP_HINTS_X86 1
#define OPENCV_IMPL_FP_HINTS 1
#endif

// Fallbacks: guarantee both macros are always defined (0 = hints are no-ops).
#ifndef OPENCV_IMPL_FP_HINTS
#define OPENCV_IMPL_FP_HINTS 0
#endif
#ifndef OPENCV_IMPL_FP_HINTS_X86
#define OPENCV_IMPL_FP_HINTS_X86 0
#endif

#endif // OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <ignacio@acysos.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import wizard
from . import models | unknown | codeparrot/codeparrot-clean | ||
# Composite action that publishes the Spring Boot CLI Homebrew formula:
# it waits for the formula artifact to appear on Maven Central, then commits
# it to the spring-io/homebrew-tap repository as `spring-boot.rb`.
name: Update Homebrew Tap
description: Updates the Homebrew Tap for the Spring Boot CLI
inputs:
  spring-boot-version:
    description: 'The version to publish'
    required: true
  token:
    # Needs push access to spring-io/homebrew-tap.
    description: 'Token to use for GitHub authentication'
    required: true
runs:
  using: composite
  steps:
    # Clone the tap repository that will receive the updated formula.
    - name: Check Out Homebrew Tap Repo
      uses: actions/checkout@v6
      with:
        path: updated-homebrew-tap-repo
        repository: spring-io/homebrew-tap
        token: ${{ inputs.token }}
    # Block until the released formula is available on Maven Central.
    - name: Await Formula
      uses: ./.github/actions/await-http-resource
      with:
        url: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-cli/${{ inputs.spring-boot-version }}/spring-boot-cli-${{ inputs.spring-boot-version }}-homebrew.rb
    # Replace spring-boot.rb with the downloaded formula and push the commit.
    - name: Update Homebrew Tap
      shell: bash
      run: |
        pushd updated-homebrew-tap-repo > /dev/null
        curl https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-cli/${{ inputs.spring-boot-version }}/spring-boot-cli-${{ inputs.spring-boot-version }}-homebrew.rb --output spring-boot-cli-${{ inputs.spring-boot-version }}-homebrew.rb
        rm spring-boot.rb
        mv spring-boot-cli-*.rb spring-boot.rb
        git config user.name "Spring Builds" > /dev/null
        git config user.email "spring-builds@users.noreply.github.com" > /dev/null
        git add spring-boot.rb > /dev/null
        git commit -m "Upgrade to Spring Boot ${{ inputs.spring-boot-version }}" > /dev/null
        git push
        echo "DONE"
        popd > /dev/null
# Copyright 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
import webob
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions as exts
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.contrib.rescue")
def wrap_errors(fn):
    """Decorator that converts unhandled exceptions into generic API faults.

    On success, the wrapped callable's return value (a ``webob.Response``
    for the rescue/unrescue handlers) is passed through to the caller; any
    exception is translated into an HTTP 500 fault instead of propagating.
    """
    def wrapped(*args, **kwargs):
        try:
            # Bug fix: the result must be returned -- previously the wrapper
            # discarded it, so decorated handlers' responses were dropped.
            return fn(*args, **kwargs)
        except Exception:
            return faults.Fault(exc.HTTPInternalServerError())
    return wrapped
class Rescue(exts.ExtensionDescriptor):
    """Extension exposing rescue/unrescue server actions."""

    def __init__(self):
        super(Rescue, self).__init__()
        self.compute_api = compute.API()

    @wrap_errors
    def _rescue(self, input_dict, req, instance_id):
        """Put the given instance into rescue mode."""
        ctxt = req.environ["nova.context"]
        self.compute_api.rescue(ctxt, instance_id)
        return webob.Response(status_int=202)

    @wrap_errors
    def _unrescue(self, input_dict, req, instance_id):
        """Take the given instance out of rescue mode."""
        ctxt = req.environ["nova.context"]
        self.compute_api.unrescue(ctxt, instance_id)
        return webob.Response(status_int=202)

    def get_name(self):
        return "Rescue"

    def get_alias(self):
        return "os-rescue"

    def get_description(self):
        return "Instance rescue mode"

    def get_namespace(self):
        return "http://docs.openstack.org/ext/rescue/api/v1.1"

    def get_updated(self):
        return "2011-08-18T00:00:00+00:00"

    def get_actions(self):
        """Return the actions the extension adds, as required by contract."""
        return [
            exts.ActionExtension("servers", action_name, handler)
            for action_name, handler in (("rescue", self._rescue),
                                         ("unrescue", self._unrescue))
        ]
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopic;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransaction;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransactionCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.kafka.common.requests.AddPartitionsToTxnResponse.errorsForTransaction;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Unit tests for {@code AddPartitionsToTxnRequest}, covering both the pre-v4
 * wire format (a single client-side transaction) and the v4+ format (a batch
 * of broker-side transactions), plus error-response generation and request
 * normalization.
 */
public class AddPartitionsToTxnRequestTest {
    // Shared fixture values used across all tests.
    private static final int PRODUCER_ID = 10;
    private static final short PRODUCER_EPOCH = 1;
    private static final int THROTTLE_TIME_MS = 10;
    private static final TopicPartition TP_0 = new TopicPartition("topic", 0);
    private static final TopicPartition TP_1 = new TopicPartition("topic", 1);
    private final String transactionalId1 = "transaction1";
    private final String transactionalId2 = "transaction2";

    /**
     * Builds a request for every supported API version and verifies that the
     * builder populates the version-appropriate fields, then checks the shape
     * of the generated error response (top-level error for v4+, per-partition
     * errors below v4).
     */
    @ParameterizedTest
    @ApiKeyVersionsSource(apiKey = ApiKeys.ADD_PARTITIONS_TO_TXN)
    public void testConstructor(short version) {
        AddPartitionsToTxnRequest request;
        if (version < 4) {
            // Pre-v4: single transaction addressed by the client-facing fields.
            List<TopicPartition> partitions = new ArrayList<>();
            partitions.add(TP_0);
            partitions.add(TP_1);
            AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions);
            request = builder.build(version);
            assertEquals(transactionalId1, request.data().v3AndBelowTransactionalId());
            assertEquals(PRODUCER_ID, request.data().v3AndBelowProducerId());
            assertEquals(PRODUCER_EPOCH, request.data().v3AndBelowProducerEpoch());
            assertEquals(partitions, AddPartitionsToTxnRequest.getPartitions(request.data().v3AndBelowTopics()));
        } else {
            // v4+: broker-side batch of transactions.
            AddPartitionsToTxnTransactionCollection transactions = createTwoTransactionCollection();
            AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forBroker(transactions);
            request = builder.build(version);
            AddPartitionsToTxnTransaction reqTxn1 = request.data().transactions().find(transactionalId1);
            AddPartitionsToTxnTransaction reqTxn2 = request.data().transactions().find(transactionalId2);
            assertEquals(transactions.find(transactionalId1), reqTxn1);
            assertEquals(transactions.find(transactionalId2), reqTxn2);
        }
        AddPartitionsToTxnResponse response = request.getErrorResponse(THROTTLE_TIME_MS, Errors.UNKNOWN_TOPIC_OR_PARTITION.exception());
        assertEquals(THROTTLE_TIME_MS, response.throttleTimeMs());
        if (version >= 4) {
            assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), response.data().errorCode());
            // Since the error is top level, we count it as one error in the counts.
            assertEquals(Collections.singletonMap(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1), response.errorCounts());
        } else {
            // Below v4 the error is replicated per partition (two partitions here).
            assertEquals(Collections.singletonMap(Errors.UNKNOWN_TOPIC_OR_PARTITION, 2), response.errorCounts());
        }
    }

    /**
     * Verifies that a batched (v4+) request exposes its partitions per
     * transaction and that per-transaction error results round-trip through
     * the response.
     */
    @Test
    public void testBatchedRequests() {
        AddPartitionsToTxnTransactionCollection transactions = createTwoTransactionCollection();
        AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forBroker(transactions);
        AddPartitionsToTxnRequest request = builder.build(ApiKeys.ADD_PARTITIONS_TO_TXN.latestVersion());
        Map<String, List<TopicPartition>> expectedMap = new HashMap<>();
        expectedMap.put(transactionalId1, Collections.singletonList(TP_0));
        expectedMap.put(transactionalId2, Collections.singletonList(TP_1));
        assertEquals(expectedMap, request.partitionsByTransaction());
        AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection results = new AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection();
        results.add(request.errorResponseForTransaction(transactionalId1, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        results.add(request.errorResponseForTransaction(transactionalId2, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED));
        AddPartitionsToTxnResponse response = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData()
            .setResultsByTransaction(results)
            .setThrottleTimeMs(THROTTLE_TIME_MS));
        assertEquals(Collections.singletonMap(TP_0, Errors.UNKNOWN_TOPIC_OR_PARTITION), errorsForTransaction(response.getTransactionTopicResults(transactionalId1)));
        assertEquals(Collections.singletonMap(TP_1, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED), errorsForTransaction(response.getTransactionTopicResults(transactionalId2)));
    }

    /**
     * Verifies that a pre-v4 (singleton) request is normalized into the
     * batched representation while preserving its partitions, producer id,
     * and producer epoch.
     */
    @Test
    public void testNormalizeRequest() {
        List<TopicPartition> partitions = new ArrayList<>();
        partitions.add(TP_0);
        partitions.add(TP_1);
        AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions);
        AddPartitionsToTxnRequest request = builder.build((short) 3);
        AddPartitionsToTxnRequest singleton = request.normalizeRequest();
        assertEquals(partitions, singleton.partitionsByTransaction().get(transactionalId1));
        AddPartitionsToTxnTransaction transaction = singleton.data().transactions().find(transactionalId1);
        assertEquals(PRODUCER_ID, transaction.producerId());
        assertEquals(PRODUCER_EPOCH, transaction.producerEpoch());
    }

    /**
     * Builds a collection with two transactions: transaction1 (verify-only,
     * TP_0) and transaction2 (not verify-only, TP_1, producer id/epoch
     * offset by one).
     */
    private AddPartitionsToTxnTransactionCollection createTwoTransactionCollection() {
        AddPartitionsToTxnTopicCollection topics0 = new AddPartitionsToTxnTopicCollection();
        topics0.add(new AddPartitionsToTxnTopic()
            .setName(TP_0.topic())
            .setPartitions(Collections.singletonList(TP_0.partition())));
        AddPartitionsToTxnTopicCollection topics1 = new AddPartitionsToTxnTopicCollection();
        topics1.add(new AddPartitionsToTxnTopic()
            .setName(TP_1.topic())
            .setPartitions(Collections.singletonList(TP_1.partition())));
        AddPartitionsToTxnTransactionCollection transactions = new AddPartitionsToTxnTransactionCollection();
        transactions.add(new AddPartitionsToTxnTransaction()
            .setTransactionalId(transactionalId1)
            .setProducerId(PRODUCER_ID)
            .setProducerEpoch(PRODUCER_EPOCH)
            .setVerifyOnly(true)
            .setTopics(topics0));
        transactions.add(new AddPartitionsToTxnTransaction()
            .setTransactionalId(transactionalId2)
            .setProducerId(PRODUCER_ID + 1)
            .setProducerEpoch((short) (PRODUCER_EPOCH + 1))
            .setVerifyOnly(false)
            .setTopics(topics1));
        return transactions;
    }
}
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols
import com.intellij.psi.PsiElement
import org.jetbrains.kotlin.analysis.api.KaSession
import org.jetbrains.kotlin.analysis.api.components.canBeAnalysed
import org.jetbrains.kotlin.analysis.api.components.containingFile
import org.jetbrains.kotlin.analysis.api.impl.base.components.KaBaseIllegalPsiException
import org.jetbrains.kotlin.analysis.api.impl.base.symbols.pointers.KaBaseCachedSymbolPointer.Companion.isCacheable
import org.jetbrains.kotlin.analysis.api.impl.base.symbols.pointers.KaBasePsiSymbolPointer
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K1
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K2
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_SYMBOL_RESTORE
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_SYMBOL_RESTORE_K1
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.DO_NOT_CHECK_SYMBOL_RESTORE_K2
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.PRETTY_RENDERER_OPTION
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.SymbolTestDirectives.RENDER_IS_PUBLIC_API
import org.jetbrains.kotlin.analysis.api.renderer.declarations.KaDeclarationRenderer
import org.jetbrains.kotlin.analysis.api.renderer.declarations.impl.KaDeclarationRendererForDebug
import org.jetbrains.kotlin.analysis.api.renderer.declarations.renderers.KaClassifierBodyRenderer
import org.jetbrains.kotlin.analysis.api.renderer.types.KaExpandedTypeRenderingMode
import org.jetbrains.kotlin.analysis.api.renderer.types.renderers.KaFunctionalTypeRenderer
import org.jetbrains.kotlin.analysis.api.symbols.*
import org.jetbrains.kotlin.analysis.api.symbols.pointers.KaSymbolPointer
import org.jetbrains.kotlin.analysis.test.framework.base.AbstractAnalysisApiBasedTest
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.KtTestModule
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.ktTestModuleStructure
import org.jetbrains.kotlin.analysis.test.framework.services.expressionMarkerProvider
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind
import org.jetbrains.kotlin.analysis.test.framework.utils.executeOnPooledThreadInReadAction
import org.jetbrains.kotlin.analysis.test.framework.utils.stripOutSnapshotVersion
import org.jetbrains.kotlin.analysis.utils.printer.prettyPrint
import org.jetbrains.kotlin.psi.*
import org.jetbrains.kotlin.test.directives.model.Directive
import org.jetbrains.kotlin.test.directives.model.DirectivesContainer
import org.jetbrains.kotlin.test.directives.model.RegisteredDirectives
import org.jetbrains.kotlin.test.directives.model.SimpleDirectivesContainer
import org.jetbrains.kotlin.test.services.TestServices
import org.jetbrains.kotlin.test.services.assertions
import org.jetbrains.kotlin.test.services.moduleStructure
import org.jetbrains.kotlin.utils.addIfNotNull
import org.jetbrains.kotlin.utils.addToStdlib.applyIf
import org.jetbrains.kotlin.utils.exceptions.KotlinIllegalArgumentExceptionWithAttachments
import org.jetbrains.kotlin.utils.mapToSetOrEmpty
import org.opentest4j.AssertionFailedError
import java.util.concurrent.ExecutionException
import kotlin.reflect.full.*
import kotlin.reflect.jvm.javaField
import kotlin.test.fail
/**
 * Base class for symbol tests: collects symbols from a test file, renders them,
 * creates pointers for them, and — after a global module state modification —
 * restores the symbols from those pointers and verifies the restored render
 * matches the original one.
 *
 * The whole scenario runs twice: once normally, and once with PSI-based pointer
 * logic disabled (backing PSI dropped from symbols), to verify the
 * FirSymbol-based implementations agree with the PSI-based ones.
 */
abstract class AbstractSymbolTest : AbstractAnalysisApiBasedTest() {
    /**
     * Currently [KaFileSymbol] cannot be restored without a backed PSI element,
     * so it is better to suppress it to not hide other problems.
     */
    open val suppressPsiBasedFilePointerCheck: Boolean get() = true

    open val defaultRenderer = KaDeclarationRendererForDebug.WITH_QUALIFIED_NAMES

    open val defaultRendererOption: PrettyRendererOption? = null

    override val additionalDirectives: List<DirectivesContainer>
        get() = super.additionalDirectives + listOf(SymbolTestDirectives)

    /** Collects the symbols under test from [ktFile]. Implemented by subclasses. */
    abstract fun KaSession.collectSymbols(ktFile: KtFile, testServices: TestServices): SymbolsData

    override fun doTestByMainFile(mainFile: KtFile, mainModule: KtTestModule, testServices: TestServices) {
        // ILLEGAL_PSI marks tests where symbol creation is expected to fail on
        // illegal PSI; such failures are suppressed rather than reported.
        testServices.moduleStructure.allDirectives.suppressIf(
            suppressionDirective = SymbolTestDirectives.ILLEGAL_PSI,
            filter = Throwable::isIllegalPsiException,
        ) {
            // Run the same scenario with and without PSI-based pointer logic.
            doTestByMainFile(mainFile, mainModule, testServices, disablePsiBasedLogic = false)
            doTestByMainFile(mainFile, mainModule, testServices, disablePsiBasedLogic = true)
        }
    }

    /**
     * Runs one pass of the test.
     *
     * @param disablePsiBasedLogic when `true`, PSI-based symbol pointers are disabled
     * and the backing PSI is dropped from collected/restored symbols.
     */
    private fun doTestByMainFile(
        mainFile: KtFile,
        mainModule: KtTestModule,
        testServices: TestServices,
        disablePsiBasedLogic: Boolean,
    ) {
        val markerProvider = testServices.expressionMarkerProvider
        // Optional <caret context> marker selects the element to analyze from.
        val analyzeContext = testServices.ktTestModuleStructure.allMainKtFiles.firstNotNullOfOrNull {
            markerProvider.getBottommostElementOfTypeAtCaretOrNull<KtElement>(it, "context")
        }

        val directives = mainModule.testModule.directives
        val directiveToIgnore = directives.doNotCheckNonPsiSymbolRestoreDirective()?.takeIf { disablePsiBasedLogic }
            ?: directives.doNotCheckSymbolRestoreDirective()

        // Apply directive-selected renderer options on top of the default renderer.
        val prettyRenderer = buildList {
            addIfNotNull(defaultRendererOption)
            addAll(directives[PRETTY_RENDERER_OPTION])
        }.fold(defaultRenderer) { acc, prettyRenderingMode ->
            prettyRenderingMode.transformation(acc)
        }

        // Creates a pointer for the symbol; pointer-creation failures are
        // tolerated only when the relevant DO_NOT_CHECK directive is present.
        fun KaSession.safePointer(ktSymbol: KaSymbol): KaSymbolPointer<*>? {
            if (disablePsiBasedLogic && ktSymbol is KaFileSymbol && suppressPsiBasedFilePointerCheck) return null
            val result = ktSymbol.runCatching {
                createPointerForTest(disablePsiBasedLogic = disablePsiBasedLogic)
            }
            val pointer = when {
                directiveToIgnore != null -> result.getOrNull()
                else -> result.getOrThrow()
            } ?: return null
            assertSymbolPointer(pointer, testServices)
            return pointer
        }

        val pointersWithRendered = executeOnPooledThreadInReadAction {
            analyzeForTest(analyzeContext ?: mainFile) {
                val (symbols, symbolForPrettyRendering) = collectSymbols(mainFile, testServices).also {
                    if (disablePsiBasedLogic) {
                        it.dropBackingPsi()
                    }
                }

                checkContainingFiles(symbols, mainFile, testServices)

                // Explicitly collected symbols are rendered; their implicit
                // children (parameters, accessors, ...) are only round-tripped.
                val pointerWithRenderedSymbol = symbols
                    .asSequence()
                    .flatMap { symbol ->
                        sequenceOf(symbol to true) + symbol.withImplicitSymbols().map { implicitSymbol ->
                            if (disablePsiBasedLogic) {
                                implicitSymbol.dropBackingPsi()
                            }

                            implicitSymbol to false
                        }
                    }
                    .distinctBy { it.first }
                    .map { (symbol, shouldBeRendered) ->
                        PointerWithRenderedSymbol(
                            pointer = safePointer(symbol),
                            rendered = renderSymbolForComparison(symbol, directives),
                            shouldBeRendered = shouldBeRendered,
                        )
                    }
                    .toList()

                val pointerWithPrettyRenderedSymbol = symbolForPrettyRendering.map { symbol ->
                    PointerWithRenderedSymbol(
                        safePointer(symbol),
                        when (symbol) {
                            is KaReceiverParameterSymbol -> KaDebugRenderer().render(useSiteSession, symbol)
                            is KaDeclarationSymbol -> symbol.render(prettyRenderer)
                            is KaFileSymbol -> prettyPrint {
                                printCollection(symbol.fileScope.declarations.asIterable(), separator = "\n\n") {
                                    append(it.render(prettyRenderer))
                                }
                            }
                            else -> error(symbol::class.toString())
                        },
                    )
                }

                SymbolPointersData(pointerWithRenderedSymbol, pointerWithPrettyRenderedSymbol)
            }
        }

        compareResults(pointersWithRendered, testServices, disablePsiBasedLogic)

        // Invalidate caches so restoration below cannot reuse cached symbols.
        configurator.doGlobalModuleStateModification(mainFile.project)

        restoreSymbolsInOtherReadActionAndCompareResults(
            directiveToIgnore = directiveToIgnore,
            ktFile = mainFile,
            pointersWithRendered = pointersWithRendered.pointers,
            testServices = testServices,
            directives = directives,
            disablePsiBasedLogic = disablePsiBasedLogic,
            analyzeContext = analyzeContext,
        )
    }

    private fun KaSymbol.createPointerForTest(disablePsiBasedLogic: Boolean): KaSymbolPointer<*> =
        KaBasePsiSymbolPointer.withDisabledPsiBasedPointers(disable = disablePsiBasedLogic) { createPointer() }

    /** Sanity check: every pointer must at least point to the same symbol as itself. */
    private fun assertSymbolPointer(pointer: KaSymbolPointer<*>, testServices: TestServices) {
        testServices.assertions.assertTrue(value = pointer.pointsToTheSameSymbolAs(pointer)) {
            "The symbol is not equal to itself: ${pointer::class}"
        }
    }

    context(_: KaSession)
    private fun checkContainingFiles(symbols: List<KaSymbol>, mainFile: KtFile, testServices: TestServices) {
        val allowedContainingFileSymbols = getAllowedContainingFiles(mainFile, testServices).mapToSetOrEmpty {
            it.takeIf { it.canBeAnalysed() }?.symbol
        }

        for (symbol in symbols) {
            // Only source symbols have a meaningful containing file here.
            if (symbol.origin != KaSymbolOrigin.SOURCE) continue

            val containingFileSymbol = symbol.containingFile
            when {
                symbol is KaFileSymbol -> {
                    testServices.assertions.assertEquals(null, containingFileSymbol) {
                        "'containingFile' for ${KaFileSymbol::class.simpleName} should be 'null'"
                    }
                }
                containingFileSymbol !in allowedContainingFileSymbols -> {
                    testServices.assertions.fail {
                        "Invalid file for `$symbol`: Found `$containingFileSymbol`, which is not an allowed file symbol."
                    }
                }
            }
        }
    }

    /**
     * Returns the set of [KtFile]s which may contain any of the found symbols. If a symbol is not contained in one of these files, the test
     * fails.
     */
    open fun getAllowedContainingFiles(mainFile: KtFile, testServices: TestServices): Set<KtFile> = setOf(mainFile)

    private fun RegisteredDirectives.doNotCheckSymbolRestoreDirective(): Directive? = findSpecificDirective(
        commonDirective = DO_NOT_CHECK_SYMBOL_RESTORE,
        k1Directive = DO_NOT_CHECK_SYMBOL_RESTORE_K1,
        k2Directive = DO_NOT_CHECK_SYMBOL_RESTORE_K2,
    )

    private fun RegisteredDirectives.doNotCheckNonPsiSymbolRestoreDirective(): Directive? = findSpecificDirective(
        commonDirective = DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE,
        k1Directive = DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K1,
        k2Directive = DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K2,
    )

    private fun compareResults(
        data: SymbolPointersData,
        testServices: TestServices,
        disablePsiBasedLogic: Boolean,
    ) {
        val actual = data.pointers.renderDeclarations()
        compareResults(actual, testServices, disablePsiBasedLogic, extension = "txt")

        val actualPretty = data.pointersForPrettyRendering.renderDeclarations()
        compareResults(actualPretty, testServices, disablePsiBasedLogic, extension = "pretty.txt")
    }

    /**
     * Compares [actual] with the expected output file. In the non-PSI pass, a
     * dedicated `*.nonPsi.<extension>` file is consulted (K2 only) when the
     * output legitimately differs from the PSI-based variation; a stale
     * `nonPsi` file that matches the regular output is reported as an error.
     */
    private fun compareResults(actual: String, testServices: TestServices, disablePsiBasedLogic: Boolean, extension: String) {
        val assertions = testServices.assertions
        if (!disablePsiBasedLogic) {
            assertions.assertEqualsToTestOutputFile(actual = actual, extension = extension)
        } else {
            val expectedFile = getTestOutputFile(extension).toFile()
            val nonPsiExpectedFile = getTestOutputFile("nonPsi.$extension").toFile()
            when {
                assertions.doesEqualToFile(expectedFile, actual) -> {
                    if (nonPsiExpectedFile.exists() &&
                        configurator.frontendKind == FrontendKind.Fir &&
                        configurator.analysisApiMode == AnalysisApiMode.Ide
                    ) {
                        throw AssertionError("'${nonPsiExpectedFile.path}' should be removed as the actual output is the same as '${expectedFile.path}'")
                    }
                }
                else -> {
                    if (nonPsiExpectedFile.exists() && configurator.frontendKind == FrontendKind.Fir) {
                        assertions.assertEqualsToFile(nonPsiExpectedFile, actual)
                        return
                    }

                    val message = """
                        Non-PSI version doesn't equal to the PSI-based variation.
                        If you want to commit both results, please add a separate file "${expectedFile.nameWithoutExtension}.nonPsi.txt".
                    """.trimIndent()

                    throw AssertionFailedError(
                        /* message = */ message,
                        /* expected = */ expectedFile.readText(),
                        /* actual = */ actual,
                    )
                }
            }
        }
    }

    private fun List<PointerWithRenderedSymbol>.renderDeclarations(): String =
        mapNotNull { it.rendered.takeIf { _ -> it.shouldBeRendered } }
            .renderAsDeclarations()
            .applyIf(configurator.frontendKind == FrontendKind.Fe10) { stripOutSnapshotVersion() }

    private fun List<String>.renderAsDeclarations(): String =
        if (isEmpty()) "NO_SYMBOLS"
        else joinToString(separator = "\n\n")

    /**
     * Restores all symbols from their pointers in a fresh analysis session and
     * verifies the restored renders equal the originals. Failures are tolerated
     * only when [directiveToIgnore] is set; an unused directive is itself an error
     * (except in Standalone mode).
     */
    private fun restoreSymbolsInOtherReadActionAndCompareResults(
        directiveToIgnore: Directive?,
        ktFile: KtFile,
        pointersWithRendered: List<PointerWithRenderedSymbol>,
        testServices: TestServices,
        directives: RegisteredDirectives,
        disablePsiBasedLogic: Boolean,
        analyzeContext: KtElement?,
    ) {
        var failed = false
        val restoredPointers = mutableListOf<KaSymbolPointer<*>>()
        try {
            val restored = analyzeForTest(analyzeContext ?: ktFile) {
                pointersWithRendered.mapNotNull { (pointer, expectedRender, shouldBeRendered) ->
                    val pointer = pointer ?: error("Symbol pointer was not created for symbol:\n$expectedRender")
                    val restored = restoreSymbol(pointer, disablePsiBasedLogic) ?: error("Symbol was not restored:\n$expectedRender")
                    restoredPointers += pointer
                    val actualRender = renderSymbolForComparison(restored, directives)
                    if (shouldBeRendered) {
                        actualRender
                    } else {
                        // Implicit symbols are not part of the expected output file,
                        // but their restored render must still match the original.
                        testServices.assertions.assertEquals(expectedRender, actualRender) { "${restored::class}" }
                        null
                    }
                }
            }

            val actual = restored.renderAsDeclarations()
            val expectedFile = getTestOutputFile().toFile()
            if (!testServices.assertions.doesEqualToFile(expectedFile, actual)) {
                error("Restored content is not the same. Actual:\n$actual")
            }
        } catch (e: Throwable) {
            if (directiveToIgnore == null) throw e
            failed = true
        }

        if (!failed) {
            compareCachedSymbols(restoredPointers, testServices, ktFile, disablePsiBasedLogic, analyzeContext)
            compareRestoredSymbols(restoredPointers, testServices, ktFile, disablePsiBasedLogic, analyzeContext)
        }

        // Do not fail for standalone as the IDE mode may have different behavior and it is primary
        if (failed || directiveToIgnore == null || configurator.analysisApiMode == AnalysisApiMode.Standalone) return
        fail("'// ${directiveToIgnore.name}' directive has no effect on the test")
    }

    /**
     * Verifies that cacheable pointers return the identical symbol instance when
     * restored twice within the same analysis session.
     */
    private fun compareCachedSymbols(
        pointers: List<KaSymbolPointer<*>>,
        testServices: TestServices,
        ktFile: KtFile,
        disablePsiBasedLogic: Boolean,
        analyzeContext: KtElement?,
    ) {
        if (pointers.isEmpty()) return

        val contextElement = analyzeContext ?: ktFile
        analyzeForTest(contextElement) {
            pointers.forEach { pointer ->
                val firstRestore =
                    restoreSymbol(pointer, disablePsiBasedLogic)
                        ?: error("Unexpectedly non-restored symbol pointer: ${contextElement::class}")

                val secondRestore =
                    restoreSymbol(pointer, disablePsiBasedLogic)
                        ?: error("Unexpectedly non-restored symbol pointer: ${contextElement::class}")

                if (firstRestore.isCacheable) {
                    testServices.assertions.assertTrue(firstRestore === secondRestore) {
                        "${pointer::class} does not support symbol caching"
                    }
                }
            }
        }
    }

    /**
     * Groups the pointers by the symbol they restore to, adds a freshly created
     * pointer for each restored symbol, and verifies all pointers within a group
     * agree with each other via [KaSymbolPointer.pointsToTheSameSymbolAs].
     */
    private fun compareRestoredSymbols(
        restoredPointers: List<KaSymbolPointer<*>>,
        testServices: TestServices,
        ktFile: KtFile,
        disablePsiBasedLogic: Boolean,
        analyzeContext: KtElement?,
    ) {
        if (restoredPointers.isEmpty()) return

        analyzeForTest(analyzeContext ?: ktFile) {
            val symbolsToPointersMap = restoredPointers.groupByTo(mutableMapOf()) {
                restoreSymbol(it, disablePsiBasedLogic) ?: error("Unexpectedly non-restored symbol pointer: ${it::class}")
            }

            val pointersToCheck = symbolsToPointersMap.map { (key, value) ->
                value += key.createPointerForTest(disablePsiBasedLogic = disablePsiBasedLogic)
                value
            }

            for (pointers in pointersToCheck) {
                for (firstPointer in pointers) {
                    for (secondPointer in pointers) {
                        testServices.assertions.assertTrue(firstPointer.pointsToTheSameSymbolAs(secondPointer)) {
                            "${firstPointer::class} is not the same as ${secondPointer::class}"
                        }
                    }
                }
            }
        }
    }

    /** Renders [symbol] with the debug renderer for exact-output comparison. */
    protected open fun KaSession.renderSymbolForComparison(symbol: KaSymbol, directives: RegisteredDirectives): String {
        val renderer = KaDebugRenderer(
            renderExtra = true,
            renderExpandedTypes = directives[PRETTY_RENDERER_OPTION].any { it == PrettyRendererOption.FULLY_EXPANDED_TYPES },
            renderIsPublicApi = RENDER_IS_PUBLIC_API in directives
        )

        return with(renderer) { render(useSiteSession, symbol) }
    }
}
/**
 * Test directives understood by [AbstractSymbolTest] and its subclasses.
 */
object SymbolTestDirectives : SimpleDirectivesContainer() {
    val DO_NOT_CHECK_SYMBOL_RESTORE by directive(
        description = "Symbol restoring for some symbols in current test is not supported yet",
    )

    val DO_NOT_CHECK_SYMBOL_RESTORE_K1 by directive(
        description = "Symbol restoring for some symbols in current test is not supported yet in K1",
    )

    val DO_NOT_CHECK_SYMBOL_RESTORE_K2 by directive(
        description = "Symbol restoring for some symbols in current test is not supported yet in K2",
    )

    val DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE by directive(
        description = "Symbol restoring w/o psi for some symbols in current test is not supported yet",
    )

    val DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K1 by directive(
        description = "Symbol restoring w/o psi for some symbols in current test is not supported yet in K1",
    )

    val DO_NOT_CHECK_NON_PSI_SYMBOL_RESTORE_K2 by directive(
        description = "Symbol restoring w/o psi for some symbols in current test is not supported yet in K2",
    )

    val PRETTY_RENDERER_OPTION by enumDirective(description = "Explicit rendering mode") { PrettyRendererOption.valueOf(it) }

    val TARGET_FILE_NAME by stringDirective(description = "The name of the main file")

    val ILLEGAL_PSI by stringDirective(description = "Symbol should not be created for this PSI element")

    val RENDER_IS_PUBLIC_API by directive(description = "Render `isPublicApi` attribute for symbols")
}
/**
 * Rendering options selectable via [SymbolTestDirectives.PRETTY_RENDERER_OPTION].
 * Each option is a [transformation] applied on top of the test's default
 * [KaDeclarationRenderer].
 */
enum class PrettyRendererOption(val transformation: (KaDeclarationRenderer) -> KaDeclarationRenderer) {
    // Render classifier bodies including their members.
    BODY_WITH_MEMBERS(
        { renderer ->
            renderer.with {
                classifierBodyRenderer = KaClassifierBodyRenderer.BODY_WITH_MEMBERS
            }
        }
    ),

    // Render fully-expanded types instead of type-alias abbreviations.
    FULLY_EXPANDED_TYPES(
        { renderer ->
            renderer.with {
                typeRenderer = typeRenderer.with {
                    expandedTypeRenderingMode = KaExpandedTypeRenderingMode.RENDER_EXPANDED_TYPE
                    functionalTypeRenderer = KaFunctionalTypeRenderer.AS_CLASS_TYPE_FOR_REFLECTION_TYPES_WITH_PARAMETER_NAMES
                }
            }
        }
    )
}
/**
 * Whether a symbol may be created for this declaration in symbol tests.
 */
internal val KtDeclaration.isValidForSymbolCreation
    get() = when (this) {
        // Accessor-like and destructuring declarations have no standalone symbol.
        is KtBackingField, is KtDestructuringDeclaration, is KtPropertyAccessor -> false
        is KtParameter -> !isFunctionTypeParameter && ownerDeclaration == null
        is KtNamedFunction -> name != null
        else -> true
    }
/**
 * Symbols collected from a test file.
 */
data class SymbolsData(
    // Symbols whose pointers are created, restored, and compared.
    val symbols: List<KaSymbol>,
    // Symbols rendered with the pretty renderer; defaults to [symbols].
    val symbolsForPrettyRendering: List<KaSymbol> = symbols,
)
/**
 * Pointers and rendered text for both the debug and the pretty rendering passes.
 */
private data class SymbolPointersData(
    val pointers: List<PointerWithRenderedSymbol>,
    val pointersForPrettyRendering: List<PointerWithRenderedSymbol>,
)
/**
 * A symbol pointer together with the symbol's rendered text.
 */
private data class PointerWithRenderedSymbol(
    // `null` when pointer creation failed (and was tolerated) or was suppressed.
    val pointer: KaSymbolPointer<*>?,
    val rendered: String,
    // `false` for implicit symbols: they are round-tripped but not part of the expected output.
    val shouldBeRendered: Boolean = true,
)
/**
 * Lazily yields the receiver symbol followed by all symbols implicitly owned
 * by it: type parameters, context parameters, receiver parameters, property
 * accessors, value parameters, and properties generated from primary
 * constructor parameters.
 *
 * The receiver itself is always yielded first, so downstream `distinctBy`
 * keeps the explicitly collected occurrence over implicit duplicates.
 */
private fun KaSymbol?.withImplicitSymbols(): Sequence<KaSymbol> {
    val ktSymbol = this ?: return emptySequence()
    return sequence {
        yield(ktSymbol)
        if (ktSymbol is KaDeclarationSymbol) {
            for (parameter in ktSymbol.typeParameters) {
                yieldAll(parameter.withImplicitSymbols())
            }
        }

        if (ktSymbol is KaCallableSymbol) {
            for (parameter in ktSymbol.contextParameters) {
                yieldAll(parameter.withImplicitSymbols())
            }

            yieldAll(ktSymbol.receiverParameter.withImplicitSymbols())
        }

        if (ktSymbol is KaPropertySymbol) {
            yieldAll(ktSymbol.getter.withImplicitSymbols())
            yieldAll(ktSymbol.setter.withImplicitSymbols())
        }

        if (ktSymbol is KaFunctionSymbol) {
            for (parameter in ktSymbol.valueParameters) {
                yieldAll(parameter.withImplicitSymbols())
            }
        }

        if (ktSymbol is KaValueParameterSymbol) {
            yieldAll(ktSymbol.generatedPrimaryConstructorProperty.withImplicitSymbols())
        }
    }
}
/**
 * Restores [pointer] in the current session; in the non-PSI pass the restored
 * symbol's backing PSI is dropped before it is returned.
 */
private fun <S : KaSymbol> KaSession.restoreSymbol(pointer: KaSymbolPointer<S>, disablePsiBasedLogic: Boolean): S? =
    pointer.restoreSymbol()?.also { restored ->
        if (disablePsiBasedLogic) {
            restored.dropBackingPsi()
        }
    }
/** Drops the backing PSI from every collected symbol in this [SymbolsData]. */
private fun SymbolsData.dropBackingPsi() {
    for (collected in symbols) {
        collected.dropBackingPsi()
    }

    for (prettyRendered in symbolsForPrettyRendering) {
        prettyRendered.dropBackingPsi()
    }
}
/**
 * Some K2 implementations of [KaSymbol] is backed by some [PsiElement],
 * so they may implement some API on top of PSI, FirSymbols or both of them.
 *
 * FirSymbol-based implementation is the source of truth, so the PSI-based implementation
 * exists to cover simple cases.
 *
 * As most of the symbols have the underlying PSI element, it is crucial to
 * have consistent implementation for PSI-based and FirSymbol-based symbols.
 *
 * This function reflectively nulls out the symbol's `backingPsi` field so that
 * subsequent API calls are forced through the FirSymbol-based code paths.
 */
private fun KaSymbol.dropBackingPsi() {
    // The marker interface lives in a module this test module does not depend on,
    // hence the reflective lookup by fully qualified name.
    val interfaceInstance = Class.forName("org.jetbrains.kotlin.analysis.api.fir.symbols.KaFirPsiSymbol")
    val symbolType = KaSymbol::class.createType()
    val thisClass = this::class
    for (property in thisClass.declaredMemberProperties) {
        // Some symbols may have owning symbols, so they should be invalidated as well
        if (!property.name.startsWith("owning") || !property.returnType.isSubtypeOf(symbolType)) continue
        val symbol = property.getter.call(this) as KaSymbol
        symbol.dropBackingPsi()
    }

    if (!interfaceInstance.isInstance(this)) return
    when (thisClass.simpleName) {
        // Those classes are PSI-based only, so they have FirSymbol only for the compatibility with other classes
        "KaFirPsiJavaClassSymbol",
        "KaFirPsiJavaTypeParameterSymbol",
            -> return

        // These classes depend on the property PSI. The owning property is already invalidated above
        "KaFirDefaultPropertyGetterSymbol",
        "KaFirDefaultPropertySetterSymbol",
        "KaFirPropertyGetterSymbol",
        "KaFirPropertySetterSymbol",
            -> return
    }

    val property = thisClass.memberProperties.single { it.name == "backingPsi" }
    val returnType = property.returnType
    if (!returnType.isSubtypeOf(PsiElement::class.createType().withNullability(true))) {
        error("Unexpected return type '$returnType' for '${this::class.simpleName}' class")
    }

    val field = property.javaField ?: error("Backing field is not found")
    field.isAccessible = true

    // Drop backing PSI to trigger non-psi implementation
    field.set(this, null)
}
/** Whether this throwable (or a cause in its chain) signals an illegal-PSI failure. */
private val Throwable.isIllegalPsiException: Boolean
    get() = when {
        this is KaBaseIllegalPsiException -> true
        this is ExecutionException -> cause?.isIllegalPsiException == true
        this is KotlinIllegalArgumentExceptionWithAttachments ->
            message?.startsWith("Creating ${KaVariableSymbol::class.simpleName} for function type parameter is not possible.") == true
        else -> false
    }
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
class product_product(osv.osv):
    # Extends product.product with BoM-based standard-price computation
    _name = 'product.product'
    _inherit = 'product.product'

    def compute_price(self, cr, uid, ids, recursive=False, test=False, real_time_accounting = False, context=None):
        '''
        Compute the standard price of each product in `ids` from its BoM.

        When `test` is True nothing is written: a dict mapping product id to
        the computed price is returned instead, to inform the user about the
        changes that would be made. When `test` is False the prices are
        written (see _calc_price) and True is returned.

        With `recursive` True, component products of each BoM are priced
        first, bottom-up, so parent prices use up-to-date component costs.
        '''
        testdict = {}
        for prod_id in ids:
            bom_obj = self.pool.get('mrp.bom')
            bom_id = bom_obj._bom_find(cr, uid, product_id = prod_id, context=context)
            if bom_id:
                # In recursive mode, it will first compute the prices of child boms
                if recursive:
                    #Search the products that are components of this bom of prod_id
                    bom = bom_obj.browse(cr, uid, bom_id, context=context)
                    #Call compute_price on these subproducts
                    prod_set = set([x.product_id.id for x in bom.bom_line_ids])
                    res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context)
                    if test:
                        testdict.update(res)
                #Use calc price to calculate and put the price on the product of the BoM if necessary
                price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context)
                if test:
                    testdict.update({prod_id : price})
        if test:
            return testdict
        else:
            return True

    def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None):
        '''
        Compute the unit cost of the product built by `bom`: component line
        costs (UoM-converted) plus routing workcenter cycle/hour costs.

        Unless `test` is True, the price is written on the product template —
        directly, or through the standard-price change wizard when the product
        uses real-time valuation and `real_time_accounting` is set.
        Returns the computed price.
        '''
        if context is None:
            context={}
        price = 0
        uom_obj = self.pool.get("product.uom")
        tmpl_obj = self.pool.get('product.template')
        # Component costs, each converted from the component's UoM to the BoM line UoM
        for sbom in bom.bom_line_ids:
            my_qty = sbom.product_qty
            price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty
        # Workcenter costs: setup + teardown + per-cycle time, weighted by efficiency
        if bom.routing_id:
            for wline in bom.routing_id.workcenter_lines:
                wc = wline.workcenter_id
                cycle = wline.cycle_nbr
                hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
                price += wc.costs_cycle * cycle + wc.costs_hour * hour
        # NOTE(review): the price is converted to the product UoM twice — here
        # and again inside the `if price > 0` branch below (which also divides
        # by the BoM quantity). Confirm this double conversion is intended.
        price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id)
        #Convert on product UoM quantities
        if price > 0:
            price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id)
        product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context)
        if not test:
            if (product.valuation != "real_time" or not real_time_accounting):
                tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context)
            else:
                #Call wizard function here
                wizard_obj = self.pool.get("stock.change.standard.price")
                ctx = context.copy()
                ctx.update({'active_id': product.id, 'active_model': 'product.template'})
                wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx)
                wizard_obj.change_price(cr, uid, [wiz_id], context=ctx)
        return price

product_product()
class product_bom(osv.osv):
    # Expose the product template's standard price on the BoM for display
    _inherit = 'mrp.bom'

    _columns = {
        # Read-only mirror of the template's standard_price (not stored)
        'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False)
    }

product_bom()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
#import StellarMass
import XrayLuminosity
import numpy as n
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
print " set up box, and redshift "
#MD 1 hlist_0.74980_SAM_Nb_0.fits
#MD 25 hlist_0.75440_SAM_Nb_10.fits
#duty_cycle = 0.01
# Stellar-mass histogram bin edges: log10(M*) from 6 to 13 in 0.1 dex steps
bins = n.arange(6,13,0.1)
# Bin centers (midpoints of consecutive edges), for plotting/interpolation
xb = (bins[1:] + bins[:-1]) / 2.
def measureSMF(snap_name, env='MD10', volume=1000.**3., out_dir="../"):
fileList = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_Ms.fits")))
fileList.sort()
print fileList
Hall = n.zeros((len(fileList), len(bins)-1))
for ii, fileN in enumerate(fileList):
print fileN
hh = fits.open(fileN)
mass = hh[1].data['stellar_mass_Mo13_mvir']
print mass
selection = (mass>0) # (hh[1].data['stellar_mass_reliable'])&(mass>0)
Hall[ii], bb = n.histogram(hh[1].data['stellar_mass_Mo13_mvir'], bins=bins)
counts = n.sum(Hall, axis=0)
dN_dVdlogM = counts*0.6777**3./(bins[1:]-bins[:-1])/volume/n.log(10)
data = n.transpose([bins[:-1], bins[1:], counts, dN_dVdlogM ])
n.savetxt(os.path.join(out_dir, "out_"+snap_name+"_SMF.txt"), data, header = "logMs_low logMs_up counts dN_dVdlogM")
def measureSMF_tracer(snap_name, tracer_name, env='MD10', volume=1000.**3., out_dir="../"):
out_file = os.path.join(out_dir, "out_"+snap_name+"_"+tracer_name+"_SMF.txt")
#if os.path.isfile(out_file)==False:
fileList = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_Ms.fits")))
fileList.sort()
fileList_T = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_"+tracer_name+".fits")))
fileList_T.sort()
tracer_name
print fileList, fileList_T
if len(fileList_T)==len(fileList):
Hall = n.zeros((len(fileList), len(bins)-1))
for ii, fileN in enumerate(fileList):
print fileN
hh = fits.open(fileN)
lines = fits.open(fileList_T[ii])[1].data['line_number']
mass = hh[1].data['stellar_mass_Mo13_mvir'][lines]
Hall[ii], bb = n.histogram(mass, bins=bins)
counts = n.sum(Hall, axis=0)
dN_dVdlogM = counts*0.6777**3./(bins[1:]-bins[:-1])/volume/n.log(10)
data = n.transpose([bins[:-1], bins[1:], counts, dN_dVdlogM ])
n.savetxt(out_file, data, header = "logMs_low logMs_up counts dN_dVdlogM")
# open the output file_type
# Summary table listing every snapshot of the MD 1 Gpc/h box
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data

out_dir = os.path.join(os.path.join(os.environ['MD10'], "duty_cycle"))

# Iterate over the snapshots in reverse order and measure the total SMF.
# Per-tracer measurements are kept below, commented out, for reference.
for el in summ[::-1]:
    print el
    measureSMF(snap_name=el["snap_name"], env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S5_BCG", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S5_GAL", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S6_AGN", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_BG1", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_BG2", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_ELG", env='MD10', volume=1000.**3., out_dir = out_dir)
    #measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_QSO", env='MD10', volume=1000.**3., out_dir = out_dir)
"""
Tests of the XBlock-family functionality mixins
"""
from unittest import TestCase
from xblock.fields import List, Scope, Integer
from xblock.mixins import ScopedStorageMixin, HierarchyMixin, IndexInfoMixin, ViewsMixin
class AttrAssertionMixin(TestCase):
    """
    A mixin to add attribute assertion methods to TestCases.
    """
    def assertHasAttr(self, obj, attr):
        "Assert that `obj` has the attribute named `attr`."
        attr_present = hasattr(obj, attr)
        self.assertTrue(attr_present, "{!r} doesn't have attribute {!r}".format(obj, attr))

    def assertNotHasAttr(self, obj, attr):
        "Assert that `obj` doesn't have the attribute named `attr`."
        attr_present = hasattr(obj, attr)
        self.assertFalse(attr_present, "{!r} has attribute {!r}".format(obj, attr))
class TestScopedStorageMixin(AttrAssertionMixin, TestCase):
    "Tests of the ScopedStorageMixin."

    class ScopedStorageMixinTester(ScopedStorageMixin):
        """Toy class for ScopedStorageMixin testing"""
        field_a = Integer(scope=Scope.settings)
        field_b = Integer(scope=Scope.content)

    class ChildClass(ScopedStorageMixinTester):
        """Toy class for ModelMetaclass testing"""
        pass

    class FieldsMixin(object):
        """Toy mixin for field testing"""
        field_c = Integer(scope=Scope.settings)

    class MixinChildClass(FieldsMixin, ScopedStorageMixinTester):
        """Toy class for ScopedStorageMixin testing with mixed-in fields"""
        pass

    class MixinGrandchildClass(MixinChildClass):
        """Toy class for ScopedStorageMixin testing with inherited mixed-in fields"""
        pass

    def test_scoped_storage_mixin(self):
        # `ScopedStorageMixinTester` and `ChildClass` both obtain the `fields`
        # attribute from the metaclass. Since this is not understood by static
        # analysis, silence this error for the duration of this test.
        # pylint: disable=E1101
        self.assertIsNot(self.ScopedStorageMixinTester.fields, self.ChildClass.fields)

        for cls in (self.ScopedStorageMixinTester, self.ChildClass):
            for field_name in ('field_a', 'field_b'):
                self.assertHasAttr(cls, field_name)
                self.assertIs(getattr(cls, field_name), cls.fields[field_name])

    def test_with_mixins(self):
        # Testing model metaclass with mixins

        # `MixinChildClass` and `MixinGrandchildClass` both obtain the `fields`
        # attribute from the metaclass. Since this is not understood by static
        # analysis, silence this error for the duration of this test.
        # pylint: disable=E1101
        for cls in (self.MixinChildClass, self.MixinGrandchildClass):
            self.assertHasAttr(cls, 'field_a')
            self.assertHasAttr(cls, 'field_c')

        self.assertIs(self.MixinChildClass.field_a, self.MixinChildClass.fields['field_a'])
        self.assertIs(self.FieldsMixin.field_c, self.MixinChildClass.fields['field_c'])

        self.assertIs(self.MixinGrandchildClass.field_a, self.MixinGrandchildClass.fields['field_a'])
        self.assertIs(self.MixinGrandchildClass.field_c, self.MixinGrandchildClass.fields['field_c'])
class TestHierarchyMixin(AttrAssertionMixin, TestCase):
    "Tests of the HierarchyMixin."

    class HasChildren(HierarchyMixin):
        """Toy class for ChildrenModelMetaclass testing"""
        has_children = True

    class WithoutChildren(HierarchyMixin):
        """Toy class for ChildrenModelMetaclass testing"""
        pass

    class InheritedChildren(HasChildren):
        """Toy class for ChildrenModelMetaclass testing"""
        pass

    def test_children_metaclass(self):
        # The `children` attribute and the `has_children` flag come from the
        # metaclass, which static analysis does not understand, so silence
        # the resulting error for the duration of this test.
        # pylint: disable=E1101
        self.assertFalse(self.WithoutChildren.has_children)
        self.assertNotHasAttr(self.WithoutChildren, 'children')

        for cls in (self.HasChildren, self.InheritedChildren):
            self.assertTrue(cls.has_children)
            self.assertHasAttr(cls, 'children')
            self.assertIsInstance(cls.children, List)
            self.assertEqual(Scope.children, cls.children.scope)
class TestIndexInfoMixin(AttrAssertionMixin):
    """
    Tests for Index
    """
    class IndexInfoMixinTester(IndexInfoMixin):
        """Test class for index mixin"""
        pass

    def test_index_info(self):
        self.assertHasAttr(self.IndexInfoMixinTester, 'index_dictionary')
        index_info = self.IndexInfoMixinTester().index_dictionary()
        # The default index dictionary is an empty dict
        self.assertIsInstance(index_info, dict)
        self.assertFalse(index_info)
class TestViewsMixin(TestCase):
    """
    Tests for ViewsMixin
    """
    def test_supports_view_decorator(self):
        """
        Tests the @supports decorator for xBlock view methods
        """
        class SupportsDecoratorTester(ViewsMixin):
            """
            Test class for @supports decorator
            """
            @ViewsMixin.supports("a_functionality")
            def functionality_supported_view(self):
                """
                A view that supports a functionality
                """
                pass  # pragma: no cover

            @ViewsMixin.supports("functionality1", "functionality2")
            def multi_featured_view(self):
                """
                A view that supports multiple functionalities
                """
                pass  # pragma: no cover

            def an_unsupported_view(self):
                """
                A view that does not support any functionality
                """
                pass  # pragma: no cover

        test_xblock = SupportsDecoratorTester()

        for view_name, functionality, expected_result in (
                ("functionality_supported_view", "a_functionality", True),
                ("functionality_supported_view", "bogus_functionality", False),
                ("functionality_supported_view", None, False),

                ("an_unsupported_view", "a_functionality", False),

                ("multi_featured_view", "functionality1", True),
                ("multi_featured_view", "functionality2", True),
                ("multi_featured_view", "bogus_functionality", False),
        ):
            # assertEqual: assertEquals is a deprecated alias removed in newer unittest
            self.assertEqual(
                test_xblock.has_support(getattr(test_xblock, view_name), functionality),
                expected_result
            )

    def test_has_support_override(self):
        """
        Tests overriding has_support
        """
        class HasSupportOverrideTester(ViewsMixin):
            """
            Test class for overriding has_support
            """
            def has_support(self, view, functionality):
                """
                Overrides implementation of has_support
                """
                return functionality == "a_functionality"

        test_xblock = HasSupportOverrideTester()

        for view_name, functionality, expected_result in (
                ("functionality_supported_view", "a_functionality", True),
                ("functionality_supported_view", "bogus_functionality", False),
        ):
            # The overridden has_support ignores the view entirely, so a
            # missing view attribute (getattr default None) is acceptable.
            self.assertEqual(
                test_xblock.has_support(getattr(test_xblock, view_name, None), functionality),
                expected_result
            )
use once_cell::sync::Lazy;
use turbopack_trace_utils::tracing_presets::{
TRACING_OVERVIEW_TARGETS, TRACING_TURBO_TASKS_TARGETS, TRACING_TURBOPACK_TARGETS,
};
/// Overview-level tracing targets: the generic overview set extended with the
/// Next.js-specific crates at `info` level.
pub static TRACING_NEXT_OVERVIEW_TARGETS: Lazy<Vec<&str>> = Lazy::new(|| {
    [
        &TRACING_OVERVIEW_TARGETS[..],
        &[
            "next_napi_bindings=info",
            "next_swc=info",
            "next_api=info",
            "next_dev=info",
            "next_core=info",
            "next_font=info",
            "turbopack_node=info",
        ],
    ]
    .concat()
});
/// Detailed tracing targets: the overview set plus the Next.js crates raised
/// to `trace` level (the later entries override the earlier `info` entries).
pub static TRACING_NEXT_TARGETS: Lazy<Vec<&str>> = Lazy::new(|| {
    [
        &TRACING_NEXT_OVERVIEW_TARGETS[..],
        &[
            "next_napi_bindings=trace",
            "next_swc=trace",
            "next_api=trace",
            "next_dev=trace",
            "next_core=trace",
            "next_font=trace",
        ],
    ]
    .concat()
});
/// Detailed Next.js targets combined with the Turbopack trace targets.
pub static TRACING_NEXT_TURBOPACK_TARGETS: Lazy<Vec<&str>> =
    Lazy::new(|| [&TRACING_NEXT_TARGETS[..], &TRACING_TURBOPACK_TARGETS[..]].concat());
/// The most verbose preset: Next.js + Turbopack targets plus the turbo-tasks
/// trace targets.
pub static TRACING_NEXT_TURBO_TASKS_TARGETS: Lazy<Vec<&str>> = Lazy::new(|| {
    [
        &TRACING_NEXT_TURBOPACK_TARGETS[..],
        &TRACING_TURBO_TASKS_TARGETS[..],
    ]
    .concat()
});
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
import datetime
import os
import platform
import random
import shutil
import socket
import sys
import time
from ansible.errors import AnsibleOptionsError
from ansible.cli import CLI
from ansible.plugins import module_loader
from ansible.utils.cmd_functions import run_cmd
# Reuse the process-wide Display object when running under the ansible CLI
# entry point; otherwise (e.g. direct import) create a fresh one.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
########################################################
class PullCLI(CLI):
    ''' code behind ansible-pull: check out a playbook repository and run a playbook from it locally '''

    DEFAULT_REPO_TYPE = 'git'
    DEFAULT_PLAYBOOK = 'local.yml'
    # error code returned by try_playbook() -> human readable message
    PLAYBOOK_ERRORS = {
        1: 'File does not exist',
        2: 'File is not readable'
    }
    SUPPORTED_REPO_MODULES = ['git']

    def parse(self):
        ''' create and run the options parser for ansible-pull, validating the result '''

        self.parser = CLI.base_parser(
            usage='%prog -U <repository> [options]',
            connect_opts=True,
            vault_opts=True,
            runtask_opts=True,
            subset_opts=True,
            inventory_opts=True,
            module_opts=True,
            runas_prompt_opts=True,
        )

        # options unique to pull
        self.parser.add_option('--purge', default=False, action='store_true',
            help='purge checkout after playbook run')
        self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
            help='only run the playbook if the repository has been updated')
        self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
            help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
        self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
            help='run the playbook even if the repository could not be updated')
        self.parser.add_option('-d', '--directory', dest='dest', default=None,
            help='directory to checkout repository to')
        self.parser.add_option('-U', '--url', dest='url', default=None,
            help='URL of the playbook repository')
        self.parser.add_option('--full', dest='fullclone', action='store_true',
            help='Do a full clone, instead of a shallow one.')
        self.parser.add_option('-C', '--checkout', dest='checkout',
            help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
        self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
            help='adds the hostkey for the repo url if not already added')
        self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
            help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
        self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
            help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
                 ' This needs the corresponding VCS module to support such an operation')
        self.parser.add_option('--clean', dest='clean', default=False, action='store_true',
            help='modified files in the working repository will be discarded')
        self.parser.add_option('--track-subs', dest='tracksubs', default=False, action='store_true',
            help='submodules will track the latest changes'
                 ' This is equivalent to specifying the --remote flag to git submodule update')

        # for pull we don't want a default inventory
        self.parser.set_defaults(inventory=None)

        self.options, self.args = self.parser.parse_args(self.args[1:])

        if not self.options.dest:
            hostname = socket.getfqdn()
            # use a hostname dependent directory, in case of $HOME on nfs
            self.options.dest = os.path.join('~/.ansible/pull', hostname)
        self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest))

        # --sleep gives an upper bound; the actual sleep duration is randomized here
        if self.options.sleep:
            try:
                secs = random.randint(0,int(self.options.sleep))
                self.options.sleep = secs
            except ValueError:
                raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

        if not self.options.url:
            raise AnsibleOptionsError("URL for repository not specified, use -h for help")

        # NOTE(review): 'Unsuported' typo in this user-facing message
        if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
            raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

        display.verbosity = self.options.verbosity
        self.validate_conflicts(vault_opts=True)

    def run(self):
        ''' check out (or update) the repository with a VCS module, then run the selected playbook locally '''

        super(PullCLI, self).run()

        # log command line
        now = datetime.datetime.now()
        display.display(now.strftime("Starting Ansible Pull at %F %T"))
        display.display(' '.join(sys.argv))

        # Build Checkout command
        # Now construct the ansible command
        node = platform.node()
        host = socket.getfqdn()
        # limit to the various names this host is known by, so only local plays apply
        limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
        base_opts = '-c local '
        if self.options.verbosity > 0:
            base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])

        # Attempt to use the inventory passed in as an argument
        # It might not yet have been downloaded so use localhost as default
        if not self.options.inventory or ( ',' not in self.options.inventory and not os.path.exists(self.options.inventory)):
            inv_opts = 'localhost,'
        else:
            inv_opts = self.options.inventory

        #FIXME: enable more repo modules hg/svn?
        if self.options.module_name == 'git':
            repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' version=%s' % self.options.checkout

            if self.options.accept_host_key:
                repo_opts += ' accept_hostkey=yes'

            if self.options.private_key_file:
                repo_opts += ' key_file=%s' % self.options.private_key_file

            if self.options.verify:
                repo_opts += ' verify_commit=yes'

            if self.options.clean:
                repo_opts += ' force=yes'

            if self.options.tracksubs:
                repo_opts += ' track_submodules=yes'

            if not self.options.fullclone:
                repo_opts += ' depth=1'

        path = module_loader.find_plugin(self.options.module_name)
        if path is None:
            raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

        bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        # hardcode local and inventory/host as this is just meant to fetch the repo
        cmd = '%s/ansible -i "%s" %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts)

        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev

        # Nap?
        if self.options.sleep:
            display.display("Sleeping for %d seconds..." % self.options.sleep)
            time.sleep(self.options.sleep)

        # RUN the Checkout command
        display.debug("running ansible with VCS module to checkout repo")
        display.vvvv('EXEC: %s' % cmd)
        rc, out, err = run_cmd(cmd, live=True)

        if rc != 0:
            if self.options.force:
                display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
            else:
                return rc
        elif self.options.ifchanged and '"changed": true' not in out:
            display.display("Repository has not changed, quitting.")
            return 0

        playbook = self.select_playbook(self.options.dest)
        if playbook is None:
            raise AnsibleOptionsError("Could not find a playbook to run.")

        # Build playbook command
        cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
        if self.options.vault_password_file:
            cmd += " --vault-password-file=%s" % self.options.vault_password_file
        if self.options.inventory:
            cmd += ' -i "%s"' % self.options.inventory
        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev
        if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass:
            cmd += ' --ask-become-pass'
        if self.options.skip_tags:
            cmd += ' --skip-tags "%s"' % self.options.skip_tags
        if self.options.tags:
            cmd += ' -t "%s"' % self.options.tags
        if self.options.subset:
            cmd += ' -l "%s"' % self.options.subset
        else:
            cmd += ' -l "%s"' % limit_opts

        os.chdir(self.options.dest)

        # RUN THE PLAYBOOK COMMAND
        display.debug("running ansible-playbook to do actual work")
        display.debug('EXEC: %s' % cmd)
        rc, out, err = run_cmd(cmd, live=True)

        if self.options.purge:
            # chdir away first: the cwd is about to be removed
            os.chdir('/')
            try:
                shutil.rmtree(self.options.dest)
            except Exception as e:
                display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))

        return rc

    def try_playbook(self, path):
        ''' return 0 when `path` is a readable file, else a PLAYBOOK_ERRORS code '''
        if not os.path.exists(path):
            return 1
        if not os.access(path, os.R_OK):
            return 2
        return 0

    def select_playbook(self, path):
        ''' pick the playbook to run: the explicit CLI argument if given, otherwise
            <fqdn>.yml, then <short hostname>.yml, then local.yml inside `path` '''
        playbook = None
        if len(self.args) > 0 and self.args[0] is not None:
            playbook = os.path.join(path, self.args[0])
            rc = self.try_playbook(playbook)
            if rc != 0:
                display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
                return None
            return playbook
        else:
            fqdn = socket.getfqdn()
            hostpb = os.path.join(path, fqdn + '.yml')
            shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
            localpb = os.path.join(path, self.DEFAULT_PLAYBOOK)
            errors = []
            for pb in [hostpb, shorthostpb, localpb]:
                rc = self.try_playbook(pb)
                if rc == 0:
                    playbook = pb
                    break
                else:
                    errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
            if playbook is None:
                display.warning("\n".join(errors))
            return playbook
//===--- ThreadsafeFS.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "support/ThreadsafeFS.h"
#include "Logger.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
namespace clang {
namespace clangd {
namespace {
/// Always opens files in the underlying filesystem as "volatile", meaning they
/// won't be memory-mapped. Memory-mapping isn't desirable for clangd:
/// - edits to the underlying files change contents MemoryBuffers owned by
///   SourceManager, breaking its invariants and leading to crashes
/// - it locks files on windows, preventing edits
class VolatileFileSystem : public llvm::vfs::ProxyFileSystem {
public:
  explicit VolatileFileSystem(llvm::IntrusiveRefCntPtr<FileSystem> FS)
      : ProxyFileSystem(std::move(FS)) {}

  llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
  openFileForRead(const llvm::Twine &InPath) override {
    llvm::SmallString<128> Path;
    InPath.toVector(Path);

    auto File = getUnderlyingFS().openFileForRead(Path);
    if (!File)
      return File;
    // Try to guess preamble files, they can be memory-mapped even on Windows as
    // clangd has exclusive access to those and nothing else should touch them.
    llvm::StringRef FileName = llvm::sys::path::filename(Path);
    if (FileName.starts_with("preamble-") && FileName.ends_with(".pch"))
      return File;
    return std::make_unique<VolatileFile>(std::move(*File));
  }

private:
  // Wraps a vfs::File and forces IsVolatile=true on every getBuffer() call,
  // so the underlying FS never hands out a memory-mapped buffer.
  class VolatileFile : public llvm::vfs::File {
  public:
    VolatileFile(std::unique_ptr<llvm::vfs::File> Wrapped)
        : Wrapped(std::move(Wrapped)) {
      assert(this->Wrapped);
    }

    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
    getBuffer(const llvm::Twine &Name, int64_t FileSize,
              bool RequiresNullTerminator, bool /*IsVolatile*/) override {
      // Caller's IsVolatile is deliberately ignored; always request volatile.
      return Wrapped->getBuffer(Name, FileSize, RequiresNullTerminator,
                                /*IsVolatile=*/true);
    }

    llvm::ErrorOr<llvm::vfs::Status> status() override {
      return Wrapped->status();
    }
    llvm::ErrorOr<std::string> getName() override { return Wrapped->getName(); }
    std::error_code close() override { return Wrapped->close(); }

  private:
    std::unique_ptr<File> Wrapped;
  };
};
} // namespace
// Returns a filesystem view rooted at CWD. A failure to change directory is
// logged but otherwise ignored; the view is returned regardless.
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
ThreadsafeFS::view(PathRef CWD) const {
  auto Result = view(std::nullopt);
  std::error_code EC = Result->setCurrentWorkingDirectory(CWD);
  if (EC)
    elog("VFS: failed to set CWD to {0}: {1}", CWD, EC.message());
  return Result;
}
// Creates a fresh view over the real filesystem, wrapped so that no buffer
// is ever memory-mapped (see VolatileFileSystem above).
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
RealThreadsafeFS::viewImpl() const {
  // Avoid using memory-mapped files.
  // FIXME: Try to use a similar approach in Sema instead of relying on
  //        propagation of the 'isVolatile' flag through all layers.
  return new VolatileFileSystem(llvm::vfs::createPhysicalFileSystem());
}
} // namespace clangd
} // namespace clang | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/support/ThreadsafeFS.cpp |
from __future__ import division, absolute_import, print_function
# Compatibility shim: re-export the numpy universal functions listed below
# under this module's namespace. Note that numpy's `divide` is additionally
# exposed under the legacy alias `divide_safe`.
__all__ = ['less', 'cosh', 'arcsinh', 'add', 'ceil', 'arctan2', 'floor_divide',
           'fmod', 'hypot', 'logical_and', 'power', 'sinh', 'remainder', 'cos',
           'equal', 'arccos', 'less_equal', 'divide', 'bitwise_or',
           'bitwise_and', 'logical_xor', 'log', 'subtract', 'invert',
           'negative', 'log10', 'arcsin', 'arctanh', 'logical_not',
           'not_equal', 'tanh', 'true_divide', 'maximum', 'arccosh',
           'logical_or', 'minimum', 'conjugate', 'tan', 'greater',
           'bitwise_xor', 'fabs', 'floor', 'sqrt', 'arctan', 'right_shift',
           'absolute', 'sin', 'multiply', 'greater_equal', 'left_shift',
           'exp', 'divide_safe']

from numpy import less, cosh, arcsinh, add, ceil, arctan2, floor_divide, \
     fmod, hypot, logical_and, power, sinh, remainder, cos, \
     equal, arccos, less_equal, divide, bitwise_or, bitwise_and, \
     logical_xor, log, subtract, invert, negative, log10, arcsin, \
     arctanh, logical_not, not_equal, tanh, true_divide, maximum, \
     arccosh, logical_or, minimum, conjugate, tan, greater, bitwise_xor, \
     fabs, floor, sqrt, arctan, right_shift, absolute, sin, \
     multiply, greater_equal, left_shift, exp, divide as divide_safe
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
This tool modifies the task signature scheme to store and obtain
information about the task execution (why it must run, etc)::
def configure(conf):
conf.load('why')
After adding the tool, a full rebuild is necessary:
waf clean build --zones=task
"""
from waflib import Task, Utils, Logs, Errors
def signature(self):
	"""
	Replacement for :py:meth:`waflib.Task.Task.signature`.

	Instead of folding everything into one digest, compute four separate
	md5 digests (task code, explicit deps, env vars, implicit deps) and
	concatenate them, so the patched `runnable_status` below can report
	*which* part changed.
	"""
	# compute the result one time, and suppose the scan_signature will give the good result
	try:
		return self.cache_sig
	except AttributeError:
		pass

	# task code (hcode)
	self.m = Utils.md5()
	self.m.update(self.hcode)
	id_sig = self.m.digest()

	# explicit deps
	self.m = Utils.md5()
	self.sig_explicit_deps()
	exp_sig = self.m.digest()

	# env vars
	self.m = Utils.md5()
	self.sig_vars()
	var_sig = self.m.digest()

	# implicit deps / scanner results
	self.m = Utils.md5()
	if self.scan:
		try:
			self.sig_implicit_deps()
		except Errors.TaskRescan:
			# stale scanner cache: redo the whole signature from scratch
			return self.signature()
	impl_sig = self.m.digest()

	# NOTE: the segment order (impl, id, exp, var) must stay in sync with
	# the `msgs` list in runnable_status below
	ret = self.cache_sig = impl_sig + id_sig + exp_sig + var_sig
	return ret

Task.Task.signature = signature
# keep a reference to the original implementation so we can delegate to it
old = Task.Task.runnable_status
def runnable_status(self):
	"""
	Wrapper around the original runnable_status: when the task must run and
	a previous signature is available, log which of the four fixed-size
	signature segments (see `signature` above) actually differ.
	"""
	ret = old(self)
	if ret == Task.RUN_ME:
		try:
			old_sigs = self.generator.bld.task_sigs[self.uid()]
		except (KeyError, AttributeError):
			Logs.debug("task: task must run as no previous signature exists")
		else:
			new_sigs = self.cache_sig
			def v(x):
				# hex representation for readable log output
				return Utils.to_hex(x)

			Logs.debug('Task %r', self)
			# order matches the digest concatenation order in signature()
			msgs = ['* Implicit or scanner dependency', '* Task code', '* Source file, explicit or manual dependency', '* Configuration data variable']
			tmp = 'task: -> %s: %s %s'
			for x in range(len(msgs)):
				# each segment has the size of one md5 digest (SIG_NIL)
				l = len(Utils.SIG_NIL)
				a = new_sigs[x*l : (x+1)*l]
				b = old_sigs[x*l : (x+1)*l]
				if (a != b):
					Logs.debug(tmp, msgs[x].ljust(35), v(a), v(b))
	return ret

Task.Task.runnable_status = runnable_status
from __future__ import annotations
from io import (
BytesIO,
StringIO,
)
import os
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
Index,
)
import pandas._testing as tm
from pandas.io.common import get_handle
from pandas.io.xml import read_xml
# CHECKLIST
# [x] - ValueError: "Values for parser can only be lxml or etree."
# etree
# [x] - ImportError: "lxml not found, please install or use the etree parser."
# [X] - TypeError: "...is not a valid type for attr_cols"
# [X] - TypeError: "...is not a valid type for elem_cols"
# [X] - LookupError: "unknown encoding"
# [X] - KeyError: "...is not included in namespaces"
# [X] - KeyError: "no valid column"
# [X] - ValueError: "To use stylesheet, you need lxml installed..."
# [] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
# [X] - FileNotFoundError: "No such file or directory"
# [X] - PermissionError: "Forbidden"
# lxml
# [X] - TypeError: "...is not a valid type for attr_cols"
# [X] - TypeError: "...is not a valid type for elem_cols"
# [X] - LookupError: "unknown encoding"
# [] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
# [X] - FileNotFoundError: "No such file or directory"
# [X] - KeyError: "...is not included in namespaces"
# [X] - KeyError: "no valid column"
# [X] - ValueError: "stylesheet is not a url, file, or xml string."
# [] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT)
# [] - URLError: (USUALLY DUE TO NETWORKING)
# [] - HTTPError: (NEED AN ONLINE STYLESHEET)
# [X] - OSError: "failed to load external entity"
# [X] - XMLSyntaxError: "Opening and ending tag mismatch"
# [X] - XSLTApplyError: "Cannot resolve URI"
# [X] - XSLTParseError: "failed to compile"
# [X] - PermissionError: "Forbidden"
@pytest.fixture
def geom_df():
    """Small shapes frame (one NaN in ``sides``) used across the output tests."""
    records = [
        ("square", 360, 4),
        ("circle", 360, np.nan),
        ("triangle", 180, 3),
    ]
    return DataFrame(records, columns=["shape", "degrees", "sides"])
@pytest.fixture
def planet_df():
    """Eight-planet frame used for groupby/pivot hierarchical-column tests."""
    return DataFrame(
        {
            "planet": [
                "Mercury",
                "Venus",
                "Earth",
                "Mars",
                "Jupiter",
                "Saturn",
                "Uranus",
                "Neptune",
            ],
            "type": [
                "terrestrial",
                "terrestrial",
                "terrestrial",
                "terrestrial",
                "gas giant",
                "gas giant",
                "ice giant",
                "ice giant",
            ],
            "location": [
                "inner",
                "inner",
                "inner",
                "inner",
                "outer",
                "outer",
                "outer",
                "outer",
            ],
            # planetary masses (10^24 kg)
            "mass": [
                0.330114,
                4.86747,
                5.97237,
                0.641712,
                1898.187,
                568.3174,
                86.8127,
                102.4126,
            ],
        }
    )
@pytest.fixture
def from_file_expected():
    """Expected XML for round-tripping the books fixture with the default index."""
    return """\
<?xml version='1.0' encoding='utf-8'?>
<data>
  <row>
    <index>0</index>
    <category>cooking</category>
    <title>Everyday Italian</title>
    <author>Giada De Laurentiis</author>
    <year>2005</year>
    <price>30.0</price>
  </row>
  <row>
    <index>1</index>
    <category>children</category>
    <title>Harry Potter</title>
    <author>J K. Rowling</author>
    <year>2005</year>
    <price>29.99</price>
  </row>
  <row>
    <index>2</index>
    <category>web</category>
    <title>Learning XML</title>
    <author>Erik T. Ray</author>
    <year>2003</year>
    <price>39.95</price>
  </row>
</data>"""
def equalize_decl(doc):
    """Normalize the XML declaration so etree and lxml output compare equal.

    The two backends disagree on quote style (and case) in the declaration;
    rewrite the double-quoted form to the single-quoted one.  ``None`` is
    passed through unchanged.
    """
    if doc is None:
        return doc
    return doc.replace(
        '<?xml version="1.0" encoding="utf-8"?',
        "<?xml version='1.0' encoding='utf-8'?",
    )
@pytest.fixture(params=["rb", "r"])
def mode(request):
    # open mode for stylesheet files: exercise both binary and text reads
    return request.param
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
    # XML backend under test; the lxml cases are skipped when lxml is absent
    return request.param
# FILE OUTPUT
def test_file_output_str_read(xml_books, parser, from_file_expected, temp_file):
    # Round-trip: parse the books XML, write it to disk, read it back as text.
    frame = read_xml(xml_books, parser=parser)
    frame.to_xml(temp_file, parser=parser)
    result = equalize_decl(temp_file.read_text(encoding="utf-8").strip())
    assert result == from_file_expected
def test_file_output_bytes_read(xml_books, parser, from_file_expected, temp_file):
    """Round-trip reading the written file back as raw *bytes*.

    The original body read the file as text, which made this test a
    byte-for-byte duplicate of ``test_file_output_str_read``; read raw bytes
    and decode explicitly so the bytes path is actually exercised.
    """
    df_file = read_xml(xml_books, parser=parser)
    df_file.to_xml(temp_file, parser=parser)
    output = temp_file.read_bytes().decode("utf-8").strip()
    output = equalize_decl(output)
    assert output == from_file_expected
def test_str_output(xml_books, parser, from_file_expected):
    # Without a target path, to_xml returns the document as a string.
    frame = read_xml(xml_books, parser=parser)
    result = equalize_decl(frame.to_xml(parser=parser))
    assert result == from_file_expected
def test_wrong_file_path(parser, geom_df):
    # Writing into a directory that does not exist must raise OSError.
    bad_path = "/my/fake/path/output.xml"
    msg = r"Cannot save file into a non-existent directory: .*path"
    with pytest.raises(OSError, match=msg):
        geom_df.to_xml(bad_path, parser=parser)
# INDEX
def test_index_false(xml_books, parser, temp_file):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
df_file = read_xml(xml_books, parser=parser)
df_file.to_xml(temp_file, index=False, parser=parser)
output = temp_file.read_text(encoding="utf-8").strip()
output = equalize_decl(output)
assert output == expected
def test_index_false_rename_row_root(xml_books, parser, temp_file):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<books>
<book>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</book>
<book>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</book>
</books>"""
df_file = read_xml(xml_books, parser=parser)
df_file.to_xml(
temp_file, index=False, root_name="books", row_name="book", parser=parser
)
output = temp_file.read_text(encoding="utf-8").strip()
output = equalize_decl(output)
assert output == expected
@pytest.mark.parametrize("typ", [int, str])
def test_index_false_with_offset_input_index(parser, typ, geom_df):
"""
Tests that the output does not contain the `<index>` field when the index of the
input Dataframe has an offset.
This is a regression test for issue #42458.
"""
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
offset_index = [typ(i) for i in range(10, 13)]
offset_geom_df = geom_df.copy()
offset_geom_df.index = Index(offset_index)
output = offset_geom_df.to_xml(index=False, parser=parser)
output = equalize_decl(output)
assert output == expected
# NA_REP
na_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
def test_na_elem_output(parser, geom_df):
    # Default NaN handling: missing values become empty (self-closing) elements.
    result = equalize_decl(geom_df.to_xml(parser=parser))
    assert result == na_expected
def test_na_empty_str_elem_option(parser, geom_df):
    # na_rep="" is indistinguishable from the default empty-element output.
    result = equalize_decl(geom_df.to_xml(na_rep="", parser=parser))
    assert result == na_expected
def test_na_empty_elem_option(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides>0.0</sides>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(na_rep="0.0", parser=parser)
output = equalize_decl(output)
assert output == expected
# ATTR_COLS
def test_attrs_cols_nan_output(parser, geom_df):
    """Columns listed in attr_cols become attributes; NaN attributes are dropped."""
    expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
  <row index="0" shape="square" degrees="360" sides="4.0"/>
  <row index="1" shape="circle" degrees="360"/>
  <row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>"""
    output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser)
    output = equalize_decl(output)
    assert output == expected
def test_attrs_cols_prefix(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.xom">
<doc:row doc:index="0" doc:shape="square" \
doc:degrees="360" doc:sides="4.0"/>
<doc:row doc:index="1" doc:shape="circle" \
doc:degrees="360"/>
<doc:row doc:index="2" doc:shape="triangle" \
doc:degrees="180" doc:sides="3.0"/>
</doc:data>"""
output = geom_df.to_xml(
attr_cols=["index", "shape", "degrees", "sides"],
namespaces={"doc": "http://example.xom"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
assert output == expected
def test_attrs_unknown_column(parser, geom_df):
    # A misspelled column name ("degree") in attr_cols raises KeyError.
    with pytest.raises(KeyError, match="no valid column"):
        geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser)
def test_attrs_wrong_type(parser, geom_df):
    # attr_cols must be list-like; a plain string is rejected with TypeError.
    bad_cols = '"shape", "degree", "sides"'
    with pytest.raises(TypeError, match="is not a valid type for attr_cols"):
        geom_df.to_xml(attr_cols=bad_cols, parser=parser)
# ELEM_COLS
def test_elems_cols_nan_output(parser, geom_df):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<degrees>360</degrees>
<sides>4.0</sides>
<shape>square</shape>
</row>
<row>
<degrees>360</degrees>
<sides/>
<shape>circle</shape>
</row>
<row>
<degrees>180</degrees>
<sides>3.0</sides>
<shape>triangle</shape>
</row>
</data>"""
output = geom_df.to_xml(
index=False, elem_cols=["degrees", "sides", "shape"], parser=parser
)
output = equalize_decl(output)
assert output == elems_cols_expected
def test_elems_unknown_column(parser, geom_df):
with pytest.raises(KeyError, match=("no valid column")):
geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser)
def test_elems_wrong_type(parser, geom_df):
with pytest.raises(TypeError, match=("is not a valid type for elem_cols")):
geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser)
def test_elems_and_attrs_cols(parser, geom_df):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row shape="square">
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row shape="circle">
<degrees>360</degrees>
<sides/>
</row>
<row shape="triangle">
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(
index=False,
elem_cols=["degrees", "sides"],
attr_cols=["shape"],
parser=parser,
)
output = equalize_decl(output)
assert output == elems_cols_expected
# HIERARCHICAL COLUMNS
def test_hierarchical_columns(parser, planet_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count_mass>4</count_mass>
<sum_mass>11.81</sum_mass>
<mean_mass>2.95</mean_mass>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count_mass>2</count_mass>
<sum_mass>2466.5</sum_mass>
<mean_mass>1233.25</mean_mass>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count_mass>2</count_mass>
<sum_mass>189.23</sum_mass>
<mean_mass>94.61</mean_mass>
</row>
<row>
<location>All</location>
<type/>
<count_mass>8</count_mass>
<sum_mass>2667.54</sum_mass>
<mean_mass>333.44</mean_mass>
</row>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
def test_hierarchical_attrs_columns(parser, planet_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count_mass="4" \
sum_mass="11.81" mean_mass="2.95"/>
<row location="outer" type="gas giant" count_mass="2" \
sum_mass="2466.5" mean_mass="1233.25"/>
<row location="outer" type="ice giant" count_mass="2" \
sum_mass="189.23" mean_mass="94.61"/>
<row location="All" type="" count_mass="8" \
sum_mass="2667.54" mean_mass="333.44"/>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# MULTIINDEX
def test_multi_index(parser, planet_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count>4</count>
<sum>11.81</sum>
<mean>2.95</mean>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count>2</count>
<sum>2466.5</sum>
<mean>1233.25</mean>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count>2</count>
<sum>189.23</sum>
<mean>94.61</mean>
</row>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
def test_multi_index_attrs_cols(parser, planet_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count="4" \
sum="11.81" mean="2.95"/>
<row location="outer" type="gas giant" count="2" \
sum="2466.5" mean="1233.25"/>
<row location="outer" type="ice giant" count="2" \
sum="189.23" mean="94.61"/>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# NAMESPACE
def test_default_namespace(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser)
output = equalize_decl(output)
assert output == expected
def test_unused_namespaces(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data xmlns:oth="http://other.org" xmlns:ex="http://example.com">
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(
namespaces={"oth": "http://other.org", "ex": "http://example.com"},
parser=parser,
)
output = equalize_decl(output)
assert output == expected
# PREFIX
def test_namespace_prefix(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser
)
output = equalize_decl(output)
assert output == expected
def test_missing_prefix_in_nmsp(parser, geom_df):
with pytest.raises(KeyError, match=("doc is not included in namespaces")):
geom_df.to_xml(
namespaces={"": "http://example.com"}, prefix="doc", parser=parser
)
def test_namespace_prefix_and_default(parser, geom_df):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://other.org" xmlns="http://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"": "http://example.com", "doc": "http://other.org"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
assert output == expected
# ENCODING
encoding_expected = """\
<?xml version='1.0' encoding='ISO-8859-1'?>
<data>
<row>
<index>0</index>
<rank>1</rank>
<malename>José</malename>
<femalename>Sofía</femalename>
</row>
<row>
<index>1</index>
<rank>2</rank>
<malename>Luis</malename>
<femalename>Valentina</femalename>
</row>
<row>
<index>2</index>
<rank>3</rank>
<malename>Carlos</malename>
<femalename>Isabella</femalename>
</row>
<row>
<index>3</index>
<rank>4</rank>
<malename>Juan</malename>
<femalename>Camila</femalename>
</row>
<row>
<index>4</index>
<rank>5</rank>
<malename>Jorge</malename>
<femalename>Valeria</femalename>
</row>
</data>"""
def test_encoding_option_str(xml_baby_names, parser):
df_file = read_xml(xml_baby_names, parser=parser, encoding="ISO-8859-1").head(5)
output = df_file.to_xml(encoding="ISO-8859-1", parser=parser)
if output is not None:
# etree and lxml differ on quotes and case in xml declaration
output = output.replace(
'<?xml version="1.0" encoding="ISO-8859-1"?',
"<?xml version='1.0' encoding='ISO-8859-1'?",
)
assert output == encoding_expected
def test_correct_encoding_file(xml_baby_names, temp_file):
pytest.importorskip("lxml")
df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
df_file.to_xml(temp_file, index=False, encoding="ISO-8859-1", parser="lxml")
@pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
def test_wrong_encoding_option_lxml(xml_baby_names, parser, encoding, temp_file):
pytest.importorskip("lxml")
df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
df_file.to_xml(temp_file, index=False, encoding=encoding, parser=parser)
def test_misspelled_encoding(parser, geom_df):
    # Codec lookup for an unknown encoding name fails with LookupError.
    with pytest.raises(LookupError, match="unknown encoding"):
        geom_df.to_xml(encoding="uft-8", parser=parser)
# PRETTY PRINT
def test_xml_declaration_pretty_print(geom_df):
pytest.importorskip("lxml")
expected = """\
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(xml_declaration=False)
assert output == expected
def test_no_pretty_print_with_decl(parser, geom_df):
expected = (
"<?xml version='1.0' encoding='utf-8'?>\n"
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(pretty_print=False, parser=parser)
output = equalize_decl(output)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
def test_no_pretty_print_no_decl(parser, geom_df):
expected = (
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
# PARSER
@td.skip_if_installed("lxml")
def test_default_parser_no_lxml(geom_df):
with pytest.raises(
ImportError, match=("lxml not found, please install or use the etree parser.")
):
geom_df.to_xml()
def test_unknown_parser(geom_df):
    # Only "lxml" and "etree" are accepted backends.
    msg = "Values for parser can only be lxml or etree."
    with pytest.raises(ValueError, match=msg):
        geom_df.to_xml(parser="bs4")
# STYLESHEET
xsl_expected = """\
<?xml version="1.0" encoding="utf-8"?>
<data>
<row>
<field field="index">0</field>
<field field="shape">square</field>
<field field="degrees">360</field>
<field field="sides">4.0</field>
</row>
<row>
<field field="index">1</field>
<field field="shape">circle</field>
<field field="degrees">360</field>
<field field="sides"/>
</row>
<row>
<field field="index">2</field>
<field field="shape">triangle</field>
<field field="degrees">180</field>
<field field="sides">3.0</field>
</row>
</data>"""
def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
pytest.importorskip("lxml")
with open(
xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
) as f:
assert geom_df.to_xml(stylesheet=f) == xsl_expected
def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
# note: By default the bodies of untyped functions are not checked,
# consider using --check-untyped-defs
pytest.importorskip("lxml")
xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
with open(
xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
) as f:
if mode == "rb":
xsl_obj = BytesIO(f.read())
else:
xsl_obj = StringIO(f.read())
output = geom_df.to_xml(stylesheet=xsl_obj)
assert output == xsl_expected
def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
pytest.importorskip("lxml")
with open(
xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
) as f:
output = geom_df.to_xml(stylesheet=f)
assert output == xsl_expected
def test_stylesheet_wrong_path(geom_df):
pytest.importorskip("lxml.etree")
xsl = os.path.join("does", "not", "exist", "row_field_output.xslt")
with pytest.raises(
FileNotFoundError, match=r"\[Errno 2\] No such file or director"
):
geom_df.to_xml(stylesheet=xsl)
@pytest.mark.parametrize("val", [StringIO(""), BytesIO(b"")])
def test_empty_string_stylesheet(val, geom_df):
lxml_etree = pytest.importorskip("lxml.etree")
msg = "|".join(
[
"Document is empty",
"Start tag expected, '<' not found",
# Seen on Mac with lxml 4.9.1
r"None \(line 0\)",
]
)
with pytest.raises(lxml_etree.XMLSyntaxError, match=msg):
geom_df.to_xml(stylesheet=val)
def test_incorrect_xsl_syntax(geom_df):
lxml_etree = pytest.importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" >
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(
lxml_etree.XMLSyntaxError, match="Opening and ending tag mismatch"
):
geom_df.to_xml(stylesheet=StringIO(xsl))
def test_incorrect_xsl_eval(geom_df):
lxml_etree = pytest.importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(lxml_etree.XSLTParseError, match="failed to compile"):
geom_df.to_xml(stylesheet=StringIO(xsl))
def test_incorrect_xsl_apply(geom_df, temp_file):
lxml_etree = pytest.importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:copy-of select="document('non_existent.xml')/*"/>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(lxml_etree.XSLTApplyError, match="Cannot resolve URI"):
geom_df.to_xml(temp_file, stylesheet=StringIO(xsl))
def test_stylesheet_with_etree(geom_df):
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>"""
with pytest.raises(ValueError, match="To use stylesheet, you need lxml installed"):
geom_df.to_xml(parser="etree", stylesheet=StringIO(xsl))
def test_style_to_csv(geom_df):
pytest.importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim">,</xsl:param>
<xsl:template match="/data">
<xsl:text>,shape,degrees,sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, $delim, shape, $delim,
degrees, $delim, sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_csv = geom_df.to_csv(lineterminator="\n")
if out_csv is not None:
out_csv = out_csv.strip()
out_xml = geom_df.to_xml(stylesheet=StringIO(xsl))
assert out_csv == out_xml
def test_style_to_string(geom_df):
pytest.importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim"><xsl:text> </xsl:text></xsl:param>
<xsl:template match="/data">
<xsl:text> shape degrees sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, ' ',
substring($delim, 1, string-length('triangle')
- string-length(shape) + 1),
shape,
substring($delim, 1, string-length(name(degrees))
- string-length(degrees) + 2),
degrees,
substring($delim, 1, string-length(name(sides))
- string-length(sides) + 2),
sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_str = geom_df.to_string()
out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=StringIO(xsl))
assert out_xml == out_str
def test_style_to_json(geom_df):
pytest.importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="quot">"</xsl:param>
<xsl:template match="/data">
<xsl:text>{"shape":{</xsl:text>
<xsl:apply-templates select="descendant::row/shape"/>
<xsl:text>},"degrees":{</xsl:text>
<xsl:apply-templates select="descendant::row/degrees"/>
<xsl:text>},"sides":{</xsl:text>
<xsl:apply-templates select="descendant::row/sides"/>
<xsl:text>}}</xsl:text>
</xsl:template>
<xsl:template match="shape|degrees|sides">
<xsl:variable name="val">
<xsl:if test = ".=''">
<xsl:value-of select="'null'"/>
</xsl:if>
<xsl:if test = "number(text()) = text()">
<xsl:value-of select="text()"/>
</xsl:if>
<xsl:if test = "number(text()) != text()">
<xsl:value-of select="concat($quot, text(), $quot)"/>
</xsl:if>
</xsl:variable>
<xsl:value-of select="concat($quot, preceding-sibling::index,
$quot,':', $val)"/>
<xsl:if test="preceding-sibling::index != //row[last()]/index">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:template>
</xsl:stylesheet>"""
out_json = geom_df.to_json()
out_xml = geom_df.to_xml(stylesheet=StringIO(xsl))
assert out_json == out_xml
# COMPRESSION
geom_xml = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
def test_compression_output(parser, compression_only, geom_df, temp_file):
    """Write compressed XML and verify the decompressed round-trip matches."""
    path = temp_file
    geom_df.to_xml(path, parser=parser, compression=compression_only)
    # decompress transparently via pandas' own handle machinery
    with get_handle(
        path,
        "r",
        compression=compression_only,
    ) as handle_obj:
        output = handle_obj.handle.read()
    output = equalize_decl(output)
    assert geom_xml == output.strip()
def test_filename_and_suffix_comp(
parser, compression_only, geom_df, compression_to_extension, tmp_path
):
compfile = "xml." + compression_to_extension[compression_only]
path = tmp_path / compfile
geom_df.to_xml(path, parser=parser, compression=compression_only)
with get_handle(
path,
"r",
compression=compression_only,
) as handle_obj:
output = handle_obj.handle.read()
output = equalize_decl(output)
assert geom_xml == output.strip()
def test_ea_dtypes(any_numeric_ea_dtype, parser):
# GH#43903
expected = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<a/>
</row>
</data>"""
df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype)
result = df.to_xml(parser=parser)
assert equalize_decl(result).strip() == expected
def test_unsupported_compression(parser, geom_df, temp_file):
with pytest.raises(ValueError, match="Unrecognized compression type"):
path = temp_file
geom_df.to_xml(path, parser=parser, compression="7z")
# STORAGE OPTIONS
@pytest.mark.single_cpu
def test_s3_permission_output(parser, s3_bucket_public, geom_df):
s3fs = pytest.importorskip("s3fs")
pytest.importorskip("lxml")
with tm.external_error_raised((PermissionError, FileNotFoundError)):
fs = s3fs.S3FileSystem(anon=True)
fs.ls(s3_bucket_public.name)
geom_df.to_xml(
f"s3://{s3_bucket_public.name}/geom.xml", compression="zip", parser=parser
) | python | github | https://github.com/pandas-dev/pandas | pandas/tests/io/xml/test_to_xml.py |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import erpnext
from frappe.utils.make_random import get_random
from frappe.utils import nowdate, add_days, add_years, getdate, add_months
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
from erpnext.hr.doctype.salary_slip.test_salary_slip import make_earning_salary_component,\
make_deduction_salary_component, make_employee_salary_slip
from erpnext.hr.doctype.employee.test_employee import make_employee
test_dependencies = ["Fiscal Year"]
class TestSalaryStructure(unittest.TestCase):
	def setUp(self):
		"""Reset salary documents and ensure test employees and a holiday list exist."""
		# wipe previously created salary records so every test starts clean
		for dt in ["Salary Slip", "Salary Structure", "Salary Structure Assignment"]:
			frappe.db.sql("delete from `tab%s`" % dt)
		self.make_holiday_list()
		# point the default company at the freshly created holiday list
		frappe.db.set_value("Company", erpnext.get_default_company(), "default_holiday_list", "Salary Structure Test Holiday List")
		make_employee("test_employee@salary.com")
		make_employee("test_employee_2@salary.com")
def make_holiday_list(self):
if not frappe.db.get_value("Holiday List", "Salary Structure Test Holiday List"):
holiday_list = frappe.get_doc({
"doctype": "Holiday List",
"holiday_list_name": "Salary Structure Test Holiday List",
"from_date": nowdate(),
"to_date": add_years(nowdate(), 1),
"weekly_off": "Sunday"
}).insert()
holiday_list.get_weekly_off_dates()
holiday_list.save()
def test_amount_totals(self):
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 0)
sal_slip = frappe.get_value("Salary Slip", {"employee_name":"test_employee_2@salary.com"})
if not sal_slip:
sal_slip = make_employee_salary_slip("test_employee_2@salary.com", "Monthly", "Salary Structure Sample")
self.assertEqual(sal_slip.get("salary_structure"), 'Salary Structure Sample')
self.assertEqual(sal_slip.get("earnings")[0].amount, 50000)
self.assertEqual(sal_slip.get("earnings")[1].amount, 3000)
self.assertEqual(sal_slip.get("earnings")[2].amount, 25000)
self.assertEqual(sal_slip.get("gross_pay"), 78000)
self.assertEqual(sal_slip.get("deductions")[0].amount, 5000)
self.assertEqual(sal_slip.get("deductions")[1].amount, 5000)
self.assertEqual(sal_slip.get("total_deduction"), 10000)
self.assertEqual(sal_slip.get("net_pay"), 68000)
def test_whitespaces_in_formula_conditions_fields(self):
salary_structure = make_salary_structure("Salary Structure Sample", "Monthly", dont_submit=True)
for row in salary_structure.earnings:
row.formula = "\n%s\n\n"%row.formula
row.condition = "\n%s\n\n"%row.condition
for row in salary_structure.deductions:
row.formula = "\n%s\n\n"%row.formula
row.condition = "\n%s\n\n"%row.condition
salary_structure.save()
for row in salary_structure.earnings:
self.assertFalse("\n" in row.formula or "\n" in row.condition)
for row in salary_structure.deductions:
self.assertFalse(("\n" in row.formula) or ("\n" in row.condition))
def test_salary_structures_assignment(self):
salary_structure = make_salary_structure("Salary Structure Sample", "Monthly")
employee = "test_assign_stucture@salary.com"
employee_doc_name = make_employee(employee)
# clear the already assigned stuctures
frappe.db.sql('''delete from `tabSalary Structure Assignment` where employee=%s and salary_structure=%s ''',
("test_assign_stucture@salary.com",salary_structure.name))
#test structure_assignment
salary_structure.assign_salary_structure(employee=employee_doc_name,from_date='2013-01-01',base=5000,variable=200)
salary_structure_assignment = frappe.get_doc("Salary Structure Assignment",{'employee':employee_doc_name, 'from_date':'2013-01-01'})
self.assertEqual(salary_structure_assignment.docstatus, 1)
self.assertEqual(salary_structure_assignment.base, 5000)
self.assertEqual(salary_structure_assignment.variable, 200)
def make_salary_structure(salary_structure, payroll_frequency, employee=None, dont_submit=False, other_details=None, test_tax=False):
    """Return the named Salary Structure, creating (and optionally
    submitting) it first when it does not exist yet. When *employee* is
    given and the structure is submitted, an assignment is created too."""
    if test_tax:
        # Force a rebuild so tax-related components are created from scratch.
        frappe.db.sql("""delete from `tabSalary Structure` where name=%s""",(salary_structure))
    if frappe.db.exists('Salary Structure', salary_structure):
        doc = frappe.get_doc("Salary Structure", salary_structure)
    else:
        details = {
            "doctype": "Salary Structure",
            "name": salary_structure,
            "company": erpnext.get_default_company(),
            "earnings": make_earning_salary_component(test_tax=test_tax),
            "deductions": make_deduction_salary_component(test_tax=test_tax),
            "payroll_frequency": payroll_frequency,
            "payment_account": get_random("Account")
        }
        if other_details and isinstance(other_details, dict):
            details.update(other_details)
        doc = frappe.get_doc(details).insert()
        if not dont_submit:
            doc.submit()
    # Only assign when no submitted assignment exists and the structure
    # itself is submitted (docstatus == 1).
    if employee and not frappe.db.get_value("Salary Structure Assignment",
            {'employee': employee, 'docstatus': 1}) and doc.docstatus == 1:
        create_salary_structure_assignment(employee, salary_structure)
    return doc
def create_salary_structure_assignment(employee, salary_structure, from_date=None):
    """Create and submit a Salary Structure Assignment for *employee*,
    replacing any assignments that already exist for them."""
    # Drop previous assignments so the new one is the only record.
    if frappe.db.exists("Salary Structure Assignment", {"employee": employee}):
        frappe.db.sql("""delete from `tabSalary Structure Assignment` where employee=%s""",(employee))
    assignment = frappe.new_doc("Salary Structure Assignment")
    assignment.employee = employee
    assignment.base = 50000
    assignment.variable = 5000
    # Default to one month back so the assignment predates "today" runs.
    assignment.from_date = from_date or add_months(nowdate(), -1)
    assignment.salary_structure = salary_structure
    assignment.company = erpnext.get_default_company()
    assignment.save(ignore_permissions=True)
    assignment.submit()
    return assignment
import fsui
from launcher.i18n import gettext
from launcher.setup.setupwelcomepage import SetupWelcomePage
from launcher.ui.skin import LauncherTheme
from launcher.ui.widgets import PrevButton, NextButton, CloseButton
class SetupWizardDialog(fsui.Window):
    """Window hosting the launcher's setup wizard pages with Prev/Next
    navigation buttons along the bottom."""

    @classmethod
    def open(cls, parent=None):
        # Reuse a single window instance instead of opening duplicates.
        return fsui.open_window_instance(cls, parent)

    def __init__(self, parent):
        super().__init__(
            parent,
            gettext("Setup Wizard"),
            minimizable=False,
            maximizable=False,
        )
        self.theme = LauncherTheme.get()
        self.layout = fsui.VerticalLayout()
        # The wizard page fills the window body above the button row.
        page = SetupWelcomePage(self)
        self.layout.add(page, expand=True, fill=True)
        # Navigation row: spacer pushes Prev/Next (/ Close) to the right.
        button_layout = fsui.HorizontalLayout()
        self.layout.add(button_layout, fill=True, margin=20)
        button_layout.add_spacer(0, expand=True)
        self.prev_button = PrevButton(self)
        button_layout.add(self.prev_button, fill=True, margin_left=10)
        self.next_button = NextButton(self)
        button_layout.add(self.next_button, fill=True, margin_left=10)
        # Only add an explicit Close button when the active theme wants one
        # (some themes rely on native window decorations instead).
        if self.window.theme.has_close_buttons:
            self.close_button = CloseButton(self)
            button_layout.add(self.close_button, fill=True, margin_left=10)
from langchain_core.documents import BaseDocumentTransformer, Document
__all__ = ["BaseDocumentTransformer", "Document"] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/schema/document.py |
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
    """Server event signalling that streaming of a function call's
    arguments has completed; `arguments` holds the full payload."""

    arguments: str
    """The final arguments as a JSON string."""

    call_id: str
    """The ID of the function call."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the function call item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.function_call_arguments.done"]
    """The event type, must be `response.function_call_arguments.done`."""
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to add gyp support to cr."""
import cr
import os
GYP_DEFINE_PREFIX = 'GYP_DEF_'
class GypPrepareOut(cr.PrepareOut):
    """A prepare action that runs gyp whenever you select an output directory."""

    # Defaults applied when this action is active: generate ninja files into
    # cr's chosen output directory for the selected build type/target arch.
    ENABLED = cr.Config.From(
        GYP_GENERATORS='ninja',
        GYP_GENERATOR_FLAGS='output_dir={CR_OUT_BASE} config={CR_BUILDTYPE}',
        GYP_DEF_target_arch='{CR_ENVSETUP_ARCH}',
    )

    def UpdateContext(self):
        # Collapse GYP_DEFINES from all GYP_DEF prefixes: every exported
        # variable named GYP_DEF_<x> becomes "<x>=<value>" in GYP_DEFINES.
        gyp_defines = cr.context.Find('GYP_DEFINES') or ''
        for key, value in cr.context.exported.items():
            if key.startswith(GYP_DEFINE_PREFIX):
                gyp_defines += ' %s=%s' % (key[len(GYP_DEFINE_PREFIX):], value)
        cr.context['GYP_DEFINES'] = gyp_defines.strip()
        if cr.context.verbose >= 1:
            print cr.context.Substitute('GYP_DEFINES = {GYP_DEFINES}')

    def Prepare(self):
        # Invoke gyp_chromium from the source root; --check validates the
        # gyp files while they are processed.
        if cr.context.verbose >= 1:
            print cr.context.Substitute('Invoking gyp with {GYP_GENERATOR_FLAGS}')
        cr.Host.Execute(
            '{CR_SRC}/build/gyp_chromium',
            '--depth={CR_SRC}',
            '--check'
        )
import pysal
import os.path
import scipy.io as sio
import pysal.core.FileIO as FileIO
from pysal.weights import W
from pysal.weights.util import full, full2W
from warnings import warn
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["MatIO"]
class MatIO(FileIO.FileIO):
    """
    Opens, reads, and writes weights file objects in MATLAB Level 4-5 MAT format.

    MAT files are used in Dr. LeSage's MATLAB Econometrics library.
    The MAT file format can handle both full and sparse matrices,
    and it allows for a matrix dimension greater than 256.
    In PySAL, row and column headers of a MATLAB array are ignored.

    PySAL uses matlab io tools in scipy.
    Thus, it is subject to all limits that loadmat and savemat in scipy have.

    Notes
    -----
    If a given weights object contains too many observations to
    write it out as a full matrix,
    PySAL writes out the object as a sparse matrix.

    References
    ----------
    MathWorks (2011) "MATLAB 7 MAT-File Format" at
    http://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf.

    scipy matlab io
    http://docs.scipy.org/doc/scipy/reference/tutorial/io.html
    """

    FORMATS = ['mat']
    MODES = ['r', 'w']

    def __init__(self, *args, **kwargs):
        self._varName = 'Unknown'
        FileIO.FileIO.__init__(self, *args, **kwargs)
        # MAT files are binary; open in binary mode regardless of r/w.
        self.file = open(self.dataPath, self.mode + 'b')

    def _set_varName(self, val):
        # Only string-like values are accepted; others are silently ignored.
        if issubclass(type(val), basestring):
            self._varName = val

    def _get_varName(self):
        return self._varName

    varName = property(fget=_get_varName, fset=_set_varName)

    def read(self, n=-1):
        # n is accepted for FileIO interface compatibility but unused:
        # a MAT file holds a single weights matrix.
        self._complain_ifclosed(self.closed)
        return self._read()

    def seek(self, pos):
        # Only rewinding to the start is supported.
        if pos == 0:
            self.file.seek(0)
            self.pos = 0

    def _read(self):
        """Reads MATLAB mat file
        Returns a pysal.weights.weights.W object

        Examples
        --------

        Type 'dir(w)' at the interpreter to see what methods are supported.
        Open a MATLAB mat file and read it into a pysal weights object

        >>> w = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r').read()

        Get the number of observations from the header

        >>> w.n
        46

        Get the mean number of neighbors

        >>> w.mean_neighbors
        4.0869565217391308

        Get neighbor distances for a single observation

        >>> w[1]
        {25: 1, 3: 1, 28: 1, 39: 1}

        """
        if self.pos > 0:
            raise StopIteration

        mat = sio.loadmat(self.file)
        # Skip scipy's metadata entries ('__header__', '__version__', ...);
        # the first remaining variable is taken as the weights matrix.
        mat_keys = [k for k in mat if not k.startswith("_")]
        full_w = mat[mat_keys[0]]

        self.pos += 1
        return full2W(full_w)

    def write(self, obj):
        """
        Parameters
        ----------
        .write(weightsObject)
        accepts a weights object

        Returns
        ------
        a MATLAB mat file
        write a weights object to the opened mat file.

        Examples
        --------

        >>> import tempfile, pysal, os
        >>> testfile = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r')
        >>> w = testfile.read()

        Create a temporary file for this example

        >>> f = tempfile.NamedTemporaryFile(suffix='.mat')

        Reassign to new var

        >>> fname = f.name

        Close the temporary named file

        >>> f.close()

        Open the new file in write mode

        >>> o = pysal.open(fname,'w')

        Write the Weights object into the open file

        >>> o.write(w)
        >>> o.close()

        Read in the newly created mat file

        >>> wnew = pysal.open(fname,'r').read()

        Compare values from old to new

        >>> wnew.pct_nonzero == w.pct_nonzero
        True

        Clean up temporary file created for this example

        >>> os.remove(fname)
        """
        self._complain_ifclosed(self.closed)
        if issubclass(type(obj), W):
            # Prefer a dense matrix; fall back to the sparse representation
            # when full() cannot materialize it (too many observations).
            try:
                w = full(obj)[0]
            except ValueError:
                w = obj.sparse
            sio.savemat(self.file, {'WEIGHT': w})
            self.pos += 1
        else:
            raise TypeError("Expected a pysal weights object, got: %s" % (
                type(obj)))

    def close(self):
        self.file.close()
        FileIO.FileIO.close(self)
// @enableInferEventHandlers
import {useRef} from 'react';
// Simulates react-hook-form's handleSubmit or similar event handler wrappers
// Returns a DOM event handler that suppresses the default form submission
// and forwards a stub payload to the user-supplied callback.
function handleSubmit<T>(callback: (data: T) => void) {
  return (event: any) => {
    event.preventDefault();
    callback({} as T);
  };
}
function Component() {
  const ref = useRef<HTMLInputElement>(null);

  const onSubmit = (data: any) => {
    // This should be allowed: accessing ref.current in an event handler
    // that's wrapped by handleSubmit and passed to onSubmit prop
    if (ref.current !== null) {
      console.log(ref.current.value);
    }
  };

  // onSubmit only fires on form submission, never during render, so the
  // ref read above must not be reported as a render-time ref access.
  return (
    <>
      <input ref={ref} />
      <form onSubmit={handleSubmit(onSubmit)}>
        <button type="submit">Submit</button>
      </form>
    </>
  );
}
// Entrypoint consumed by the compiler fixture runner.
export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [{}],
};
// Compile-time probe: only PPC64 little-endian targets with VSX enabled
// may include altivec.h; any other configuration fails the check.
#if defined(__VSX__)
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
#include <altivec.h>
#else
#error "OpenCV only supports little-endian mode"
#endif
#else
#error "VSX is not supported"
#endif

// Exercise representative VSX intrinsics (splat + fused multiply-add) so
// the compiler proves it can actually generate VSX code.
int main()
{
    __vector float testF = vec_splats(0.f);
    testF = vec_madd(testF, testF, testF);
    return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import java.util.EnumMap;
import java.util.Map;
/**
* Possible error codes.
*
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#INVALID_REQUEST}
* - {@link Errors#INVALID_GROUP_ID}
* - {@link Errors#GROUP_ID_NOT_FOUND}
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED}
*/
public class ConsumerGroupDescribeResponse extends AbstractResponse {

    // Generated response payload backing this wrapper.
    private final ConsumerGroupDescribeResponseData data;

    public ConsumerGroupDescribeResponse(ConsumerGroupDescribeResponseData data) {
        super(ApiKeys.CONSUMER_GROUP_DESCRIBE);
        this.data = data;
    }

    @Override
    public ConsumerGroupDescribeResponseData data() {
        return data;
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // Tally the per-group error codes; EnumMap is cheap since keys are
        // drawn from the fixed Errors enum.
        Map<Errors, Integer> counts = new EnumMap<>(Errors.class);
        data.groups().forEach(
            group -> updateErrorCounts(counts, Errors.forCode(group.errorCode()))
        );
        return counts;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /**
     * Deserialize a response of the given protocol {@code version} from
     * {@code readable}.
     */
    public static ConsumerGroupDescribeResponse parse(Readable readable, short version) {
        return new ConsumerGroupDescribeResponse(
            new ConsumerGroupDescribeResponseData(readable, version)
        );
    }
}
#!/usr/bin/env python
#
# Converts the output of ans2csv.sh to a csv with a global ranking.
#
# The global ranking is computing according to the Minimum Feedback
# Arc Set solver described by Lopez (2012).
#
#
import sys
import csv
import os
from collections import namedtuple,defaultdict,Counter
from argparse import ArgumentParser
from csv import DictReader
# pip install BitVector
from BitVector import BitVector
import mfas_solver
# Output format
RankRow = namedtuple('RankRow', 'src_id sys_id rank')
class Ranking:
    """A single pairwise judgement between two systems on one segment.

    Semantics are the relation sysA "better than" sysB, except when
    rank == 0, which indicates equality. After construction, `rank` is
    always non-negative (1 for a strict win of sysA, 0 for a tie).
    """
    def rank_to_int(self, rank):
        """ Convert ans2csv.sh ranking values to __cmp__ values.
        a<b indicates that a is better than b.

        Raises:
          RuntimeError: if rank is not one of '<', '>', '='.
        """
        if rank == '<':
            return -1
        elif rank == '>':
            return 1
        elif rank == '=':
            return 0
        else:
            raise RuntimeError('Invalid ranking: ' + str(rank))

    def __init__(self, src_id, sys1_id, sys2_id, rank):
        self.src_id = int(src_id)
        # NOTE: removed the no-op self-assignments of sys1_id/sys2_id that
        # were present here; the parameters are used directly below.
        self.rank = self.rank_to_int(rank)
        if self.rank < 0:
            # A is better than B
            self.sysA = sys1_id
            self.sysB = sys2_id
            self.rank *= -1
        else:
            # B is better than or equal to A
            self.sysA = sys2_id
            self.sysB = sys1_id

    def __str__(self):
        return '[src:%d sys1:%s sys2:%s rank:%d]' % (self.src_id,
                                                     self.sysA,
                                                     self.sysB,
                                                     self.rank)
def parse_answer_file(answer_file):
    """Parse the csv produced by ans2csv.sh into per-segment rankings.

    Args:
      answer_file: path to a csv with columns segmentId, system1,
          system2 and cmp.
    Returns:
      dict (defaultdict) mapping segmentId -> list of Ranking objects.
    Raises:
      IOError: if answer_file cannot be opened.
    """
    src2rank = defaultdict(list)
    # Count rows explicitly: the previous enumerate-based version reported
    # the last row *index* (off by one) and raised NameError on empty input.
    num_rows = 0
    with open(answer_file) as infile:
        for row in DictReader(infile):
            ranking = Ranking(row.get('segmentId'),
                              row.get('system1'),
                              row.get('system2'),
                              row.get('cmp'))
            src2rank[ranking.src_id].append(ranking)
            num_rows += 1
    sys.stderr.write('Read: %d rows%s' % (num_rows, os.linesep))
    return src2rank
def uncovered(bv):
    """Return the indices of uncovered bits (value 0) in a coverage set.

    Args:
      bv: an indexable bit container (BitVector or any 0/1 sequence).
    Returns:
      List of integer positions whose bit is 0.
    """
    # range() instead of Python-2-only xrange(): behaviorally identical
    # here and portable across Python versions.
    return [i for i in range(len(bv)) if bv[i] == 0]
def make_rows(id_list, tie_with_prev=None):
    """ Converts the sorted id list to a list
    of RankedRow namedtuples for output.
    Rankings are 1-indexed; tied entries repeat the previous rank.

    Args:
      id_list: system ids sorted best-first.
      tie_with_prev: optional parallel list of booleans; True at i means
          entry i shares the rank of entry i-1.
    Returns:
      List of RankRow tuples with src_id left unset (None).
    """
    rows = []
    previous_rank = 0
    for position, system_id in enumerate(id_list):
        current_rank = position + 1
        if tie_with_prev and tie_with_prev[position]:
            current_rank = previous_rank
        rows.append(RankRow(src_id=None,
                            sys_id=system_id,
                            rank=current_rank))
        previous_rank = current_rank
    return rows
def mark_ties(ranking, edges):
    """Flag adjacent entries in the ranking that are tied.

    TODO(spenceg): This is naive! Transitivity is not checked.

    Args:
      ranking: ordered list of system ids, best first.
      edges: dict mapping (better, worse) pairs to victory margins; a
          margin of 0 between neighbors marks a tie.
    Returns:
      List of booleans parallel to ranking; True at index i means entry i
      is tied with entry i-1.
    """
    tie_with_prev = [False]*len(ranking)
    # range() instead of Python-2-only xrange(): identical semantics here,
    # portable across Python versions.
    for i in range(1, len(ranking)):
        prev_v = ranking[i-1]
        v = ranking[i]
        if (prev_v, v) in edges and edges[(prev_v, v)] == 0:
            tie_with_prev[i] = True
    return tie_with_prev
def sort_tgts(ranking_list):
    """ Use the ranking list to build a total ordering
    of the ranked translations (indicated by Ranking.sys{1,2}_id

    Args:
      ranking_list: list of Ranking judgments for one source segment.
    Returns:
      List of RankRow tuples (src_id unset) ordered best-first.
    Raises:
    """
    # Aggregate ranking counts: directed "victory" edges, equality edges,
    # and total judgment counts per ordered pair.
    di_edges = Counter()
    eq_edges = Counter()
    edge_counts = Counter()
    vertices = set()
    for ranking in ranking_list:
        # print str(ranking)
        vertices.add(ranking.sysA)
        vertices.add(ranking.sysB)
        edge = (ranking.sysA,ranking.sysB)
        edge_counts[edge] += 1
        if ranking.rank == 0:
            eq_edges[edge] += 1
        else:
            di_edges[edge] += 1

    # SPECIAL CASE: Equality
    # TA. Lopez discarded this data as a pre-processing
    # step. That is clearly bad since a single pairwise ranked judgment could
    # subsume all equality judgments.
    # Assert equality if equality is the majority pairwise judgment
    # TODO(spenceg): The Lopez implementation returns different
    # results if 0-weight edges are included in the tournament. Weird?
    for (a,b),n_eq in eq_edges.iteritems():
        # Equality edges are symmetric: combine counts from both directions.
        n_eq += eq_edges[(b,a)]
        total = edge_counts[(a,b)] + edge_counts[(b,a)]
        perc_eq = float(n_eq) / float(total)
        if perc_eq >= 0.5:
            # Majority says "equal": drop both directed edges. Counter's
            # __delitem__ tolerates missing keys, so this is safe.
            del di_edges[(a,b)]
            del di_edges[(b,a)]
            #if not (a,b) in di_edges:
            #    di_edges[(a,b)] = 0
            #if not (b,a) in di_edges:
            #    di_edges[(a,b)] = 0

    # Filter edges by only allowing one directed edge between
    # vertices. Edge weights are non-negative, and indicate
    # victories.
    tournament = Counter()
    for (a,b) in di_edges.keys():
        ab_cost = di_edges[(a,b)]
        assert ab_cost >= 0
        if (b,a) in di_edges:
            # Both directions observed: keep only the net winner.
            ba_cost = di_edges[(b,a)]
            cost_diff = ab_cost - ba_cost
            if cost_diff > 0:
                tournament[(a,b)] = cost_diff
            elif cost_diff < 0:
                tournament[(b,a)] = -1 * cost_diff
        else:
            tournament[(a,b)] = ab_cost

    # Generate the ranking
    vertices = list(vertices)
    # Call the reference Lopez implementation
    ranking = mfas_solver.lopez_solver(tournament, vertices)

    # Sanity check
    assert len(ranking) == len(vertices)

    # TODO(spenceg): Use the equality rankings as a post-processing
    # step to declare ties? Lopez didn't do this.
    #tie_with_prev = mark_ties(ranking, di_edges)
    tie_with_prev=None
    return make_rows(ranking, tie_with_prev)
def rank(answer_file):
    """ Reads the input file and applies ranking. Results
    are printed to stdout.

    Args:
      answer_file: path to the csv produced by wmtformat.py.
    Returns:
      None. Writes csv rows (header first) to stdout.
    Raises:
    """
    # Build data structures
    src2rank = parse_answer_file(answer_file)

    # Iterate over each source sentence and rank
    # Write to stdout
    write_header = True
    csv_out = csv.writer(sys.stdout)
    for src_id in sorted(src2rank.keys()):
        row_list = sort_tgts(src2rank[src_id])
        for row in row_list:
            if write_header:
                # Emit the column names once, before the first data row.
                csv_out.writerow(list(row._fields))
                write_header = False
            # sort_tgts does not set the src_id field
            row = row._replace(src_id=src_id)
            columns = [x for x in row._asdict().itervalues()]
            csv_out.writerow(columns)
def main():
    """Command-line entry point: parse arguments and run the ranker."""
    description = 'Converts the output of wmtformat.py to a global ranking using the algorithm of Lopez (2012).'
    arg_parser = ArgumentParser(description=description)
    arg_parser.add_argument('answer_csv',
                            help='Output of wmtformat.py')
    parsed_args = arg_parser.parse_args()
    rank(parsed_args.answer_csv)
# Script entry point.
if __name__ == '__main__':
    main()
Contributing to Elasticsearch
=============================
Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself.
If you want to be rewarded for your contributions, sign up for the [Elastic Contributor Program](https://www.elastic.co/community/contributor). Each time you
make a valid contribution, you’ll earn points that increase your chances of winning prizes and being recognized as a top contributor.
Bug reports
-----------
If you think you have found a bug in Elasticsearch, first make sure that you are testing against the [latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch) - your issue may already have been fixed. If not, search our [issues list](https://github.com/elastic/elasticsearch/issues) on GitHub in case a similar issue has already been opened.
It is very helpful if you can prepare a reproduction of the bug. In other words, provide a small test case which we can run to confirm your bug. It makes it easier to find the problem and to fix it. Test cases should be provided as `curl` commands which we can copy and paste into a terminal to run it locally, for example:
```sh
# delete the index
curl -XDELETE localhost:9200/test
# insert a document
curl -XPUT localhost:9200/test/test/1 -d '{
"title": "test document"
}'
# this should return XXXX but instead returns YYY
curl ....
```
Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed.
Feature requests
----------------
If you find yourself wishing for a feature that doesn't exist in Elasticsearch, you are probably not alone. There are bound to be others out there with similar needs. Many of the features that Elasticsearch has today have been added because our users saw the need.
Open an issue on our [issues list](https://github.com/elastic/elasticsearch/issues) on GitHub which describes the feature you would like to see, why you need it, and how it should work.
Contributing code and documentation changes
-------------------------------------------
If you would like to contribute a new feature or a bug fix to Elasticsearch,
please discuss your idea first on the GitHub issue. If there is no GitHub issue
for your idea, please open one. It may be that somebody is already working on
it, or that there are particular complexities that you should know about before
starting the implementation. There are often a number of ways to fix a problem
and it is important to find the right approach before spending time on a PR
that cannot be merged.
We add the `help wanted` label to existing GitHub issues for which community
contributions are particularly welcome, and we use the `good first issue` label
to mark issues that we think will be suitable for new contributors.
We generally do not assign issues to contributors outside of Elastic, but
please check the discussion on the issue to see if anyone else is working on
the same topic. If there are other active contributors in the same area then it
is a good idea to reach out to them so you can work together on the issue. If
there hasn't been any discussion for a while then go ahead and start working on
the issue yourself.
The process for contributing to any of the [Elastic repositories](https://github.com/elastic/) is similar. Details for individual projects can be found below.
### Fork and clone the repository
You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See
[GitHub help page](https://help.github.com/articles/fork-a-repo) for help.
Further instructions for specific projects are given below.
### Tips for code changes
Following these tips prior to raising a pull request will speed up the review
cycle.
* Add appropriate unit tests (details on writing tests can be found in the
[TESTING](TESTING.asciidoc) file)
* Add integration tests, if applicable
* Make sure the code you add follows the [formatting guidelines](#java-language-formatting-guidelines)
* Lines that are not part of your change should not be edited (e.g. don't format
unchanged lines, don't reorder existing imports)
* Add the appropriate [license headers](#license-headers) to any new files
* For contributions involving the Elasticsearch build you can find details about the build setup in the
[BUILDING](BUILDING.md) file
### Submitting your changes
Once your changes and tests are ready to submit for review:
1. Test your changes
Run the test suite to make sure that nothing is broken. See the
[TESTING](TESTING.asciidoc) file for help running tests.
2. Sign the Contributor License Agreement
Please make sure you have signed our [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once.
3. Rebase your changes
Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest main branch. We prefer your initial changes to be squashed into a single commit. Later, if we ask you to make changes, add them as separate commits. This makes them easier to review. As a final step before merging we will either ask you to squash all commits yourself or we'll do it for you.
4. Submit a pull request
Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, choose a title which sums up the changes that you have made, and in the body provide more details about what your changes do. Also mention the number of the issue where discussion has taken place, eg "Closes #123".
Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch. A YAML changelog entry will be created automatically; there is no need for external contributors to edit it manually unless the reviewer requests it.
Please adhere to the general guideline that you should never force push
to a publicly shared branch. Once you have opened your pull request, you
should consider your branch publicly shared. Instead of force pushing
you can just add incremental commits; this is generally easier on your
reviewers. If you need to pick up changes from main, you can merge
main into your branch. A reviewer might ask you to rebase a
long-running pull request in which case force pushing is okay for that
request. Note that squashing at the end of the review process should
also not be done, that can be done when the pull request is [integrated
via GitHub](https://github.com/blog/2141-squash-your-commits).
Contributing to the Elasticsearch codebase
------------------------------------------
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
JDK 21 is required to build Elasticsearch. You must have a JDK 21 installation
with the environment variable `JAVA_HOME` referencing the path to Java home for
your JDK 21 installation.
Elasticsearch uses the Gradle wrapper for its build. You can execute Gradle
using the wrapper via the `gradlew` script on Unix systems or `gradlew.bat`
script on Windows in the root of the repository. The examples below show the
usage on Unix.
We support development in [IntelliJ IDEA] versions 2020.1 and onwards.
[Docker](https://docs.docker.com/install/) is required for building some Elasticsearch artifacts and executing certain test suites. You can run Elasticsearch without building all the artifacts with:
./gradlew :run
That'll spend a while building Elasticsearch and then it'll start Elasticsearch,
writing its log above Gradle's status message. We log a lot of stuff on startup,
specifically these lines tell you that Elasticsearch is ready:
[2020-05-29T14:50:35,167][INFO ][o.e.h.AbstractHttpServerTransport] [runTask-0] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200}
[2020-05-29T14:50:35,169][INFO ][o.e.n.Node ] [runTask-0] started
But to be honest it's typically easier to wait until the console stops scrolling
and then run `curl` in another window like this:
curl -u elastic:password localhost:9200
To send requests to this Elasticsearch instance, either use the built-in `elastic`
user and password as above or use the pre-configured `elastic-admin` user:
curl -u elastic-admin:elastic-password localhost:9200
Security can also be disabled altogether:
./gradlew :run -Dtests.es.xpack.security.enabled=false
The definition of this Elasticsearch cluster can be found [here](build-tools-internal/src/main/groovy/elasticsearch.run.gradle).
### Importing the project into IntelliJ IDEA
The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1.
Elasticsearch builds using Java 21. When importing into IntelliJ you will need
to define an appropriate SDK. The convention is that **this SDK should be named
"21"** so that the project import will detect it automatically. For more details
on defining an SDK in IntelliJ please refer to [their documentation](https://www.jetbrains.com/help/idea/sdk.html#define-sdk).
SDK definitions are global, so you can add the JDK from any project, or after
project import. Importing with a missing JDK will still work, IntelliJ will
simply report a problem and will refuse to build until resolved.
You can import the Elasticsearch project into IntelliJ IDEA via:
- Select **File > Open**
- In the subsequent dialog navigate to the root `build.gradle` file
- In the subsequent dialog select **Open as Project**
#### Checkstyle
IntelliJ should automatically configure checkstyle. It does so by running
`configureIdeCheckstyle` on import. That makes `.idea/checkstyle-idea.xml`
configuration file. IntelliJ points checkstyle at that.
Things like `./gradlew clean` or `git clean -xdf` can nuke the file. You can
regenerate it by running `./gradlew -Didea.active=true configureIdeCheckstyle`,
but generally shouldn't have to.
#### Formatting
Elasticsearch code is automatically formatted with [Spotless], backed by the
Eclipse formatter. You can do the same in IntelliJ with the
[Eclipse Code Formatter] so that you can apply the correct formatting directly in
your IDE. The configuration for the plugin is held in
`.idea/eclipseCodeFormatter.xml` and should be automatically applied, but manual
instructions are below in case you need them.
1. Open **Preferences > Other Settings > Eclipse Code Formatter**
2. Click "Use the Eclipse Code Formatter"
3. Under "Eclipse formatter config", select "Eclipse workspace/project
folder or config file"
4. Click "Browse", and navigate to the file `build-conventions/formatterConfig.xml`
5. **IMPORTANT** - make sure "Optimize Imports" is **NOT** selected.
6. Click "OK"
7. Optional: If you like to format code changes on save automatically, open
**Preferences > Tools > Actions on Save** and check "Reformat Code", making sure to
configure Java files.
Alternative manual steps for IntelliJ.
1. Open **File > Settings/Preferences > Code Style > Java**
2. Gear icon > Import Scheme > Eclipse XML Profile
3. Navigate to the file `build-conventions/formatterConfig.xml`
4. Click "OK"
#### Options
When importing to IntelliJ, we offer a few options that can be used to
configure the behaviour of the import:
| Property | Description | Values (* = default) |
|--------------------------------------------|------------------------------------------------------------------------------------------------------|----------------------|
| `org.elasticsearch.idea-configuration-cache` | Should IntelliJ enable the Gradle Configuration cache to speed up builds when generating run configs | *`true`, `false` |
| `org.elasticsearch.idea-delegate-to-gradle` | Should IntelliJ use Gradle for all generated run / test configs or prompt each time | `true`, *`false` |
These options can be set anywhere on the Gradle config path including in `~/.gradle/gradle.properties`
### REST endpoint conventions
Elasticsearch typically uses singular nouns rather than plurals in URLs.
For example:
/_ingest/pipeline
/_ingest/pipeline/{id}
but not:
/_ingest/pipelines
/_ingest/pipelines/{id}
You may find counterexamples, but new endpoints should use the singular
form.
### Java language formatting guidelines
Java files in the Elasticsearch codebase are automatically formatted using
the [Spotless Gradle] plugin. All new projects are automatically formatted,
while existing projects are gradually being opted-in. The formatting check
is run automatically via the `precommit` task, but it can be run explicitly with:
./gradlew spotlessJavaCheck
It is usually more useful, and just as fast, to just reformat the project. You
can do this with:
./gradlew spotlessApply
These tasks can also be run for specific subprojects, e.g.
./gradlew server:spotlessJavaCheck
Please follow these formatting guidelines:
* Java indent is 4 spaces
* Line width is 140 characters
* Lines of code surrounded by `// tag::NAME` and `// end::NAME` comments are included
in the documentation and should only be 76 characters wide not counting
leading indentation. Such regions of code are not formatted automatically as
it is not possible to change the line length rule of the formatter for
part of a file. Please format such sections sympathetically with the rest
of the code, while keeping lines to maximum length of 76 characters.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause
the build to fail.
* If *absolutely* necessary, you can disable formatting for regions of code
with the `// tag::noformat` and `// end::noformat` directives, but
only do this where the benefit clearly outweighs the decrease in formatting
consistency.
* Note that Javadoc and block comments i.e. `/* ... */` are not formatted,
but line comments i.e. `// ...` are.
* Negative boolean expressions must use the form `foo == false` instead of
`!foo` for better readability of the code. This is enforced via
Checkstyle. Conversely, you should not write e.g. `if (foo == true)`, but
just `if (foo)`.
#### Editor / IDE support
IntelliJ IDEs can
[import](https://blog.jetbrains.com/idea/2014/01/intellij-idea-13-importing-code-formatter-settings-from-eclipse/)
the same settings file, and / or use the [Eclipse Code Formatter] plugin.
You can also tell Spotless to [format a specific
file](https://github.com/diffplug/spotless/tree/main/plugin-gradle#can-i-apply-spotless-to-specific-files)
from the command line.
### Javadoc
Good Javadoc can help with navigating and understanding code. Elasticsearch
has some guidelines around when to write Javadoc and when not to, but note
that we don't want to be overly prescriptive. The intent of these guidelines
is to be helpful, not to turn writing code into a chore.
#### The short version
1. Always add Javadoc to new code.
2. Add Javadoc to existing code if you can.
3. Document the "why", not the "how", unless that's important to the
"why".
4. Don't document anything trivial or obvious (e.g. getters and
setters). In other words, the Javadoc should add some value.
#### The long version
1. If you add a new Java package, please also add package-level
Javadoc that explains what the package is for. This can just be a
reference to a more foundational / parent package if appropriate. An
example would be a package hierarchy for a new feature or plugin -
the package docs could explain the purpose of the feature, any
caveats, and possibly some examples of configuration and usage.
2. New classes and interfaces must have class-level Javadoc that
describes their purpose. There are a lot of classes in the
Elasticsearch repository, and it's easier to navigate when you
can quickly find out what is the purpose of a class. This doesn't
apply to inner classes or interfaces, unless you expect them to be
explicitly used outside their parent class.
3. New public methods must have Javadoc, because they form part of the
contract between the class and its consumers. Similarly, new abstract
methods must have Javadoc because they are part of the contract
between a class and its subclasses. It's important that contributors
know why they need to implement a method, and the Javadoc should make
this clear. You don't need to document a method if it's overriding an
abstract method (either from an abstract superclass or an interface),
unless your implementation is doing something "unexpected" e.g. deviating
from the intent of the original method.
4. Following on from the above point, please add docs to existing public
methods if you are editing them, or to abstract methods if you can.
5. Non-public, non-abstract methods don't require Javadoc, but if you feel
that adding some would make it easier for other developers to
understand the code, or why it's written in a particular way, then please
do so.
6. Properties don't need to have Javadoc, but please add some if there's
something useful to say.
7. Javadoc should not go into low-level implementation details unless
this is critical to understanding the code e.g. documenting the
subtleties of the implementation of a private method. The point here
is that implementations will change over time, and the Javadoc is
less likely to become out-of-date if it only talks about
the purpose of the code, not what it does.
8. Examples in Javadoc can be very useful, so feel free to add some if
you can reasonably do so i.e. if it takes a whole page of code to set
up an example, then Javadoc probably isn't the right place for it.
Longer or more elaborate examples are probably better suited
to the package docs.
9. Test methods are a good place to add Javadoc, because you can use it
to succinctly describe e.g. preconditions, actions and expectations
of the test, more easily than just using the test name alone. Please
consider documenting your tests in this way.
10. Sometimes you shouldn't add Javadoc:
1. Where it adds no value, for example where a method's
implementation is trivial such as with getters and setters, or a
method just delegates to another object.
2. However, you should still add Javadoc if there are caveats around
calling a method that are not immediately obvious from reading the
method's implementation in isolation.
3. You can omit Javadoc for simple classes, e.g. where they are a
simple container for some data. However, please consider whether a
reader might still benefit from some additional background, for
example about why the class exists at all.
11. Not all comments need to be Javadoc. Sometimes it will make more
sense to add comments in a method's body, for example due to important
implementation decisions or "gotchas". As a general guide, if some
information forms part of the contract between a method and its callers,
then it should go in the Javadoc, otherwise you might consider using
regular comments in the code. Remember as well that Elasticsearch
has extensive [user documentation](./docs), and it is not the role
of Javadoc to replace that.
* If a method's performance is "unexpected" then it's good to call that
out in the Javadoc. This is especially helpful if the method is usually fast but sometimes
very slow (shakes fist at caching).
12. Please still try to make class, method or variable names as
descriptive and concise as possible, as opposed to relying solely on
Javadoc to describe something.
13. Use `@link` to add references to related resources in the codebase. Or
outside the code base.
1. `@see` is much more limited than `@link`. You can use it but most of
the time `@link` flows better.
14. If you need help writing Javadoc, just ask!
Finally, use your judgement! Base your decisions on what will help other
developers - including yourself, when you come back to some code
3 months in the future, having forgotten how it works.
### License headers
We require license headers on all Java files. With the exception of the
top-level `x-pack` directory, all contributed code should have the following
license header unless instructed otherwise:
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
The top-level `x-pack` directory contains code covered by the [Elastic
license](licenses/ELASTIC-LICENSE-2.0.txt). Community contributions to this code are
welcome, and should have the following license header unless instructed
otherwise:
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
It is important that the only code covered by the Elastic licence is contained
within the top-level `x-pack` directory. The build will fail its pre-commit
checks if contributed code does not have the appropriate license headers.
> [!NOTE]
> If you have imported the project into IntelliJ IDEA the project will
> be automatically configured to add the correct license header to new source
> files based on the source location.
### Type-checking, generics and casting
You should try to write code that does not require suppressing any warnings from
the compiler, e.g. suppressing type-checking, raw generics, and so on. However,
this isn't always possible or practical. In such cases, you should use the
`@SuppressWarnings` annotations to silence the compiler warning, trying to keep
the scope of the suppression as small as possible. Where a piece of code
requires a lot of suppressions, it may be better to apply a single suppression
at a higher level e.g. at the method or even class level. Use your judgement.
There are also cases where the compiler simply refuses to accept an assignment
or cast of any kind, because it lacks the information to know that the types are
OK. In such cases, you can use
the [`Types.forciblyCast`](libs/core/src/main/java/org/elasticsearch/core/Types.java)
utility method. As the name suggests, you can coerce any type to any other type,
so please use it as a last resort.
### Logging
The Elasticsearch server logs are vitally useful for diagnosing problems in a
running cluster. You should make sure that your contribution uses logging
appropriately: log enough detail to inform users about key events and help them
understand what happened when things go wrong without logging so much detail
that the logs fill up with noise and the useful signal is lost.
Elasticsearch uses Log4J for logging. In most cases you should log via a
`Logger` named after the class that is writing the log messages, which you can
do by declaring a static field of the class. For example:
class Foo {
private static final Logger logger = LogManager.getLogger(Foo.class);
}
In rare situations you may want to configure your `Logger` slightly
differently, perhaps specifying a different class or maybe using one of the
methods on `org.elasticsearch.common.logging.Loggers` instead.
If the log message includes values from your code then you must use
placeholders rather than constructing the string yourself using simple
concatenation. Consider wrapping the values in `[...]` to help distinguish them
from the static part of the message:
logger.debug("operation failed [{}] times in [{}]ms", failureCount, elapsedMillis);
You can also pass in an exception to log it including its stack trace, and any
causes and their causes, as well as any suppressed exceptions and so on:
logger.debug("operation failed", exception);
If you wish to use placeholders and an exception at the same time, construct a
`Supplier<String>` and use `org.elasticsearch.core.Strings.format`
- note java.util.Formatter syntax
logger.debug(() -> Strings.format("failed at offset [%s]", offset), exception);
You can also use a `java.util.Supplier<String>` to avoid constructing
expensive messages that will usually be discarded:
logger.debug(() -> "rarely seen output [" + expensiveMethod() + "]");
Logging is an important behaviour of the system and sometimes deserves its own
unit tests, especially if there is complex logic for computing what is logged
and when to log it. You can use a `org.elasticsearch.test.MockLog` to
make assertions about the logs that are being emitted.
Logging is a powerful diagnostic technique, but it is not the only possibility.
You should also consider exposing some information about your component via an
API instead of in logs. For instance, you can implement APIs to report its
current status, various statistics, and maybe even details of recent failures.
#### Log levels
Each log message is written at a particular _level_. By default, Elasticsearch
will suppress messages at the two most verbose levels, `TRACE` and `DEBUG`, and
will output messages at all other levels. Users can configure which levels of
message are written by each logger at runtime, but you should expect everyone
to run with the default configuration almost all the time and choose your
levels accordingly.
The guidance in this section is subjective in some areas. When in doubt,
discuss your choices with reviewers.
##### `TRACE`
This is the most verbose level, disabled by default, and it is acceptable if it
generates a very high volume of logs. The target audience of `TRACE` logs
comprises developers who are trying to deeply understand some unusual runtime
behaviour of a system. For instance `TRACE` logs may be useful when
understanding an unexpected interleaving of concurrent actions or some
unexpected consequences of a delayed response from a remote node.
`TRACE` logs will normally only make sense when read alongside the code, and
typically they will be read as a whole sequence of messages rather than in
isolation. For example, the `InternalClusterInfoService` uses `TRACE` logs to
record certain key events in its periodic refresh process:
logger.trace("starting async refresh");
// ...
logger.trace("received node stats response");
// ...
logger.trace("received indices stats response");
// ...
logger.trace("stats all received, computing cluster info and notifying listeners");
// ...
logger.trace("notifying [{}] of new cluster info", listener);
Even though `TRACE` logs may be very verbose, you should still exercise some
judgement when deciding when to use them. In many cases it will be easier to
understand the behaviour of the system using tests or by analysing the code
itself rather than by trawling through hundreds of trivial log messages.
It may not be easy, or even possible, to obtain `TRACE` logs from a production
system. Therefore they are not appropriate for information that you would
normally expect to be useful in diagnosing problems in production.
##### `DEBUG`
This is the next least verbose level and is also disabled by default. The
target audience of this level typically comprises users or developers who are
trying to diagnose an unexpected problem in a production system, perhaps to
help determine whether a fault lies within Elasticsearch or elsewhere.
Users should expect to be able to enable `DEBUG` logging on their production
systems for a whole subsystem for an extended period of time without
overwhelming the system or filling up their disks with logs, so it is important
to limit the volume of messages logged at this level. On the other hand, these
messages must still provide enough detail to diagnose the sorts of problems
that you expect Elasticsearch to encounter. In some cases it works well to
collect information over a period of time and then log a complete summary,
rather than recording every step of a process in its own message.
For example, the `Coordinator` uses `DEBUG` logs to record a change in mode,
including various internal details for context, because this event is fairly
rare but not important enough to notify users by default:
logger.debug(
"{}: coordinator becoming CANDIDATE in term {} (was {}, lastKnownLeader was [{}])",
method,
getCurrentTerm(),
mode,
lastKnownLeader
);
It's possible that the reader of `DEBUG` logs is also reading the code, but
that is less likely than for `TRACE` logs. Strive to avoid terminology that
only makes sense when reading the code, and also aim for messages at this level
to be self-contained rather than intending them to be read as a sequence.
It's often useful to log exceptions and other deviations from the "happy path"
at `DEBUG` level. Exceptions logged at `DEBUG` should generally include the
complete stack trace.
##### `INFO`
This is the next least verbose level, and the first level that is enabled by
default. It is appropriate for recording important events in the life of the
cluster, such as an index being created or deleted or a snapshot starting or
completing. Users will mostly ignore log messages at `INFO` level, but may use
these messages to construct a high-level timeline of events leading up to an
incident.
For example, the `MetadataIndexTemplateService` uses `INFO` logs to record when
an index template is created or updated:
logger.info(
"{} index template [{}] for index patterns {}",
existing == null ? "adding" : "updating",
name,
template.indexPatterns()
);
`INFO`-level logging is enabled by default so its target audience is the
general population of users and administrators. You should use user-facing
terminology and ensure that messages at this level are self-contained. In
general, you shouldn't log unusual events, particularly exceptions with stack
traces, at `INFO` level. If the event is relatively benign then use `DEBUG`,
whereas if the user should be notified then use `WARN`.
Bear in mind that users will be reading the logs when they're trying to
determine why their node is not behaving the way they expect. If a log message
sounds like an error then some users will interpret it as one, even if it is
logged at `INFO` level. Where possible, `INFO` messages should prefer factual
over judgemental language, for instance saying `Did not find ...` rather than
`Failed to find ...`.
##### `WARN`
This is the next least verbose level, and is also enabled by default. Ideally a
healthy cluster will emit no `WARN`-level logs, but this is the appropriate
level for recording events that the cluster administrator should investigate,
or which indicate a bug. Some production environments require the cluster to
emit no `WARN`-level logs during acceptance testing, so you must ensure that
any logs at this level really do indicate a problem that needs addressing.
As with the `INFO` level, you should use user-facing terminology at the `WARN`
level, and also ensure that messages are self-contained. Strive to make them
actionable too since you should be logging at this level when the user should
take some investigative action.
For example, the `DiskThresholdMonitor` uses `WARN` logs to record that a disk
threshold has been breached:
logger.warn(
"flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only",
diskThresholdSettings.describeFloodStageThreshold(total, false),
usage
);
Unlike at the `INFO` level, it is often appropriate to log an exception,
complete with stack trace, at `WARN` level. Although the stack trace may not be
useful to the user, it may contain information that is vital for a developer to
fully understand the problem and its wider context.
In a situation where occasional transient failures are expected and handled,
but a persistent failure requires the user's attention, consider implementing a
mechanism to detect that a failure is unacceptably persistent and emit a
corresponding `WARN` log. For example, it may be helpful to log every tenth
consecutive failure at `WARN` level, or log at `WARN` if an operation has not
completed within a certain time limit. This is much more user-friendly than
failing persistently and silently by default and requiring the user to enable
`DEBUG` logging to investigate the problem.
If an exception occurs as a direct result of a request received from a client
then it should only be logged as a `WARN` if the server administrator is the
right person to address it. In most cases the server administrator cannot do
anything about faulty client requests, and the person running the client is
often unable to see the server logs, so you should include the exception in the
response back to the client and not log a warning. Bear in mind that clients
may submit requests at a high rate, so any per-request logging can easily flood
the logs.
##### `ERROR`
This is the next least verbose level after `WARN`. In theory, it is possible for
users to suppress messages at `WARN` and below, believing this to help them
focus on the most important `ERROR` messages, but in practice in Elasticsearch
this will hide so much useful information that the resulting logs will be
useless, so we do not expect users to do this kind of filtering.
On the other hand, users may be familiar with the `ERROR` level from elsewhere.
Log4J for instance documents this level as meaning "an error in the
application, possibly recoverable". The implication here is that the error is
possibly _not_ recoverable too, and we do encounter users that get very worried
by logs at `ERROR` level for this reason.
Therefore you should try and avoid logging at `ERROR` level unless the error
really does indicate that Elasticsearch is now running in a degraded state from
which it will not recover. For instance, the `FsHealthService` uses `ERROR`
logs to record that the data path failed some basic health checks and hence the
node cannot continue to operate as a member of the cluster:
logger.error(() -> "health check of [" + path + "] failed", ex);
Errors like this should be very rare. When in doubt, prefer `WARN` to `ERROR`.
### Versioning Elasticsearch
There are various concepts used to identify running node versions,
and the capabilities and compatibility of those nodes. For more information,
see `docs/internal/Versioning.md`
### Creating a distribution
Run all build commands from within the root directory:
cd elasticsearch/
To build a darwin-tar distribution, run this command:
./gradlew -p distribution/archives/darwin-tar assemble
You will find the distribution under:
./distribution/archives/darwin-tar/build/distributions/
To create all build artifacts (e.g., plugins and Javadocs) as well as
distributions in all formats, run this command:
./gradlew assemble
> **NOTE:** Running the task above will fail if you don't have an available
> Docker installation.
The package distributions (Debian and RPM) can be found under:
./distribution/packages/(deb|rpm|oss-deb|oss-rpm)/build/distributions/
The archive distributions (tar and zip) can be found under:
./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/
### Running the full test suite
Before submitting your changes, run the test suite to make sure that nothing is broken, with:
./gradlew check
If your changes affect only the documentation, run:
./gradlew -p docs check
For more information about testing code examples in the documentation, see
https://github.com/elastic/elasticsearch/blob/main/docs/README.asciidoc
### Only running failed tests
When you open your pull-request it may be approved for review. If so, the full
test suite is run within Elasticsearch's CI environment. If a test fails,
you can see how to run that particular test by searching for the `REPRODUCE`
string in the CI's console output.
Elasticsearch's testing suite takes advantage of randomized testing. Consequently,
a test that passes locally, may actually fail later due to random settings
or data input. To make tests repeatable, a `REPRODUCE` line in CI will also include
the `-Dtests.seed` parameter.
When running locally, Gradle does its best to take advantage of cached results.
So, if the code is unchanged, running the same test with the same `-Dtests.seed`
repeatedly may not actually run the test if it has passed with that seed
in the previous execution. A way around this is to pass a separate parameter
to adjust the command options seen by Gradle.
A simple option may be to add the parameter `-Dtests.timestamp=$(date +%s)`
which will give the current time stamp as a parameter, thus making the parameters
sent to Gradle unique and bypassing the cache.
### Project layout
This repository is split into many top level directories. The most important
ones are:
#### `docs`
Documentation for the project.
#### `distribution`
Builds our tar and zip archives and our rpm and deb packages.
#### `libs`
Libraries used to build other parts of the project. These are meant to be
internal rather than general purpose. We have no plans to
[semver](https://semver.org/) their APIs or accept feature requests for them.
We publish them to Maven Central because they are dependencies of our plugin
test framework, high level rest client, and jdbc driver, but they really aren't
general purpose enough to *belong* in Maven Central. We're still working out
what to do here.
#### `modules`
Features that are shipped with Elasticsearch by default but are not built in to
the server. We typically separate features from the server because they require
permissions that we don't believe *all* of Elasticsearch should have or because
they depend on libraries that we don't believe *all* of Elasticsearch should
depend on.
For example, reindex requires the `connect` permission so it can perform
reindex-from-remote, but we don't believe that *all* of Elasticsearch should
have the "connect" permission. For another example, Painless is implemented using antlr4
and asm and we don't believe that *all* of Elasticsearch should have access to
them.
#### `plugins`
Officially supported plugins to Elasticsearch. We decide that a feature should
be a plugin rather than shipped as a module because we feel that it is only
important to a subset of users, especially if it requires extra dependencies.
The canonical example of this is the ICU analysis plugin. It is important for
folks who want the fairly language neutral ICU analyzer but the library to
implement the analyzer is 11MB so we don't ship it with Elasticsearch by
default.
Another example is the `discovery-gce` plugin. It is *vital* to folks running
in [GCP](https://cloud.google.com/) but useless otherwise and it depends on a
dozen extra jars.
#### `qa`
Honestly this is kind of in flux and we're not 100% sure where we'll end up.
Right now the directory contains
* Tests that require multiple modules or plugins to work
* Tests that form a cluster made up of multiple versions of Elasticsearch like
full cluster restart, rolling restarts, and mixed version tests
* Tests that test the Elasticsearch clients in "interesting" places like the
`wildfly` project.
* Tests that test Elasticsearch in funny configurations like with ingest
disabled
* Tests that need to do strange things like install plugins that throw
uncaught `Throwable`s or add a shutdown hook
But we're not convinced that all of these things *belong* in the qa directory.
We're fairly sure that tests that require multiple modules or plugins to work
should just pick a "home" plugin. We're fairly sure that the multi-version
tests *do* belong in qa. Beyond that, we're not sure. If you want to add a new
qa project, open a PR and be ready to discuss options.
#### `server`
The server component of Elasticsearch that contains all of the modules and
plugins. Right now things like the high level rest client depend on the server,
but we'd like to fix that in the future.
#### `test`
Our test framework and test fixtures. We use the test framework for testing the
server, the plugins, and modules, and pretty much everything else. We publish
the test framework so folks who develop Elasticsearch plugins can use it to
test the plugins. The test fixtures are external processes that we start before
running specific tests that rely on them.
For example, we have an hdfs test that uses mini-hdfs to test our
repository-hdfs plugin.
#### `x-pack`
Commercially licensed code that integrates with the rest of Elasticsearch. The
`docs` subdirectory functions just like the top level `docs` subdirectory and
the `qa` subdirectory functions just like the top level `qa` subdirectory. The
`plugin` subdirectory contains the x-pack module which runs inside the
Elasticsearch process.
### Gradle build
We use Gradle to build Elasticsearch because it is flexible enough to not only
build and package Elasticsearch, but also orchestrate all of the ways that we
have to test Elasticsearch.
#### Configurations
Gradle organizes dependencies and build artifacts into "configurations" and
allows you to use these configurations arbitrarily. Here are some of the most
common configurations in our build and how we use them:
<dl>
<dt>`implementation`</dt><dd>Dependencies that are used by the project
at compile and runtime but are not exposed as a compile dependency to other dependent projects.
Dependencies added to the `implementation` configuration are considered an implementation detail
that can be changed at a later date without affecting any dependent projects.</dd>
<dt>`api`</dt><dd>Dependencies that are used as compile and runtime dependencies of a project
and are considered part of the external api of the project.</dd>
<dt>`runtimeOnly`</dt><dd>Dependencies that are not on the classpath at compile time but
are on the classpath at runtime. We mostly use this configuration to make sure that
we do not accidentally compile against dependencies of our dependencies, also
known as "transitive" dependencies.</dd>
<dt>`compileOnly`</dt><dd>Code that is on the classpath at compile time but that
should not be shipped with the project because it is "provided" by the runtime
somehow. Elasticsearch plugins use this configuration to include dependencies
that are bundled with Elasticsearch's server.</dd>
<dt>`testImplementation`</dt><dd>Code that is on the classpath for compiling tests
that are part of this project but not production code. The canonical example
of this is `junit`.</dd>
</dl>
Reviewing and accepting your contribution
-----------------------------------------
We review every contribution carefully to ensure that the change is of high
quality and fits well with the rest of the Elasticsearch codebase. If accepted,
we will merge your change and usually take care of backporting it to
appropriate branches ourselves.
We really appreciate everyone who is interested in contributing to
Elasticsearch and regret that we sometimes have to reject contributions even
when they might appear to make genuine improvements to the system. Reviewing
contributions can be a very time-consuming task, yet the team is small and our
time is very limited. In some cases the time we would need to spend on reviews
would outweigh the benefits of a change by preventing us from working on other
more beneficial changes instead.
Please discuss your change in a GitHub issue before spending much time on its
implementation. We sometimes have to reject contributions that duplicate other
efforts, take the wrong approach to solving a problem, or solve a problem which
does not need solving. An up-front discussion often saves a good deal of wasted
time in these cases.
We normally immediately reject isolated PRs that only perform simple
refactorings or otherwise "tidy up" certain aspects of the code. We think the
benefits of this kind of change are very small, and in our experience it is not
worth investing the substantial effort needed to review them. This especially
includes changes suggested by tools.
We normally immediately reject PRs which target platforms or system
configurations that are not in the [official support
matrix](https://www.elastic.co/support/matrix). We choose to support particular
platforms with care because we must work to ensure that every Elasticsearch
release works completely on every platform, and we must spend time
investigating test failures and performance regressions there too. We cannot
determine whether PRs which target unsupported platforms or configurations meet
our quality standards, nor can we guarantee that the change they introduce will
continue to work in future releases. We do not want Elasticsearch to suddenly
stop working on a particular platform after an upgrade.
We sometimes reject contributions due to the low quality of the submission
since low-quality submissions tend to take unreasonable effort to review
properly. Quality is rather subjective so it is hard to describe exactly how to
avoid this, but there are some basic steps you can take to reduce the chances
of rejection. Follow the guidelines listed above when preparing your changes.
You should add tests that correspond with your changes, and your PR should pass
affected test suites too. It makes it much easier to review if your code is
formatted correctly and does not include unnecessary extra changes.
We sometimes reject contributions if we find ourselves performing many review
iterations without making enough progress. Some iteration is expected,
particularly on technically complicated changes, and there's no fixed limit on
the acceptable number of review cycles since it depends so much on the nature
of the change. You can help to reduce the number of iterations by reviewing
your contribution yourself or in your own team before asking us for a review.
You may be surprised how many comments you can anticipate and address by taking
a short break and then carefully looking over your changes again.
We expect you to follow up on review comments somewhat promptly, but recognise
that everyone has many priorities for their time and may not be able to respond
for several days. We will understand if you find yourself without the time to
complete your contribution, but please let us know that you have stopped
working on it. We will try to send you a reminder if we haven't heard from you
in a while, but may end up closing your PR if you do not respond for too long.
If your contribution is rejected we will close the pull request with a comment
explaining why. This decision isn't always final: if you feel we have
misunderstood your intended change or otherwise think that we should reconsider
then please continue the conversation with a comment on the pull request and
we'll do our best to address any further points you raise.
Contributing as part of a class
-------------------------------
In general Elasticsearch is happy to accept contributions that were created as
part of a class but strongly advise against making the contribution as part of
the class. So if you have code you wrote for a class feel free to submit it.
Please, please, please do not assign contributing to Elasticsearch as part of a
class. If you really want to assign writing code for Elasticsearch as an
assignment then the code contributions should be made to your private clone and
opening PRs against the primary Elasticsearch clone must be optional, fully
voluntary, not for a grade, and without any deadlines.
Because:
* While the code review process is likely very educational, it can take wildly
varying amounts of time depending on who is available, where the change is, and
how deep the change is. There is no way to predict how long it will take unless
we rush.
* We do not rush reviews without a very, very good reason. Class deadlines
aren't a good enough reason for us to rush reviews.
* We deeply discourage opening a PR you don't intend to work through the entire
code review process because it wastes our time.
* We don't have the capacity to absorb an entire class full of new contributors,
especially when they are unlikely to become long time contributors.
Finally, we require that you run `./gradlew check` before submitting a
non-documentation contribution. This is mentioned above, but it is worth
repeating in this section because it has come up in this context.
[IntelliJ IDEA]: https://www.jetbrains.com/idea/
[Checkstyle]: https://plugins.jetbrains.com/plugin/1065-checkstyle-idea
[Spotless]: https://github.com/diffplug/spotless
[Eclipse Code Formatter]: https://plugins.jetbrains.com/plugin/6546-eclipse-code-formatter
[Spotless Gradle]: https://github.com/diffplug/spotless/tree/main/plugin-gradle | unknown | github | https://github.com/elastic/elasticsearch | CONTRIBUTING.md |
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format.yamlf module."""
from __future__ import print_function
from __future__ import unicode_literals
import logging
from rptk.format import BaseFormat
import yaml
try:
from yaml import CDumper as Dumper
except ImportError as e: # pragma: no cover
logging.getLogger(__name__).warning("%s: falling back to python dumper", e)
from yaml import Dumper
class YamlFormat(BaseFormat):
    """Renders the result object as a YAML document."""

    # Human-readable description and HTTP content type advertised for this
    # output format.
    description = "YAML object representation"
    content_type = "application/x-yaml"

    def format(self, result=None):
        """Render output as YAML.

        :param result: result object to serialise.
        :returns: YAML document string with explicit ``---``/``...``
            start and end markers, block style, 4-space indent.
        :raises Exception: re-raises (after logging) any error from
            ``yaml.dump``.
        """
        self.log_method_enter(method=self.current_method)
        # Name the class explicitly: super(self.__class__, self) would
        # recurse infinitely if this class were ever subclassed.
        super(YamlFormat, self).format(result=result)
        self.log.debug(msg="creating yaml output")
        try:
            output = yaml.dump(result, Dumper=Dumper, indent=4,
                               explicit_start=True, explicit_end=True,
                               default_flow_style=False)
        except Exception as e:
            self.log.error(msg="{}".format(e))
            # Bare raise preserves the original traceback.
            raise
        self.log_method_exit(method=self.current_method)
        return output
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TimedRequestStateTest {
private static final long DEFAULT_TIMEOUT_MS = 30000;
private final Time time = new MockTime();
@Test
public void testIsExpired() {
TimedRequestState state = new TimedRequestState(
new LogContext(),
this.getClass().getSimpleName(),
100,
1000,
time.timer(DEFAULT_TIMEOUT_MS)
);
assertFalse(state.isExpired());
time.sleep(DEFAULT_TIMEOUT_MS);
assertTrue(state.isExpired());
}
@Test
public void testRemainingMs() {
TimedRequestState state = new TimedRequestState(
new LogContext(),
this.getClass().getSimpleName(),
100,
1000,
time.timer(DEFAULT_TIMEOUT_MS)
);
assertEquals(DEFAULT_TIMEOUT_MS, state.remainingMs());
time.sleep(DEFAULT_TIMEOUT_MS);
assertEquals(0, state.remainingMs());
}
@Test
public void testDeadlineTimer() {
long deadlineMs = time.milliseconds() + DEFAULT_TIMEOUT_MS;
Timer timer = TimedRequestState.deadlineTimer(time, deadlineMs);
assertEquals(DEFAULT_TIMEOUT_MS, timer.remainingMs());
timer.sleep(DEFAULT_TIMEOUT_MS);
assertEquals(0, timer.remainingMs());
}
@Test
public void testAllowOverdueDeadlineTimer() {
long deadlineMs = time.milliseconds() - DEFAULT_TIMEOUT_MS;
Timer timer = TimedRequestState.deadlineTimer(time, deadlineMs);
assertEquals(0, timer.remainingMs());
}
@Test
public void testToStringUpdatesTimer() {
TimedRequestState state = new TimedRequestState(
new LogContext(),
this.getClass().getSimpleName(),
100,
1000,
time.timer(DEFAULT_TIMEOUT_MS)
);
assertToString(state, DEFAULT_TIMEOUT_MS);
time.sleep(DEFAULT_TIMEOUT_MS);
assertToString(state, 0);
}
private void assertToString(TimedRequestState state, long timerMs) {
assertTrue(state.toString().contains("remainingMs=" + timerMs + "}"));
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/TimedRequestStateTest.java |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Checks a policy_templates.json file for conformity to its syntax specification.
'''
import json
import optparse
import os
import re
import sys
LEADING_WHITESPACE = re.compile('^([ \t]*)')
TRAILING_WHITESPACE = re.compile('.*?([ \t]+)$')
# Matches all non-empty strings that contain no whitespaces.
NO_WHITESPACE = re.compile('[^\s]+$')
# Convert a 'type' to the schema types it may be converted to.
# The 'dict' type represents structured JSON data, and can be converted
# to an 'object' or an 'array'.
TYPE_TO_SCHEMA = {
'int': [ 'integer' ],
'list': [ 'array' ],
'dict': [ 'object', 'array' ],
'main': [ 'boolean' ],
'string': [ 'string' ],
'int-enum': [ 'integer' ],
'string-enum': [ 'string' ],
'string-enum-list': [ 'array' ],
'external': [ 'object' ],
}
# List of boolean policies that have been introduced with negative polarity in
# the past and should not trigger the negative polarity check.
LEGACY_INVERTED_POLARITY_WHITELIST = [
'DeveloperToolsDisabled',
'DeviceAutoUpdateDisabled',
'Disable3DAPIs',
'DisableAuthNegotiateCnameLookup',
'DisablePluginFinder',
'DisablePrintPreview',
'DisableSafeBrowsingProceedAnyway',
'DisableScreenshots',
'DisableSpdy',
'DisableSSLRecordSplitting',
'DriveDisabled',
'DriveDisabledOverCellular',
'ExternalStorageDisabled',
'SavingBrowserHistoryDisabled',
'SyncDisabled',
]
class PolicyTemplateChecker(object):
  '''Validates the structure and formatting of a policy_templates.json file.

  Collects error/warning counts and simple statistics while walking the
  parsed data; Main() reports them and returns a process exit code.
  Note: this is a Python 2 script (uses print statements).
  '''

  def __init__(self):
    # Running totals reported at the end of Main().
    self.error_count = 0
    self.warning_count = 0
    self.num_policies = 0
    self.num_groups = 0
    self.num_policies_in_groups = 0
    # Parsed command-line options; set in Main().
    self.options = None
    # Feature names collected from 'doc_feature_*' messages; used to
    # validate per-policy 'features' dicts.
    self.features = []

  def _Error(self, message, parent_element=None, identifier=None,
             offending_snippet=None):
    '''Prints an error message (with optional context) and bumps error_count.'''
    self.error_count += 1
    error = ''
    if identifier is not None and parent_element is not None:
      error += 'In %s %s: ' % (parent_element, identifier)
    print error + 'Error: ' + message
    if offending_snippet is not None:
      print ' Offending:', json.dumps(offending_snippet, indent=2)

  def _CheckContains(self, container, key, value_type,
                     optional=False,
                     parent_element='policy',
                     container_name=None,
                     identifier=None,
                     offending='__CONTAINER__',
                     regexp_check=None):
    '''
    Checks |container| for presence of |key| with value of type |value_type|.
    If |value_type| is string and |regexp_check| is specified, then an error is
    reported when the value does not match the regular expression object.
    |value_type| can also be a list, if more than one type is supported.
    The other parameters are needed to generate, if applicable, an appropriate
    human-readable error message of the following form:
    In |parent_element| |identifier|:
    (if the key is not present):
    Error: |container_name| must have a |value_type| named |key|.
    Offending snippet: |offending| (if specified; defaults to |container|)
    (if the value does not have the required type):
    Error: Value of |key| must be a |value_type|.
    Offending snippet: |container[key]|
    Returns: |container[key]| if the key is present, None otherwise.
    '''
    if identifier is None:
      try:
        identifier = container.get('name')
      except:
        self._Error('Cannot access container name of "%s".' % container_name)
        return None
    if container_name is None:
      container_name = parent_element
    if offending == '__CONTAINER__':
      # Sentinel: by default the whole container is the offending snippet.
      offending = container
    if key not in container:
      if optional:
        return
      else:
        self._Error('%s must have a %s "%s".' %
                    (container_name.title(), value_type.__name__, key),
                    container_name, identifier, offending)
        return None
    value = container[key]
    # |value_type| may be a single type or a list of acceptable types.
    value_types = value_type if isinstance(value_type, list) else [ value_type ]
    if not any(isinstance(value, type) for type in value_types):
      self._Error('Value of "%s" must be one of [ %s ].' %
                  (key, ', '.join([type.__name__ for type in value_types])),
                  container_name, identifier, value)
    if str in value_types and regexp_check and not regexp_check.match(value):
      self._Error('Value of "%s" must match "%s".' %
                  (key, regexp_check.pattern),
                  container_name, identifier, value)
    return value

  def _AddPolicyID(self, id, policy_ids, policy):
    '''
    Adds |id| to |policy_ids|. Generates an error message if the
    |id| exists already; |policy| is needed for this message.
    '''
    if id in policy_ids:
      self._Error('Duplicate id', 'policy', policy.get('name'),
                  id)
    else:
      policy_ids.add(id)

  def _CheckPolicyIDs(self, policy_ids):
    '''
    Checks a set of policy_ids to make sure it contains a continuous range
    of entries (i.e. no holes).
    Holes would not be a technical problem, but we want to ensure that nobody
    accidentally omits IDs.
    '''
    # IDs are expected to be exactly 1..len(policy_ids).
    for i in range(len(policy_ids)):
      if (i + 1) not in policy_ids:
        self._Error('No policy with id: %s' % (i + 1))

  def _CheckPolicySchema(self, policy, policy_type):
    '''Checks that the 'schema' field matches the 'type' field.'''
    self._CheckContains(policy, 'schema', dict)
    if isinstance(policy.get('schema'), dict):
      self._CheckContains(policy['schema'], 'type', str)
      schema_type = policy['schema'].get('type')
      if schema_type not in TYPE_TO_SCHEMA[policy_type]:
        self._Error('Schema type must match the existing type for policy %s' %
                    policy.get('name'))
      # Checks that boolean policies are not negated (which makes them harder to
      # reason about).
      if (schema_type == 'boolean' and
          'disable' in policy.get('name').lower() and
          policy.get('name') not in LEGACY_INVERTED_POLARITY_WHITELIST):
        self._Error(('Boolean policy %s uses negative polarity, please make ' +
                     'new boolean policies follow the XYZEnabled pattern. ' +
                     'See also http://crbug.com/85687') % policy.get('name'))

  def _CheckPolicy(self, policy, is_in_group, policy_ids):
    '''Validates a single policy dictionary (recursing into groups).

    |is_in_group| is True when |policy| is nested inside a group;
    |policy_ids| accumulates the ids seen so far (for duplicate and
    continuity checks).
    '''
    if not isinstance(policy, dict):
      self._Error('Each policy must be a dictionary.', 'policy', None, policy)
      return
    # There should not be any unknown keys in |policy|.
    for key in policy:
      if key not in ('name', 'type', 'caption', 'desc', 'device_only',
                     'supported_on', 'label', 'policies', 'items',
                     'example_value', 'features', 'deprecated', 'future',
                     'id', 'schema', 'max_size'):
        self.warning_count += 1
        print ('In policy %s: Warning: Unknown key: %s' %
               (policy.get('name'), key))
    # Each policy must have a name.
    self._CheckContains(policy, 'name', str, regexp_check=NO_WHITESPACE)
    # Each policy must have a type.
    policy_types = ('group', 'main', 'string', 'int', 'list', 'int-enum',
                    'string-enum', 'string-enum-list', 'dict', 'external')
    policy_type = self._CheckContains(policy, 'type', str)
    if policy_type not in policy_types:
      self._Error('Policy type must be one of: ' + ', '.join(policy_types),
                  'policy', policy.get('name'), policy_type)
      return  # Can't continue for unsupported type.
    # Each policy must have a caption message.
    self._CheckContains(policy, 'caption', str)
    # Each policy must have a description message.
    self._CheckContains(policy, 'desc', str)
    # If 'label' is present, it must be a string.
    self._CheckContains(policy, 'label', str, True)
    # If 'deprecated' is present, it must be a bool.
    self._CheckContains(policy, 'deprecated', bool, True)
    # If 'future' is present, it must be a bool.
    self._CheckContains(policy, 'future', bool, True)
    if policy_type == 'group':
      # Groups must not be nested.
      if is_in_group:
        self._Error('Policy groups must not be nested.', 'policy', policy)
      # Each policy group must have a list of policies.
      policies = self._CheckContains(policy, 'policies', list)
      # Check sub-policies.
      if policies is not None:
        for nested_policy in policies:
          self._CheckPolicy(nested_policy, True, policy_ids)
      # Groups must not have an |id|.
      if 'id' in policy:
        self._Error('Policies of type "group" must not have an "id" field.',
                    'policy', policy)
      # Statistics.
      self.num_groups += 1
    else:  # policy_type != group
      # Each policy must have a protobuf ID.
      id = self._CheckContains(policy, 'id', int)
      self._AddPolicyID(id, policy_ids, policy)
      # 'schema' is the new 'type'.
      # TODO(joaodasilva): remove the 'type' checks once 'schema' is used
      # everywhere.
      self._CheckPolicySchema(policy, policy_type)
      # Each policy must have a supported_on list.
      supported_on = self._CheckContains(policy, 'supported_on', list)
      if supported_on is not None:
        for s in supported_on:
          if not isinstance(s, str):
            self._Error('Entries in "supported_on" must be strings.', 'policy',
                        policy, supported_on)
      # Each policy must have a 'features' dict.
      features = self._CheckContains(policy, 'features', dict)
      # All the features must have a documenting message.
      if features:
        for feature in features:
          if not feature in self.features:
            self._Error('Unknown feature "%s". Known features must have a '
                        'documentation string in the messages dictionary.' %
                        feature, 'policy', policy.get('name', policy))
        # All user policies must have a per_profile feature flag.
        # (Python 2 filter() returns a list, so this tests for "no
        # chrome_frame platform entries".)
        if (not policy.get('device_only', False) and
            not policy.get('deprecated', False) and
            not filter(re.compile('^chrome_frame:.*').match, supported_on)):
          self._CheckContains(features, 'per_profile', bool,
                              container_name='features',
                              identifier=policy.get('name'))
        # All policies must declare whether they allow changes at runtime.
        self._CheckContains(features, 'dynamic_refresh', bool,
                            container_name='features',
                            identifier=policy.get('name'))
      # Each policy must have an 'example_value' of appropriate type.
      if policy_type == 'main':
        value_type = item_type = bool
      elif policy_type in ('string', 'string-enum'):
        value_type = item_type = str
      elif policy_type in ('int', 'int-enum'):
        value_type = item_type = int
      elif policy_type in ('list', 'string-enum-list'):
        value_type = list
        item_type = str
      elif policy_type == 'external':
        value_type = item_type = dict
      elif policy_type == 'dict':
        value_type = item_type = [ dict, list ]
      else:
        raise NotImplementedError('Unimplemented policy type: %s' % policy_type)
      self._CheckContains(policy, 'example_value', value_type)
      # Statistics.
      self.num_policies += 1
      if is_in_group:
        self.num_policies_in_groups += 1
      if policy_type in ('int-enum', 'string-enum', 'string-enum-list'):
        # Enums must contain a list of items.
        items = self._CheckContains(policy, 'items', list)
        if items is not None:
          if len(items) < 1:
            self._Error('"items" must not be empty.', 'policy', policy, items)
          for item in items:
            # Each item must have a name.
            # Note: |policy.get('name')| is used instead of |policy['name']|
            # because it returns None rather than failing when no key called
            # 'name' exists.
            self._CheckContains(item, 'name', str, container_name='item',
                                identifier=policy.get('name'),
                                regexp_check=NO_WHITESPACE)
            # Each item must have a value of the correct type.
            self._CheckContains(item, 'value', item_type, container_name='item',
                                identifier=policy.get('name'))
            # Each item must have a caption.
            self._CheckContains(item, 'caption', str, container_name='item',
                                identifier=policy.get('name'))
      if policy_type == 'external':
        # Each policy referencing external data must specify a maximum data
        # size.
        self._CheckContains(policy, 'max_size', int)

  def _CheckMessage(self, key, value):
    '''Validates one entry of the top-level 'messages' dictionary.'''
    # |key| must be a string, |value| a dict.
    if not isinstance(key, str):
      self._Error('Each message key must be a string.', 'message', key, key)
      return
    if not isinstance(value, dict):
      self._Error('Each message must be a dictionary.', 'message', key, value)
      return
    # Each message must have a desc.
    self._CheckContains(value, 'desc', str, parent_element='message',
                        identifier=key)
    # Each message must have a text.
    self._CheckContains(value, 'text', str, parent_element='message',
                        identifier=key)
    # There should not be any unknown keys in |value|.
    for vkey in value:
      if vkey not in ('desc', 'text'):
        self.warning_count += 1
        print 'In message %s: Warning: Unknown key: %s' % (key, vkey)

  def _LeadingWhitespace(self, line):
    '''Returns the spaces/tabs at the start of |line| ('' if none).'''
    match = LEADING_WHITESPACE.match(line)
    if match:
      return match.group(1)
    return ''

  def _TrailingWhitespace(self, line):
    '''Returns the spaces/tabs at the end of |line| ('' if none).'''
    match = TRAILING_WHITESPACE.match(line)
    if match:
      return match.group(1)
    return ''

  def _LineError(self, message, line_number):
    '''Reports a formatting error for a specific line.'''
    self.error_count += 1
    print 'In line %d: Error: %s' % (line_number, message)

  def _LineWarning(self, message, line_number):
    '''Reports a formatting issue that --fix has repaired automatically.'''
    self.warning_count += 1
    print ('In line %d: Warning: Automatically fixing formatting: %s'
           % (line_number, message))

  def _CheckFormat(self, filename):
    '''Lints the raw text of |filename|: trailing whitespace, tabs, and
    2-space indentation tracked via trailing [/{ and leading ]/}.

    With --fix, collects repaired lines and rewrites the file in place
    (optionally keeping a .bak backup with --backup).
    '''
    if self.options.fix:
      fixed_lines = []
    with open(filename) as f:
      indent = 0
      line_number = 0
      for line in f:
        line_number += 1
        line = line.rstrip('\n')
        # Check for trailing whitespace.
        trailing_whitespace = self._TrailingWhitespace(line)
        if len(trailing_whitespace) > 0:
          if self.options.fix:
            line = line.rstrip()
            self._LineWarning('Trailing whitespace.', line_number)
          else:
            self._LineError('Trailing whitespace.', line_number)
        if self.options.fix:
          if len(line) == 0:
            fixed_lines += ['\n']
            continue
        else:
          if line == trailing_whitespace:
            # This also catches the case of an empty line.
            continue
        # Check for correct amount of leading whitespace.
        leading_whitespace = self._LeadingWhitespace(line)
        if leading_whitespace.count('\t') > 0:
          if self.options.fix:
            leading_whitespace = leading_whitespace.replace('\t', ' ')
            line = leading_whitespace + line.lstrip()
            self._LineWarning('Tab character found.', line_number)
          else:
            self._LineError('Tab character found.', line_number)
        # Closing brackets dedent before the line is checked.
        if line[len(leading_whitespace)] in (']', '}'):
          indent -= 2
        if line[0] != '#':  # Ignore 0-indented comments.
          if len(leading_whitespace) != indent:
            if self.options.fix:
              line = ' ' * indent + line.lstrip()
              self._LineWarning('Indentation should be ' + str(indent) +
                                ' spaces.', line_number)
            else:
              self._LineError('Bad indentation. Should be ' + str(indent) +
                              ' spaces.', line_number)
        # Opening brackets indent the following lines.
        if line[-1] in ('[', '{'):
          indent += 2
        if self.options.fix:
          fixed_lines.append(line + '\n')
    # If --fix is specified: backup the file (deleting any existing backup),
    # then write the fixed version with the old filename.
    if self.options.fix:
      if self.options.backup:
        backupfilename = filename + '.bak'
        if os.path.exists(backupfilename):
          os.remove(backupfilename)
        os.rename(filename, backupfilename)
      with open(filename, 'w') as f:
        f.writelines(fixed_lines)

  def Main(self, filename, options):
    '''Checks |filename| and returns 0 on success, 1 if errors were found.

    NOTE(review): the file is parsed with eval(), so this must only ever be
    run on trusted input (it is a developer-side lint tool).
    '''
    try:
      with open(filename) as f:
        data = eval(f.read())
    except:
      import traceback
      traceback.print_exc(file=sys.stdout)
      self._Error('Invalid Python/JSON syntax.')
      return 1
    if data == None:
      self._Error('Invalid Python/JSON syntax.')
      return 1
    self.options = options
    # First part: check JSON structure.
    # Check (non-policy-specific) message definitions.
    messages = self._CheckContains(data, 'messages', dict,
                                   parent_element=None,
                                   container_name='The root element',
                                   offending=None)
    if messages is not None:
      for message in messages:
        self._CheckMessage(message, messages[message])
        # 'doc_feature_<name>' messages register <name> as a known feature.
        if message.startswith('doc_feature_'):
          self.features.append(message[12:])
    # Check policy definitions.
    policy_definitions = self._CheckContains(data, 'policy_definitions', list,
                                             parent_element=None,
                                             container_name='The root element',
                                             offending=None)
    if policy_definitions is not None:
      policy_ids = set()
      for policy in policy_definitions:
        self._CheckPolicy(policy, False, policy_ids)
      self._CheckPolicyIDs(policy_ids)
    # Second part: check formatting.
    self._CheckFormat(filename)
    # Third part: summary and exit.
    print ('Finished checking %s. %d errors, %d warnings.' %
           (filename, self.error_count, self.warning_count))
    if self.options.stats:
      if self.num_groups > 0:
        print ('%d policies, %d of those in %d groups (containing on '
               'average %.1f policies).' %
               (self.num_policies, self.num_policies_in_groups, self.num_groups,
                (1.0 * self.num_policies_in_groups / self.num_groups)))
      else:
        print self.num_policies, 'policies, 0 policy groups.'
    if self.error_count > 0:
      return 1
    return 0

  def Run(self, argv, filename=None):
    '''Parses command-line options and dispatches to Main().

    |argv| is the full argv vector (argv[0] is the program name); if
    |filename| is not given it is taken from the positional arguments.
    '''
    parser = optparse.OptionParser(
        usage='usage: %prog [options] filename',
        description='Syntax check a policy_templates.json file.')
    parser.add_option('--fix', action='store_true',
                      help='Automatically fix formatting.')
    parser.add_option('--backup', action='store_true',
                      help='Create backup of original file (before fixing).')
    parser.add_option('--stats', action='store_true',
                      help='Generate statistics.')
    (options, args) = parser.parse_args(argv)
    if filename is None:
      if len(args) != 2:
        parser.print_help()
        sys.exit(1)
      filename = args[1]
    return self.Main(filename, options)
if __name__ == '__main__':
sys.exit(PolicyTemplateChecker().Run(sys.argv)) | unknown | codeparrot/codeparrot-clean | ||
/* Auto-generated by generate-wrappers.py script. Do not modify */
#if defined(__arm__) || defined(__aarch64__)
#include <q8gemm/4x8c2-xzp-neon.c>
#endif /* defined(__arm__) || defined(__aarch64__) */ | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/quantized/cpu/qnnpack/wrappers/q8gemm/4x8c2-xzp-neon.c |
# Licensed under a MIT licence - see file `license`
import copy
from warnings import warn
import numpy as np
from scipy.ndimage import binary_dilation
from . import regions
from . import findbase
from . import optregion
from . import fit as fitpsf
__all__ = ['RegionError', 'PSFIndexError',
'BasePSFFitter', 'SimpleSubtraction',
'UseAllPixelsSubtraction',
'LOCI', 'LOCIAllPixelsSubtraction',
'CepheidSnapshotpaper'
]
class RegionError(Exception):
    """Raised when a region array does not have the expected shape or dtype."""
class PSFIndexError(Exception):
    """Raised when a PSF index array does not have the expected shape or dtype."""
class BasePSFFitter(object):
'''Base object for PSF fitting.
Parameters
----------
psfbase : np.ndarray of shape (n, m, k)
array of psfbases. (n, m) are the dimensions of each image
and there are k potential elements of the PSF base.
image : np.array of shape (n,m) or None
N, M array. If ``None``, the image has to be set later.
'''
'''Regions with fewer than ``min_pixels_in_region`` will be ignored for speed-up.'''
min_pixels_in_region = 1
'''Can this fitter deal with masked data?
This attribute is not fool-proof; it is set by hand for the pre-defined fitters.
If you define your own fitter, you will ahve to check yourself if if works with
masked data.'''
_allow_masked_data = True
_image = None
_psf = None
def __init__(self, psfbase, image=None):
if len(psfbase.shape) != 3:
raise ValueError('psfbase must have 3 dim [im_x, im_y, n]')
if not self._allow_masked_data and (np.ma.getmask(psfbase).sum() > 0):
raise ValueError('This fitter cannot deal with masked data.')
self.psfbase = psfbase
self.image = image
### Convenience functions and infrastructure ###
@property
def image_dim(self):
'''Dimenension of the image that this fitter works on.'''
return self.psfbase.shape[:2]
@property
def image(self):
'''Image.
np.array of shape (n, m) or None (if not set yet).
'''
if self._image is None:
raise AttributeError('image not set yet.')
else:
return self._image
@image.setter
def image(self, im):
if im is None:
self._image = im
else:
if im.shape != self.image_dim:
raise ValueError('PSF base is set for images of size ({0}, {1})'.format(self.image_dim[0], self.image_dim[1]))
if not self._allow_masked_data and (np.ma.getmask(im).sum() > 0):
raise ValueError('This fitter cannot deal with masked data.')
self._image = im
self._psf = None
@property
def image1d(self):
'''Image flatted to a 1 d vector.'''
return self.dim2to1(self.image)
@property
def psfbase1d(self):
'''PSF base flattened to an 2d array (stack of 1d images)'''
return self.psfbase.reshape((-1, self.psfbase.shape[2]))
@property
def psf(self):
'''Fitted Point-Spread-Function (PSF)'''
if self._psf is None:
self._psf = self.fit_psf()
return self._psf
def dim2to1(self, array2d):
'''Flatten image'''
return array2d.ravel()
def dim1to2(self, array1d):
'''Reshape flattened image to 2 d.'''
return array1d.reshape(self.image_dim)
### Functions that should be overwritten by child classes ###
def regions(self):
'''This function should be overwritten by derived classes.'''
raise NotImplementedError
def findbase(self, region):
'''This function should be overwritten by derived classes.'''
raise NotImplementedError
def optregion(self, region, indpsf):
'''This function should be overwritten by derived classes.'''
raise NotImplementedError
def fitpsfcoeff(self, img, base):
'''This function should be overwritten by derived classes.'''
raise NotImplementedError
### Some wrappers around the above classes to unify output formats, check
### validity etc.
def anyreg_to_mask(self, reg):
'''Convert any type of a region definition to a 1d boolean mask.
Also check that the region has the correct size.
Parameters
----------
reg : boolean mask of size image in 1d or 2d or 1d integer array
'''
r = np.asanyarray(reg)
# Index array like [1,5,12,23]
if (r.ndim == 1) and np.issubdtype(r.dtype, np.int64):
region = np.zeros((self.image_dim[0] * self.image_dim[1]), dtype=bool)
region[r] = True
r = region
if r.ndim == 2:
r = r.ravel()
if r.shape != (self.image_dim[0] * self.image_dim[1], ):
raise RegionError("Every region must have the same shape as the image.")
return r
def baseind_to_mask(self, indpsf):
'''Convert any type of psf base index to boolen mask.'''
indpsf = np.asanyarray(indpsf)
if (indpsf.ndim == 1) and np.issubdtype(indpsf.dtype, np.int64):
ind = np.zeros((self.psfbase.shape[2]), dype=bool)
ind[indpsf] = True
indpsf = ind
if indpsf.shape != (self.psfbase.shape[2], ):
raise PSFIndexError("PSF index shape does not match the shape of the psf base.")
return indpsf
def iter_regions(self):
'''Convert regions to 1d boolean mask array and iterate'''
for r in self.regions():
reg = self.anyreg_to_mask(r)
if reg.sum() >= self.min_pixels_in_region:
yield reg
else:
warn('Skipping region that includes no pixels.')
### Here the actual work is done ###
def fit_psf(self, image=None):
'''Fit the PSF for an image
Parameters
----------
image : np.array of shape (n, m) or None
N, M array. If ``None``, use the image set previously.
Returns
-------
psf : np.array of shape (n, m)
Fitted PSF.
'''
if image is not None:
self.image = image
# This line triggers in bug in numpy < 1.9
# psf = np.zeros_like(self.image1d)
# It results in psf.mask is self.image1d.mask
# Here is a different way to get the same result:
psf = np.ma.zeros(len(self.image1d))
psf[:] = np.ma.masked
for region in self.iter_regions():
# select which bases to use
indpsf = self.baseind_to_mask(self.findbase(region))
# Select which region to use in the optimization
optregion = self.anyreg_to_mask(self.optregion(region, indpsf))
# Check for consistency
if not self.check_fittable(region, optregion, indpsf):
raise RegionError('Fit region contains to few data points.')
# Perform fit on the optregion
psf_coeff = self.fitpsfcoeff(self.image1d[optregion],
self.psfbase1d[:, indpsf][optregion, :])
# Use psfcoeff to estimate the psf in `region`
psf[region] = np.dot(self.psfbase1d[:, indpsf][region, :],
psf_coeff)
self._psf = self.dim1to2(psf)
return self.dim1to2(psf)
def remove_psf(self, image=None):
'''Remove te PSF from the image.
Parameters
----------
image : np.array of shape (n, m) or None
N, M array. If ``None``, use the image set previously.
Returns
-------
resid : np.array of shape (n, m)
original image with PSF subtracted.
'''
if image is not None:
self.image = image
return self.image - self.psf
def check_fittable(self, region, optregion, indpsf):
n_data = (optregion & ~np.ma.getmaskarray(self.image1d)).sum()
n_pars = indpsf.sum()
if n_data.sum() <= n_pars.sum():
return False
else:
return True
class SimpleSubtraction(BasePSFFitter):
'''Simple examples of PSF fitting.
The whole (unmasked) image is fit at once using all bases.
'''
regions = regions.image_unmasked
findbase = findbase.allbases
optregion = optregion.all_unmasked
fitpsfcoeff = fitpsf.psf_from_projection
class UseAllPixelsSubtraction(BasePSFFitter):
'''Use all available pixels of the image.
For unmasked image pixel the maximal set of PSF templates that are
unmasked at that position is used.
Pixels are then group in regions that make use of the same PSF templates.
'''
regions = regions.group_by_basis
findbase = findbase.nonmaskedbases
optregion = optregion.all_unmasked
fitpsfcoeff = fitpsf.psf_from_projection
class LOCI(BasePSFFitter):
'''LOCI fitter (locally optimized combination of images)
The loci algorithm was introduced in the following paper
`Lafreniere et al. 2007, ApJ, 660, 770 <http://adsabs.harvard.edu/abs/2007ApJ...660..770L>`_.
The default parameters in this fitter are chosen similar to the shape of
the regions used in that paper.
'''
'''Can this fitter deal with masked data?
No, in this case, because it is possible that sectors defines a region
where no unmasked bases exist.'''
_allow_masked_data = False
regions = regions.sectors
@property
def sector_radius(self):
return np.logspace(self.sector_radius_inner,
np.log10(self.image.shape[1]),
self.sector_radius_n)
sector_radius_inner = 0
sector_radius_n = 10
sector_phi = 12
findbase = findbase.nonmaskedbases
optregion = optregion.wrapper_ignore_all_masked(optregion.around_region)
dilation_region = 10
fitpsfcoeff = fitpsf.psf_from_projection
class LOCIAllPixelsSubtraction(LOCI):
'''LOCI fitter that splits LOCI regions according to the available PSF bases.
For unmasked image pixel the maximal set of PSF templates that are
unmasked at that position is used.
Pixels are then grouped in regions that make use of the same PSF templates.
'''
'''Can this fitter deal with masked data?'''
_allow_masked_data = True
regions = regions.sectors_by_basis
class CepheidSnapshotpaper(LOCI):
_allow_masked_data = True
regions = regions.wrapper_min_size(regions.sectors_by_basis)
optregion = optregion.wrapper_ignore_all_masked(
optregion.wrapper_optmask(optregion.around_region))
mask_around_mask = 0
border_around_region = 5
def __init__(self, *args, **kwargs):
super(CepheidSnapshotpaper, self).__init__(*args, **kwargs)
self.manual_optmask = np.zeros(self.image_dim, dtype=bool)
@property
def optmask(self):
selem = np.ones((2 * self.mask_around_mask + 1,
2 * self.mask_around_mask + 1))
mask_around_mask = binary_dilation(np.ma.getmaskarray(self.image), selem)
if hasattr(self, 'manual_optmask'):
return self.dim2to1(mask_around_mask | self.manual_optmask)
else:
return self.dim2to1(mask_around_mask)
def check_fittable(self, region, optregion, indpsf):
default_dilation = copy.copy(self.dilation_region)
while not super().check_fittable(region, optregion, indpsf):
# This means that the optregion is too small.
# That can happen for regions close to the bleed column where
# a lot of pixels are masked.
self.dilation_region += 2
if self.dilation_region >= 2 * max(self.image_dim):
raise RegionError('Too few data pixels even when increasing the size of the optimization region.')
# We need to change optregion in place otherwise the changes here
# do not take effect outside of this function.
optregion[:] = self.anyreg_to_mask(self.optregion(region, indpsf))
self.dilation_region += 2
self.dilation_region = default_dilation
return True
class CSPFakeSourcePartialFitting(CepheidSnapshotpaper):
'''A modified version of the CSP fitter, that refits only some regions.
This is used when inserting fake sources into images. Only the regions that
actually overlap with the inserted source need to be refit, the value
of the residual image for the remaining area can just be taken from a
previously reduced image. This class implements a remove_psf function that
can do that.
Note a subtlety here: Inserting a new fake source should change the
opt_mask region and thus may influence other source in the field, too. This
class ignore that effect to speed up calculations.
'''
regions = regions.wrapper_overlap_region(
regions.wrapper_min_size(regions.sectors_by_basis))
def remove_psf(self, image, region_to_use, residual_image):
self.region_to_use = region_to_use
resid_im = super().remove_psf(image)
resid_im[resid_im.mask] = residual_image[resid_im.mask]
return resid_im | unknown | codeparrot/codeparrot-clean | ||
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show() | unknown | codeparrot/codeparrot-clean | ||
#AATC crypto module
import codecs,recvall,ast,os,AATC_Config,AATC_CryptoBeta
from Crypto.Cipher import AES,PKCS1_OAEP
from Crypto.PublicKey import RSA
class Crypter:
"""
A class to encrypt and decrypt a connection with AES. Takes a connection object and mode and sets up a secret key for both participants.
This key is then used to create an AES object which encrypts and decrypts messages via the Encrypt and Decrypt functions.
These strings must be in binary format. The length of the string will be padded to a length multiple of 16 in size.
The object will communicate with another Crypter object via the socket and pass public keys.
Best for use with communication with string versions of python objects and resulting converstion using 'ast' as this will remove padding whitespace.
If in 'CLIENT' mode the Crypter will start communication. It will send a message ('PublicKey,(Client_Public_Key,)) and the server will return the servers public key.
The CLIENT will then send an 'Exit' message to end communication.
Per default the Crypter will assume it should generate the keys when __init__ is called. This can be disabled by creating the object with AutoGenerate set to False.
Mode can be changed via SetMode. Normal options are 'CLIENT' and 'SERVER'.
"""
def __init__(self, con, mode = "CLIENT",AutoGenerate = True):
self._con = con
self.SetMode(mode)
if AutoGenerate:
self.GenerateKey()
def SetMode(self,mode):
self._mode = mode
def GenerateKey(self,key_size = AATC_Config.DEFAULT_RSA_KEYSIZE):
print("Generating encryption keys. Please stand by...") #Generating keys takes a long time and have found no way to shorted key length
if self._mode == "SERVER":
self.ServerGenerateKey()
elif self._mode == "CLIENT":
self.ClientGenerateKey(key_size)
else:
raise ValueError("Crypter: Incorrect mode set")
print("Encryption keys generated",self._AESKey)
def ClientGenerateKey(self,RSA_KeySize,AES_KeySize= AATC_Config.DEFAULT_AES_KEYSIZE): #Start the relevant key exchange system
if AATC_Config.SET_ENCRYPTION_KEYS_ENABLE: #Allows preset encryption keys to be used
self.SetEncryptionKeys(AATC_Config.SET_AES_KEY, AATC_Config.SET_IV_KEY)
elif AATC_Config.ENCRYPTION_USE_PRESHARED_KEYS: #Uses preshared certificates
self.ClientPreSharedKeys(RSA_KeySize,AES_KeySize)
else:
self.ClientExchangeKeys(RSA_KeySize,AES_KeySize) #Exchange AES keys using RSA
self.Send(("Exit",()))
Sucess,Message,Data = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server failed to exit"+Message)
def ClientPreSharedKeys(self,RSA_KeySize,AES_KeySize): #Swap keys using certificate authentication
self.Send(("GetServerCertificateChain",()))
Sucess,Message,CertificateChain = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server did not respond to command")
if AES_KeySize not in AATC_Config.ALLOWED_AES_KEYSIZES:
raise Exception("AES key size not in ALLOWED_AES_KEYSIZES. Change keysize to an allowed value") #Only accept allowed key sizes
AESKey,IV = GenerateKeys(AES_KeySize)
PublicKey = AATC_CryptoBeta.VerifyCertificates(CertificateChain,AATC_Config.ROOT_CERTIFICATES,self._con) #Verify the certificate chain is valid.
if PublicKey: #If the chain is valid
PKO = PKCS1_OAEP.new(RSA.import_key(PublicKey))
EncryptedAESKey = PKO.encrypt(AESKey)
EncryptedIV = PKO.encrypt(IV)
self.SetEncryptionKeys(AESKey,IV)
self.Send(("SetKey",(EncryptedAESKey,EncryptedIV))) #Swap keys encrypted using server public key
Sucess,Message,Data = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server rejected setting AES_Keys"+Message)
else:
print("Certificate Chain is not valid")
if AATC_Config.AUTO_GENERATE_FALLBACK:
self.ClientExchangeKeys(RSA_KeySize,AES_KeySize) #Fall back on key exchange if allowed
else:
raise Exception("Certificate Chain is not valid. Exception raised") #Raise exception if not allowed
def ClientExchangeKeys(self,RSA_KeySize,AES_KeySize): #Exchange the keys using RSA encryption
RSAKey = RSA.generate(RSA_KeySize)
privateKey = RSAKey.exportKey("DER")
publicKey = RSAKey.publickey().exportKey("DER") #Get the RAS keys.
self.Send(("GenerateKey",(publicKey,AES_KeySize)))
Sucess,Message,data = self.SplitData(self.Recv())
RSAPrivateKey = RSA.import_key(privateKey)
RSAPrivateObject = PKCS1_OAEP.new(RSAPrivateKey)
AESKey = RSAPrivateObject.decrypt(data[0]) #Decrypt the recieved keys
IV = RSAPrivateObject.decrypt(data[1])
if Sucess == False:
raise Exception("Error occured while exchanging keys")
self.SetEncryptionKeys(AESKey,IV)
################################################################
def ServerGenerateKey(self): #Server select the correct mode.
if AATC_Config.SET_ENCRYPTION_KEYS_ENABLE:
self.SetEncryptionKeys(AATC_Config.SET_AES_KEY, AATC_Config.SET_IV_KEY) #Use preset keys if enabled
self._Exit = False
while not self._Exit: #Start a server type loop (responds to commands from client)
data = self.Recv()
Command, Arguments = data[0],data[1]
#Respond to relevant command
if Command == "GenerateKey":
Sucess,Message,Data = self.ServerGenerateKeys(Arguments)
elif Command == "GetServerCertificateChain":
Sucess,Message,Data = self.GetServerCertificateChain(Arguments)
elif Command == "SetKey":
Sucess,Message,Data = self.ServerSetKey(Arguments)
elif Command == "Exit":
Sucess,Message,Data = True,"Exiting",[]
self._Exit = True
else:
Sucess,Message,Data = False,"Command does not exist",[]
self.Send((Sucess,Message,Data))
if not hasattr(self,"_AESKey"): #Only set if sucessfully setup.
raise Exception("Failure during crypter setup")
def ServerGenerateKeys(self,Arguments): #Generate keys and encrypt with the provided RSA key
publicKey,AES_KeySize = Arguments[0],Arguments[1]
if AES_KeySize not in AATC_Config.ALLOWED_AES_KEYSIZES:
AES_KeySize = AATC_Config.DEFAULT_AES_KEYSIZE #If key size is not valid set size to default of AATC_Config.DEFAULT_AES_KEYSIZE
AESKey,IV = GenerateKeys(AES_KeySize)
PublicKeyObject = PKCS1_OAEP.new( RSA.import_key(publicKey))
EncryptedAESKey = PublicKeyObject.encrypt(AESKey)
EncryptedIV = PublicKeyObject.encrypt(IV) #Encrypt AES keys
self.SetEncryptionKeys(AESKey,IV)
return True,"Instated encryption keys",[EncryptedAESKey,EncryptedIV] #Return values to be sent
def GetServerCertificateChain(self,Arguments = None): #Respond to request to get certificate chain.
return True,"Server Certificate Chain",AATC_Config.SERVER_CERTIFICATE_CHAIN
def ServerSetKey(self,Arguments): #Set provided keys encrypted with public key of server
PKO = PKCS1_OAEP.new(RSA.import_key(AATC_Config.SERVER_PRIVATE_KEY))
AESKey,IV = Arguments[0],Arguments[1]
AESKey,IV = PKO.decrypt(AESKey),PKO.decrypt(IV) #Decrypt keys
if len(AESKey) in AATC_Config.ALLOWED_AES_KEYSIZES:
self.SetEncryptionKeys(AESKey,IV)
return True,"Keys set",[]
else:
#self._Exit = True
return False,"AES key size not in ALLOWED_AES_KEYSIZES:"+str(AATC_Config.ALLOWED_AES_KEYSIZES),[]
###############################################
def SetEncryptionKeys(self,AESKey,IV): #Set the encryption keys and AES encryption objects
self._AESKey = AESKey
self._IV = IV
self._EncryptAES = AES.new(self._AESKey,AES.MODE_GCM,self._IV) #Two seperate instances to encrypt and decrypt as non ECB AES is a stream cipher
self._DecryptAES = AES.new(self._AESKey,AES.MODE_GCM,self._IV) #Errors will occur if encrypt and decrypt are not equal in count.
##############################################
def Encrypt(self,data):
return self._EncryptAES.encrypt(data)
def Decrypt(self,data):
return self._DecryptAES.decrypt(data)
def Send(self,data):
self._con.sendall(codecs.encode(str(data)))
def Recv(self):
data = recvall.recvall(self._con)
data = ast.literal_eval(codecs.decode(data))
return data
def SplitData(self,data):
return data[0],data[1],data[2]
def GenerateKeys(AES_KeySize): #Creates random keys using source which is cryptographically random
AESKey = os.urandom(AES_KeySize) # Here to allow regeneration of AES key while still in loop if required.
IV = os.urandom(AES_KeySize)
return AESKey,IV | unknown | codeparrot/codeparrot-clean | ||
import string
import random
import time
from fabric import state
from fabric.api import run, sudo, cd, prompt, task, settings
from fabric.contrib import files
from fabric.colors import green, red
from fabric.context_managers import hide
from fabric.tasks import Task
from fabric.task_utils import crawl
from fabtools import require, python, supervisor, deb, system, nodejs, service
from fabtools.require import file as require_file
'''
Script to set up a Cozy cloud environment from a fresh system.
Validated on an up-to-date 64-bit Debian Squeeze installation.
Once your system is updated, launch
$ fab -H user@Ip.Ip.Ip.Ip:Port install
to install the full Cozy stack.
'''
# Helpers
def id_generator(
        size=32,
        chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def simple_id_generator(size=40, chars=string.ascii_uppercase + string.digits):
    """Return a random token of *size* uppercase letters and digits."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# One-shot credentials generated at import time.
# USERNAME/PASSWORD become the CouchDB admin account (see config_couchdb);
# TOKEN is written to /etc/cozy/controller.token (see install_controller).
USERNAME = id_generator()
PASSWORD = id_generator()
TOKEN = simple_id_generator()
@task
def is_arm():
    '''Return True when `lscpu` on the remote host reports an ARM CPU.'''
    cpu_info = run('lscpu', quiet=True)
    return 'arm' in cpu_info
@task
def is_pi():
    '''Return True when the remote host has an ARMv6 CPU (Raspberry Pi).

    Fix: the command was spelled 'Lscpu' (capital L), which is not a real
    binary, so the check could never report the CPU model correctly.
    '''
    result = run('lscpu', quiet=True)
    return 'armv6l' in result
def print_failed(module):
    '''Print a red failure notice for *module* and abort the whole script.'''
    notice = ('Installation of %s failed.\n'
              'You can join us on our IRC channel: '
              '#cozycloud on freenode.net to ask for assistance.')
    # red() colors the template; the module name is substituted afterwards,
    # exactly as in the original expression.
    print(red(notice) % module)
    exit()
def cozydo(cmd):
    '''Execute *cmd* on the remote host as the unix user "cozy".'''
    sudo(cmd, user='cozy')
def delete_if_exists(filename):
    '''Remove *filename* on the remote host, but only when it is present.'''
    if not files.exists(filename):
        return
    sudo('rm -rf %s' % filename)
def su_delete(filename):
    '''Force-remove the given remote path with root privileges.'''
    command = 'rm -rf %s' % filename
    sudo(command)
# Tasks
@task
def install():
    '''
    Install the full cozy stack.
    '''
    # Installation steps, executed strictly in this order.
    # init_domain is deliberately left out of the sequence.
    steps = (
        install_tools,
        install_node08,
        install_couchdb,
        install_postfix,
        create_cozy_user,
        config_couchdb,
        install_monitor,
        install_controller,
        install_indexer,
        install_data_system,
        install_home,
        install_proxy,
        create_cert,
        install_nginx,
        restart_cozy,
    )
    for step in steps:
        step()
    print(green('Cozy installation finished. Now, enjoy !'))
def ask_for_confirmation(module):
    '''
    Ask the operator whether *module* should really be removed.

    Returns True only on an explicit "yes" answer (the default is "no").
    '''
    question = ('Are you sure you want to definitely remove %s from your'
                ' computer? ' % module)
    answer = prompt(question, default="no")
    return answer == "yes"
@task
def uninstall_all():
    '''
    Uninstall the whole stack (work in progress).

    Every destructive step below is gated behind an interactive
    confirmation prompt, so the operator can keep individual components.
    '''
    # First ask the controller to remove every installed Cozy application.
    sudo('cozy-monitor uninstall-all')
    if ask_for_confirmation("Cozy"):
        uninstall_cozy()
    if ask_for_confirmation("Node.js"):
        uninstall_node08()
    if ask_for_confirmation("CouchDB"):
        uninstall_couchdb()
    if ask_for_confirmation("Postfix"):
        uninstall_postfix()
    if ask_for_confirmation("Cozy users"):
        # Delete the unix accounts (and home directories) created at install.
        sudo('userdel -r cozy')
        sudo('userdel -r cozy-data-system')
        sudo('userdel -r cozy-home')
@task
def install_dev():
    '''
    Install stuff to prepare a virtual machine dedicated to development.
    '''
    # Development stack: same base as production but the controller runs in
    # dev mode and the nginx/cert/restart steps are omitted.
    # init_domain is deliberately left out of the sequence.
    steps = (
        install_tools,
        install_node08,
        install_couchdb,
        install_postfix,
        create_cozy_user,
        install_monitor,
        install_controller_dev,
        install_indexer,
        install_data_system,
        install_home,
        install_proxy,
    )
    for step in steps:
        step()
    print(green('The Cozy development environment has been installed.'))
@task
def install_tools():
    '''
    Install build tools
    '''
    # Refresh the package index and upgrade before pulling dependencies.
    deb.update_index()
    deb.upgrade()
    # Build toolchain plus runtime helpers used by later install steps.
    packages = [
        'python',
        'python-dev',
        'python-setuptools',
        'python-pip',
        'openssl',
        'libssl-dev',
        'libxml2-dev',
        'libxslt1-dev',
        'build-essential',
        'git',
        'sudo',
        'lsb-release',
        'imagemagick',
        'sqlite3',
    ]
    require.deb.packages(packages)
    print(green('Tools successfully installed'))
@task
def install_node08():
    '''
    Install Node 0.8.18
    '''
    node_version = '0.8.18'
    require.nodejs.installed_from_source(node_version)
    # Clear npm's CA setting — presumably to avoid TLS failures with the
    # registry on old systems; verify before removing.
    sudo('npm config set ca ""')
    print(green('Node 0.8.18 successfully installed'))
@task
def uninstall_node08():
    '''
    Uninstall node 0.8.18
    '''
    sudo('npm uninstall npm')
    version = '0.8.18'
    folder = 'node-v%s' % version
    filename = folder + '.tar.gz'
    # The source tree is needed again to run `make uninstall`, so the
    # tarball is re-downloaded and unpacked before removing the install.
    require_file(url='http://nodejs.org/dist/v%s/%s' % (version, filename))
    sudo('tar -xzf %s' % filename)
    with cd('%s' % folder):
        sudo('./configure')
        sudo('make uninstall')
        sudo('make distclean')
    # Clean up both the extracted tree and the tarball (glob matches both).
    su_delete('%s*' % folder)
    print(green('Node 0.8.18 successfully uninstalled'))
@task
def install_couchdb():
    '''
    Install CouchDB 1.3.0 from source and run it under supervisord.

    Installs build dependencies, compiles and installs CouchDB, creates the
    couchdb unix account, fixes ownership/permissions on its directories
    and registers the daemon with supervisor.

    Fix: the tarball cleanup passed the string
    'rm -rf apache-couchdb-1.3.0.tar.gz' to su_delete(), which itself
    prepends 'rm -rf', producing a bogus 'rm -rf rm -rf ...' command.
    Now only the filename is passed.
    '''
    packages = [
        'erlang',
        'libicu-dev',
        'libcurl4-openssl-dev',
        'curl'
    ]
    # Debian 6 (Squeeze) ships SpiderMonkey as libmozjs-dev; later
    # releases provide libmozjs185-dev instead.
    if system.distrib_id() == 'Debian' \
            and system.distrib_release().startswith('6'):
        packages.append('libmozjs-dev')
    else:
        packages.append('libmozjs185-dev')
    require.deb.packages(packages)
    require_file(
        url='http://apache.crihan.fr/dist/couchdb/source/' +
        '1.3.0/apache-couchdb-1.3.0.tar.gz')
    run('tar -xzvf apache-couchdb-1.3.0.tar.gz')
    with cd('apache-couchdb-1.3.0'):
        run('./configure; make')
        result = sudo('make install')
        # CouchDB prints this sentence at the end of a successful install.
        installed = result.find('You have installed Apache CouchDB,' +
                                ' time to relax.')
        if installed == -1:
            print_failed('couchdb')
    # Clean up the build tree and the downloaded tarball.
    su_delete('apache-couchdb-1.3.0')
    su_delete('apache-couchdb-1.3.0.tar.gz')
    # Dedicated unix account; CouchDB's state lives under its home.
    require.users.user('couchdb', home='/usr/local/var/lib/couchdb')
    sudo('chown -R couchdb:couchdb /usr/local/etc/couchdb')
    sudo('chown -R couchdb:couchdb /usr/local/var/lib/couchdb')
    sudo('chown -R couchdb:couchdb /usr/local/var/log/couchdb')
    sudo('chown -R couchdb:couchdb /usr/local/var/run/couchdb')
    sudo('chmod 0770 /usr/local/etc/couchdb')
    sudo('chmod 0770 /usr/local/var/lib/couchdb')
    sudo('chmod 0770 /usr/local/var/log/couchdb')
    sudo('chmod 0770 /usr/local/var/run/couchdb')
    require.supervisor.process(
        'couchdb', user='couchdb',
        command='couchdb', autostart='true',
        environment='HOME=/usr/local/var/lib/couchdb')
    print(green('CouchDB 1.3.0 successfully installed'))
@task
def config_couchdb():
    '''Create (or rotate) the CouchDB admin account and persist its
    credentials in /etc/cozy/couchdb.login.

    If a login file already exists, the old admin is used to authenticate,
    a new admin (USERNAME/PASSWORD generated at import time) is added, and
    the old one is removed. Otherwise the new admin is created directly
    (CouchDB in "admin party" mode accepts unauthenticated config changes).
    '''
    if files.exists('/etc/cozy/couchdb.login'):
        # CouchDB has an old admin
        # hide() keeps the credentials out of fabric's console output.
        with hide('running', 'stdout'):
            # Recover old password
            # File layout: first line username, second line password.
            logins = sudo('cat /etc/cozy/couchdb.login')
            logsCouchDB = logins.split('\r\n')
            # Add new admin
            couch_admin_path = '@127.0.0.1:5984/_config/admins/'
            run('curl -X PUT http://%s:%s%s%s -d \'\"%s\"\'' %
                (
                    logsCouchDB[0],
                    logsCouchDB[1],
                    couch_admin_path,
                    USERNAME,
                    PASSWORD,
                ))
            # Delete old admin
            run('curl -X DELETE ' +
                'http://%s:%s@127.0.0.1:5984/_config/admins/%s' %
                (USERNAME, PASSWORD, logsCouchDB[0]))
            sudo('rm -rf /etc/cozy/couchdb.login')
    else:
        # CouchDB has not an admin
        # Create admin
        with hide('running', 'stdout'):
            couch_admin_path = '127.0.0.1:5984/_config/admins/'
            run('curl -X PUT http://%s%s -d \'\"%s\"\'' %
                (couch_admin_path, USERNAME, PASSWORD))
        sudo('mkdir -p /etc/cozy')
    # Create file to keep admin's password
    # Readable only by cozy-data-system, which needs the admin credentials.
    require.files.file(
        path='/etc/cozy/couchdb.login',
        contents=USERNAME + '\n' + PASSWORD,
        use_sudo=True,
        owner='cozy-data-system',
        mode='700'
    )
    print(green('CouchDB 1.3.0 successfully configured'))
@task
def uninstall_couchdb():
    '''
    Uninstall CouchDB 1.3.0.

    The source tree is re-downloaded because `make uninstall` needs the
    configured build directory to know what to remove.
    '''
    require_file(
        url='http://apache.crihan.fr/dist/couchdb/source/' +
        '1.3.0/apache-couchdb-1.3.0.tar.gz')
    run('tar -xzvf apache-couchdb-1.3.0.tar.gz')
    with cd('apache-couchdb-1.3.0'):
        sudo('./configure')
        sudo('make uninstall')
        sudo('make distclean')
    # Remove everything `make uninstall` may have left behind, plus the
    # runtime state, credentials and the supervisor entry.
    su_delete('/usr/local/share/couchdb')
    su_delete('/usr/local/lib/couchdb')
    su_delete('/usr/local/var/lib/couchdb')
    su_delete('/usr/local/var/log/couchdb')
    su_delete('/usr/local/var/run/couchdb')
    su_delete('/usr/local/share/doc/couchdb')
    su_delete('/usr/local/bin/couchjs')
    su_delete('/usr/local/bin/couchdb')
    su_delete('apache-couchdb-1.3.0')
    su_delete('apache-couchdb-1.3.0.tar.gz')
    su_delete('/etc/supervisor/conf.d/couchdb.conf')
    su_delete('/etc/cozy/couchdb.login')
    supervisor.update_config()
    print(green('CouchDB 1.3.0 successfully uninstalled'))
@task
def install_postfix():
    '''
    Install a postfix instance (required for mail sending).

    The interactive domain prompt is disabled; the fixed domain
    'cozycloud.cc' is used instead.
    '''
    #domain = prompt('Enter your domain name:',
    #                default='myinstance.cozycloud.cc')
    require.postfix.server('cozycloud.cc')
    print(green('Postfix successfully installed'))
@task
def uninstall_postfix():
    '''
    Remove the postfix mail server package from the host.
    '''
    require.deb.uninstall('postfix')
    print(green('Postfix successfully uninstalled'))
@task
def uninstall_cozy():
    '''
    Uninstall the Cozy stack itself: stop the controller and indexer,
    remove their files, credentials and the nginx/supervisor entries.
    (Docstring fixed — it previously said "Uninstall postfix.")
    '''
    supervisor.stop_process('cozy-controller')
    supervisor.stop_process('cozy-indexer')
    su_delete('/usr/local/var/cozy-indexer')
    su_delete('/usr/local/cozy-indexer')
    su_delete('/usr/local/cozy')
    # Home directories of the cozy-* accounts created at install time.
    su_delete('/home/cozy*')
    su_delete('/etc/cozy')
    su_delete('/etc/nginx/conf.d/cozy.conf')
    su_delete('/etc/supervisor/conf.d/cozy-controller.conf')
    su_delete('/etc/supervisor/conf.d/cozy-indexer.conf')
    supervisor.update_config()
    print(green('Cozy successfully uninstalled'))
@task
def create_cozy_user():
    '''
    Add Cozy user with no home directory.
    '''
    # Main unprivileged account that runs the stack; no home needed.
    require.user('cozy', home=False, create_home=False)
    # Dedicated accounts (with homes) for the data-system and home apps.
    for account in ('cozy-data-system', 'cozy-home'):
        require.user(account, create_home=True)
    print(green('Cozy users successfully added'))
@task
def install_monitor():
    '''
    Install Coffeescript, Compound and Cozy Monitor.
    '''
    for package in ('coffee-script', 'cozy-monitor'):
        require.nodejs.package(package)
    # brunch is pinned — presumably a known-good version; verify before
    # bumping.
    require.nodejs.package('brunch', version='1.6.3')
    print(green('Monitor, compound, brunch and coffee script ' +
                'successfully installed'))
@task
def install_controller():
    '''
    Install Cozy Controller Application Manager. Daemonize with supervisor.

    Writes the auth token (generated at import time) to
    /etc/cozy/controller.token, registers the controller with supervisor
    and verifies it answers on port 9002 with an auth-token error.
    '''
    require.nodejs.package('cozy-controller')
    sudo('mkdir -p /etc/cozy')
    sudo('mkdir -p /etc/cozy/pids')
    # Token file readable only by cozy-home, which talks to the controller.
    require.files.file(
        path='/etc/cozy/controller.token',
        mode='700',
        contents=TOKEN,
        use_sudo=True,
        owner='cozy-home'
    )
    path = '/usr/local/lib/node_modules/cozy-controller/bin/cozy-controller'
    require.supervisor.process(
        'cozy-controller',
        command="%s -u --auth --per 755" % path,
        environment='NODE_ENV="production"',
        user='root'
    )
    supervisor.stop_process('cozy-controller')
    ## In case where two cozy-controllers are started
    # pkill may legitimately find no node process; don't abort on failure.
    with settings(warn_only=True):
        sudo('pkill -9 node')
    supervisor.start_process('cozy-controller')
    # ARM boards are slower to start the controller, so wait longer.
    if is_arm():
        time.sleep(20)
    else:
        time.sleep(10)
    with hide('running', 'stdout'):
        result = run('curl -X GET http://127.0.0.1:9002/')
    # With --auth enabled an unauthenticated request must be rejected with
    # exactly this body; anything else means the controller is not up.
    if result != '{"error":"Wrong auth token"}':
        print_failed('cozy-controller')
    print(green('Cozy Controller successfully started'))
@task
def install_controller_dev():
    '''
    Install Cozy Controller Application Manager in development mode and
    daemonize it with supervisor.

    Fix: dropped the redundant function-local `import time`; the module is
    already imported at the top of the file.
    '''
    require.nodejs.package('cozy-controller')
    require.supervisor.process(
        'cozy-controller',
        command='cozy-controller -c -u --per 755',
        environment='NODE_ENV="development"',
        user='root'
    )
    supervisor.restart_process('cozy-controller')
    # Give the controller a few seconds to bind its port before probing it.
    time.sleep(5)
    with hide('running', 'stdout'):
        result = run('curl -X GET http://127.0.0.1:9002/')
    # In dev mode (no auth) the controller answers with exactly this body.
    if result != '{"message":"No drones specified"}':
        print_failed("cozy-controller")
    print(green('Cozy Controller successfully started'))
@task
def install_indexer():
    '''
    Install Cozy Data Indexer. Use supervisord to daemonize it.

    Clones the indexer repository, installs its requirements into a
    dedicated virtualenv and verifies the HTTP endpoint on port 9102.
    '''
    home = '/usr/local/cozy-indexer'
    indexer_dir = '%s/cozy-data-indexer' % home
    indexer_env_dir = '%s/virtualenv' % indexer_dir
    python_exe = indexer_dir + '/virtualenv/bin/python'
    indexer_exe = 'server.py'
    process_name = 'cozy-indexer'
    require.files.directory(home, use_sudo=True)
    with cd(home):
        # Always start from a fresh checkout.
        if files.exists('cozy-data-indexer'):
            su_delete('cozy-data-indexer')
        sudo('git clone https://github.com/mycozycloud/cozy-data-indexer.git')
    require.python.virtualenv(indexer_env_dir, use_sudo=True)
    with python.virtualenv(indexer_env_dir):
        sudo(
            'pip install --use-mirrors -r %s/requirements/common.txt' %
            indexer_dir)
    sudo('chown -R cozy:cozy %s' % home)
    require.supervisor.process(
        process_name,
        command='%s %s' % (python_exe, indexer_exe),
        directory=indexer_dir,
        user='cozy'
    )
    supervisor.restart_process(process_name)
    # Give the server time to come up before checking it answers.
    time.sleep(10)
    result = run('curl -X GET http://127.0.0.1:9102/')
    # The landing page is expected to contain this banner on success.
    is_installed = result.find("Cozy Data Indexer")
    if is_installed == -1:
        print_failed("cozy-data-indexer")
    print(green("Data Indexer successfully started"))
@task
def install_data_system():
    '''
    Install Cozy Data System. Daemonize with Haibu.
    '''
    # cozy-monitor reports "successfully installed" on success; any other
    # output is treated as a failed installation.
    output = sudo('cozy-monitor install data-system')
    if 'successfully installed' in output:
        print(green('Data-system successfully installed'))
    else:
        print_failed('data-system')
@task
def install_home():
    '''
    Install Cozy Home
    '''
    # cozy-monitor reports "successfully installed" on success; any other
    # output is treated as a failed installation.
    output = sudo('cozy-monitor install home')
    if 'successfully installed' in output:
        print(green('Home successfully installed'))
    else:
        print_failed('home')
@task
def install_proxy():
    '''
    Install Cozy Proxy
    '''
    # cozy-monitor reports "successfully installed" on success; any other
    # output is treated as a failed installation.
    output = sudo('cozy-monitor install proxy')
    if 'successfully installed' in output:
        print(green('Proxy successfully installed'))
    else:
        print_failed('proxy')
@task
def init_domain():
    '''
    Register domain name inside Cozy Home.
    '''
    domain = prompt('What is your domain name (ex: cozycloud.cc)?')
    # Fix: Fabric's cd() expects a directory path. The previous value was
    # prefixed with a stray 'ls ', which made Fabric emit the invalid
    # command prefix "cd ls /usr/local/..." before cozydo ran.
    with cd('/usr/local/cozy/apps/home/home/cozy-home/'):
        cozydo('coffee commands setdomain %s' % domain)
    print(green('Domain set to: %s' % domain))
@task
def create_cert():
    '''
    Create SSL certificates.
    '''
    etc_dir = '/etc/cozy'
    require.files.directory(etc_dir, use_sudo=True, owner='cozy')
    with cd(etc_dir):
        # Self-signed certificate, valid for ten years (3650 days).
        sudo('openssl genrsa -out ./server.key 1024')
        sudo(
            'openssl req -new -x509 -days 3650 -key ' +
            './server.key -out ./server.crt -batch')
        # Private key readable only by its owner and the ssl-cert group.
        sudo('chmod 640 server.key')
        require.group('ssl-cert')
        sudo('chown cozy:ssl-cert ./server.key')
    print(green('Certificates successfully created.'))
# Not a @task: helper used by reset_security_tokens below.
def reset_cert():
    '''
    Reset SSL certificates
    '''
    delete_if_exists('/etc/cozy/server.crt')
    delete_if_exists('/etc/cozy/server.key')
    print(green('Previous certificates successfully deleted.'))
    create_cert()
PROXIED_SITE_TEMPLATE = '''
server {
listen %(port)s;
server_name %(server_name)s;
ssl_certificate /etc/cozy/server.crt;
ssl_certificate_key /etc/cozy/server.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_protocols SSLv3 TLSv1;
ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
ssl_prefer_server_ciphers on;
ssl on;
gzip_vary on;
client_max_body_size 1024M;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect http:// https://;
proxy_pass %(proxy_url)s;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
access_log /var/log/nginx/%(server_name)s.log;
}
'''
@task
def install_nginx():
    '''
    Install NGINX and make it use certs.
    '''
    if system.distrib_id() == 'Debian':
        # On Debian, pull nginx from the official nginx.org repository.
        require_file(url='http://nginx.org/packages/keys/nginx_signing.key')
        deb.add_apt_key('nginx_signing.key')
        su_delete('nginx_signing.key')
        url = 'http://nginx.org/packages/debian/'
        distrib = 'squeeze'
        if system.distrib_release().startswith('7'):
            distrib = 'wheezy'
        require.deb.source('nginx', url, distrib, 'nginx')
        require.deb.package('nginx')
        contents = PROXIED_SITE_TEMPLATE % {
            'server_name': 'cozy',
            'port': 443,
            'proxy_url': 'http://127.0.0.1:9104'
        }
        require.files.file(
            '/etc/nginx/conf.d/cozy.conf',
            contents=contents,
            use_sudo=True)
    else:
        # Non-Debian distribution (presumably Ubuntu): use the nginx PPA.
        require.deb.ppa('ppa:nginx/stable')
        require.nginx.site(
            'cozy',
            template_contents=PROXIED_SITE_TEMPLATE,
            enabled=True,
            port=443,
            proxy_url='http://127.0.0.1:9104'
        )
    # Drop the distribution's sample vhosts so they do not shadow ours.
    if files.exists('/etc/nginx/conf.d/default.conf'):
        su_delete('/etc/nginx/conf.d/default.conf')
    if files.exists('/etc/nginx/conf.d/example_ssl.conf'):
        su_delete('/etc/nginx/conf.d/example_ssl.conf')
    service.restart('nginx')
    print(green('Nginx successfully installed.'))
@task
def restart_cozy():
    '''
    Restart the three core stack applications via cozy-monitor.
    '''
    sudo('cozy-monitor restart data-system')
    sudo('cozy-monitor restart home')
    sudo('cozy-monitor restart proxy')
    print(green('Stack restarted successfully.'))
## No setup tasks
@task
def update_stack():
    '''
    Update applications
    '''
    nodejs.update_package('cozy-controller')
    nodejs.update_package('cozy-monitor')
    # On the Raspberry Pi the controller runs as an init.d service instead
    # of under supervisor.
    if is_pi():
        sudo('/etc/init.d/cozy-controller restart')
    else:
        supervisor.restart_process('cozy-controller')
    sudo('cozy-monitor update data-system')
    sudo('cozy-monitor update home')
    sudo('cozy-monitor update proxy')
    update_indexer()
    print(green('Stack updated successfully.'))
@task
def update_all_apps():
    '''
    Update every installed application via cozy-monitor.
    '''
    sudo('cozy-monitor update-all')
    print(green('All apps successfully updated.'))
@task
def update_indexer():
    '''
    Update Cozy indexer module.
    '''
    home = '/usr/local/cozy-indexer'
    indexer_dir = '%s/cozy-data-indexer' % home
    indexer_env_dir = '%s/virtualenv' % indexer_dir
    with cd(indexer_dir):
        sudo('git pull origin master')
        # Upgrade Python dependencies inside the indexer's virtualenv.
        with python.virtualenv(indexer_env_dir):
            sudo(
                'pip install --use-mirrors --upgrade -r %s/requirements/common.txt' %
                indexer_dir)
    supervisor.restart_process('cozy-indexer')
@task
def reset_account():
    '''
    Delete current user account
    '''
    # Fix: Fabric's cd() expects a directory path. The previous value was
    # prefixed with a stray 'ls ', which made Fabric emit the invalid
    # command prefix "cd ls /usr/local/..." before the coffee command ran.
    with cd('/usr/local/cozy/apps/home/home/cozy-home/'):
        sudo('coffee commands cleanuser')
    print(green('Current account deleted.'))
@task
def reset_controller_token():
    '''
    Reset controller token
    '''
    file_path = '/etc/cozy/controller.token'
    delete_if_exists(file_path)
    print(green('Controller token successfully deleted.'))
    # Re-create the token file from the module-level TOKEN value.
    require.files.file(
        path=file_path,
        mode='700',
        contents=TOKEN,
        use_sudo=True,
        owner='cozy-home'
    )
    print(green('Controller token successfully generated.'))
@task
def reset_security_tokens():
    '''
    Reset all the security tokens for the Cozy (SSL certificates,
    Controller token, CouchDB superuser)
    '''
    reset_cert()
    reset_controller_token()
    config_couchdb()
    print(green('All the tokens have been reset.'))
"""Help tasks"""
@task
def help(name=None):
    """Display help for a given task

    Options:
        name - The task to display help on.

    To display a list of available tasks type:
        $ fab -l

    To display help on a specific task type:
        $ fab help:<name>
    """
    task_name = "help" if name is None else name
    task = crawl(task_name, state.commands)
    if not isinstance(task, Task):
        # Unknown task name: point the user at the task listing.
        print("No such task {0:s}".format(task_name))
        print("For a list of tasks type: fab -l")
        return
    doc = getattr(task, "__doc__", None)
    if doc is None:
        print("No help available for {0:s}".format(task_name))
    else:
        print("Help on {0:s}:".format(task_name))
        print(doc)
@task
def fix_npm_ca_config():
    '''
    Clear npm's certificate-authority setting.
    '''
    # NOTE(review): an empty "ca" makes npm fall back to its default
    # registry CA bundle — confirm against the npm config documentation.
    sudo('npm config set ca ""')
    print(green('NPM certificate configuration fixed.'))
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import operator
import re
import urllib
import urllib2
import webkitpy.common.config.urls as config_urls
from webkitpy.common.memoized import memoized
from webkitpy.common.net.failuremap import FailureMap
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.networktransaction import NetworkTransaction
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.logutils import get_logger
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
_log = get_logger(__file__)
class Builder(object):
    """A single builder on a buildbot master.

    Gives access to the builder's builds, their layout test results, and a
    (lazily scraped) mapping from SVN revisions to build numbers.
    """

    def __init__(self, name, buildbot):
        self._name = name
        self._buildbot = buildbot
        # build_number -> Build (or None for failed lookups).
        self._builds_cache = {}
        # Lazily populated by _revision_to_build_map().
        self._revision_to_build_number = None
        from webkitpy.thirdparty.autoinstalled.mechanize import Browser
        self._browser = Browser()
        self._browser.set_handle_robots(False) # The builder pages are excluded by robots.txt

    def name(self):
        return self._name

    def results_url(self):
        return "%s/results/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())

    # In addition to per-build results, the build.chromium.org builders also
    # keep a directory that accumulates test results over many runs.
    def accumulated_results_url(self):
        return None

    def latest_layout_test_results_url(self):
        return self.accumulated_results_url() or self.latest_cached_build().results_url();

    @memoized
    def latest_layout_test_results(self):
        return self.fetch_layout_test_results(self.latest_layout_test_results_url())

    def _fetch_file_from_results(self, results_url, file_name):
        # It seems this can return None if the url redirects and then returns 404.
        result = urllib2.urlopen("%s/%s" % (results_url, file_name))
        if not result:
            return None
        # urlopen returns a file-like object which sometimes works fine with str()
        # but sometimes is a addinfourl object. In either case calling read() is correct.
        return result.read()

    def fetch_layout_test_results(self, results_url):
        """Download and parse full_results.json from the given results URL."""
        # FIXME: This should cache that the result was a 404 and stop hitting the network.
        results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
        return LayoutTestResults.results_from_string(results_file)

    def url_encoded_name(self):
        return urllib.quote(self._name)

    def url(self):
        return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())

    # This provides a single place to mock
    def _fetch_build(self, build_number):
        build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
        if not build_dictionary:
            return None
        revision_string = build_dictionary['sourceStamp']['revision']
        return Build(self,
            build_number=int(build_dictionary['number']),
            # 'revision' may be None if a trunk build was started by the force-build button on the web page.
            revision=(int(revision_string) if revision_string else None),
            # Buildbot uses any number other than 0 to mean fail. Since we fetch with
            # filter=1, passing builds may contain no 'results' value.
            is_green=(not build_dictionary.get('results')),
        )

    def build(self, build_number):
        """Return the (cached) Build for build_number, or None."""
        if not build_number:
            return None
        cached_build = self._builds_cache.get(build_number)
        if cached_build:
            return cached_build

        build = self._fetch_build(build_number)
        self._builds_cache[build_number] = build
        return build

    def latest_cached_build(self):
        # Highest build number among builds that have a results directory.
        revision_build_pairs = self.revision_build_pairs_with_results()
        revision_build_pairs.sort(key=lambda i: i[1])
        latest_build_number = revision_build_pairs[-1][1]
        return self.build(latest_build_number)

    def force_build(self, username="webkit-patch", comments=None):
        """Submit the builder's force-build web form via mechanize."""
        def predicate(form):
            try:
                return form.find_control("username")
            except Exception, e:
                return False
        # ignore false positives for missing Browser methods - pylint: disable=E1102
        self._browser.open(self.url())
        self._browser.select_form(predicate=predicate)
        self._browser["username"] = username
        if comments:
            self._browser["comments"] = comments
        return self._browser.submit()

    file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")

    def _revision_and_build_for_filename(self, filename):
        # Example: "r47483 (1)/" or "r47483 (1).zip"
        match = self.file_name_regexp.match(filename)
        if not match:
            return None
        return (int(match.group("revision")), int(match.group("build_number")))

    def _fetch_revision_to_build_map(self):
        """Scrape the results directory listing into {revision: build_number}."""
        # All _fetch requests go through _buildbot for easier mocking
        # FIXME: This should use NetworkTransaction's 404 handling instead.
        try:
            # FIXME: This method is horribly slow due to the huge network load.
            # FIXME: This is a poor way to do revision -> build mapping.
            # Better would be to ask buildbot through some sort of API.
            print "Loading revision/build list from %s." % self.results_url()
            print "This may take a while..."
            result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
        except urllib2.HTTPError, error:
            if error.code != 404:
                raise
            _log.debug("Revision/build list failed to load.")
            result_files = []
        return dict(self._file_info_list_to_revision_to_build_list(result_files))

    def _file_info_list_to_revision_to_build_list(self, file_info_list):
        # This assumes there was only one build per revision, which is false but we don't care for now.
        revisions_and_builds = []
        for file_info in file_info_list:
            revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
            if revision_and_build:
                revisions_and_builds.append(revision_and_build)
        return revisions_and_builds

    def _revision_to_build_map(self):
        # Lazily built and cached for the lifetime of this Builder.
        if not self._revision_to_build_number:
            self._revision_to_build_number = self._fetch_revision_to_build_map()
        return self._revision_to_build_number

    def revision_build_pairs_with_results(self):
        return self._revision_to_build_map().items()

    # This assumes there can be only one build per revision, which is false, but we don't care for now.
    def build_for_revision(self, revision, allow_failed_lookups=False):
        # NOTE: This lookup will fail if that exact revision was never built.
        build_number = self._revision_to_build_map().get(int(revision))
        if not build_number:
            return None
        build = self.build(build_number)
        if not build and allow_failed_lookups:
            # Builds for old revisions will fail to lookup via buildbot's json api.
            build = Build(self,
                build_number=build_number,
                revision=revision,
                is_green=False,
            )
        return build

    def find_regression_window(self, red_build, look_back_limit=30):
        """Walk backwards from red_build to bracket the failure transition.

        Returns a RegressionWindow spanning the newest build that shares no
        failures with red_build and the oldest one that does.
        """
        if not red_build or red_build.is_green():
            return RegressionWindow(None, None)
        common_failures = None
        current_build = red_build
        build_after_current_build = None
        look_back_count = 0
        while current_build:
            if current_build.is_green():
                # current_build can't possibly have any failures in common
                # with red_build because it's green.
                break
            results = current_build.layout_test_results()
            # We treat a lack of results as if all the test failed.
            # This occurs, for example, when we can't compile at all.
            if results:
                failures = set(results.failing_tests())
                if common_failures == None:
                    common_failures = failures
                else:
                    common_failures = common_failures.intersection(failures)
                if not common_failures:
                    # current_build doesn't have any failures in common with
                    # the red build we're worried about. We assume that any
                    # failures in current_build were due to flakiness.
                    break
            look_back_count += 1
            if look_back_count > look_back_limit:
                return RegressionWindow(None, current_build, failing_tests=common_failures)
            build_after_current_build = current_build
            current_build = current_build.previous_build()
        # We must iterate at least once because red_build is red.
        assert(build_after_current_build)
        # Current build must either be green or have no failures in common
        # with red build, so we've found our failure transition.
        return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures)

    def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True):
        red_build = self.build(red_build_number)
        regression_window = self.find_regression_window(red_build, look_back_limit)
        if not regression_window.build_before_failure():
            return None  # We ran off the limit of our search
        # If avoid_flakey_tests, require at least 2 bad builds before we
        # suspect a real failure transition.
        if avoid_flakey_tests and regression_window.failing_build() == red_build:
            return None
        return regression_window
class Build(object):
    """A single build of a Builder, identified by build number and revision."""

    def __init__(self, builder, build_number, revision, is_green):
        self._builder = builder
        self._number = build_number
        self._revision = revision
        self._is_green = is_green

    @staticmethod
    def build_url(builder, build_number):
        return "%s/builds/%s" % (builder.url(), build_number)

    def url(self):
        return self.build_url(self.builder(), self._number)

    def results_url(self):
        # Results live in a directory named e.g. "r47483 (1)".
        results_directory = "r%s (%s)" % (self.revision(), self._number)
        return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))

    def results_zip_url(self):
        return "%s.zip" % self.results_url()

    @memoized
    def layout_test_results(self):
        return self._builder.fetch_layout_test_results(self.results_url())

    def builder(self):
        return self._builder

    def revision(self):
        return self._revision

    def is_green(self):
        return self._is_green

    def previous_build(self):
        # previous_build() allows callers to avoid assuming build numbers are sequential.
        # They may not be sequential across all master changes, or when non-trunk builds are made.
        return self._builder.build(self._number - 1)
class BuildBot(object):
_builder_factory = Builder
_default_url = config_urls.buildbot_url
def __init__(self, url=None):
self.buildbot_url = url if url else self._default_url
self._builder_by_name = {}
def _parse_last_build_cell(self, builder, cell):
status_link = cell.find('a')
if status_link:
# Will be either a revision number or a build number
revision_string = status_link.string
# If revision_string has non-digits assume it's not a revision number.
builder['built_revision'] = int(revision_string) \
if not re.match('\D', revision_string) \
else None
# FIXME: We treat slave lost as green even though it is not to
# work around the Qts bot being on a broken internet connection.
# The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
builder['is_green'] = not re.search('fail', cell.renderContents()) or \
not not re.search('lost', cell.renderContents())
status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
link_match = re.match(status_link_regexp, status_link['href'])
builder['build_number'] = int(link_match.group("build_number"))
else:
# We failed to find a link in the first cell, just give up. This
# can happen if a builder is just-added, the first cell will just
# be "no build"
# Other parts of the code depend on is_green being present.
builder['is_green'] = False
builder['built_revision'] = None
builder['build_number'] = None
def _parse_current_build_cell(self, builder, cell):
activity_lines = cell.renderContents().split("<br />")
builder["activity"] = activity_lines[0] # normally "building" or "idle"
# The middle lines document how long left for any current builds.
match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
def _parse_builder_status_from_row(self, status_row):
status_cells = status_row.findAll('td')
builder = {}
# First cell is the name
name_link = status_cells[0].find('a')
builder["name"] = unicode(name_link.string)
self._parse_last_build_cell(builder, status_cells[1])
self._parse_current_build_cell(builder, status_cells[2])
return builder
def _matches_regexps(self, builder_name, name_regexps):
for name_regexp in name_regexps:
if re.match(name_regexp, builder_name):
return True
return False
# FIXME: This method needs to die, but is used by a unit test at the moment.
def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps):
return [builder for builder in builder_statuses if self._matches_regexps(builder["name"], name_regexps)]
# FIXME: These _fetch methods should move to a networking class.
def _fetch_build_dictionary(self, builder, build_number):
# Note: filter=1 will remove None and {} and '', which cuts noise but can
# cause keys to be missing which you might otherwise expect.
# FIXME: The bot sends a *huge* amount of data for each request, we should
# find a way to reduce the response size further.
json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
try:
return json.load(urllib2.urlopen(json_url))
except urllib2.URLError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
return None
except ValueError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error decoding json data from %s: %s" % (build_url, err))
return None
def _fetch_one_box_per_builder(self):
build_status_url = "%s/one_box_per_builder" % self.buildbot_url
return urllib2.urlopen(build_status_url)
def _file_cell_text(self, file_cell):
"""Traverses down through firstChild elements until one containing a string is found, then returns that string"""
element = file_cell
while element.string is None and element.contents:
element = element.contents[0]
return element.string
def _parse_twisted_file_row(self, file_row):
string_or_empty = lambda string: unicode(string) if string else u""
file_cells = file_row.findAll('td')
return {
"filename": string_or_empty(self._file_cell_text(file_cells[0])),
"size": string_or_empty(self._file_cell_text(file_cells[1])),
"type": string_or_empty(self._file_cell_text(file_cells[2])),
"encoding": string_or_empty(self._file_cell_text(file_cells[3])),
}
def _parse_twisted_directory_listing(self, page):
soup = BeautifulSoup(page)
# HACK: Match only table rows with a class to ignore twisted header/footer rows.
file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
# FIXME: There should be a better way to get this information directly from twisted.
def _fetch_twisted_directory_listing(self, url):
return self._parse_twisted_directory_listing(urllib2.urlopen(url))
def builders(self):
return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
# This method pulls from /one_box_per_builder as an efficient way to get information about
def builder_statuses(self):
soup = BeautifulSoup(self._fetch_one_box_per_builder())
return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
def builder_with_name(self, name):
builder = self._builder_by_name.get(name)
if not builder:
builder = self._builder_factory(name, self)
self._builder_by_name[name] = builder
return builder
def failure_map(self):
failure_map = FailureMap()
revision_to_failing_bots = {}
for builder_status in self.builder_statuses():
if builder_status["is_green"]:
continue
builder = self.builder_with_name(builder_status["name"])
regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"])
if regression_window:
failure_map.add_regression_window(builder, regression_window)
return failure_map
# This makes fewer requests than calling Builder.latest_build would. It grabs all builder
# statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
def _latest_builds_from_builders(self):
builder_statuses = self.builder_statuses()
return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
def _build_at_or_before_revision(self, build, revision):
while build:
if build.revision() <= revision:
return build
build = build.previous_build()
def _fetch_builder_page(self, builder):
builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
return urllib2.urlopen(builder_page_url)
def _revisions_for_builder(self, builder):
soup = BeautifulSoup(self._fetch_builder_page(builder))
revisions = []
for status_row in soup.find('table').findAll('tr'):
revision_anchor = status_row.find('a')
table_cells = status_row.findAll('td')
if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
continue
if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
return revisions
def _find_green_revision(self, builder_revisions):
revision_statuses = {}
for builder in builder_revisions:
for revision, succeeded in builder_revisions[builder]:
revision_statuses.setdefault(revision, set())
if succeeded and revision_statuses[revision] != None:
revision_statuses[revision].add(builder)
else:
revision_statuses[revision] = None
# In descending order, look for a revision X with successful builds
# Once we found X, check if remaining builders succeeded in the neighborhood of X.
revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
for i, revision in enumerate(revisions_in_order):
if not revision_statuses[revision]:
continue
builders_succeeded_in_future = set()
for future_revision in sorted(revisions_in_order[:i + 1]):
if not revision_statuses[future_revision]:
break
builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
builders_succeeded_in_past = set()
for past_revision in revisions_in_order[i:]:
if not revision_statuses[past_revision]:
break
builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
return revision
return None | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.support;
import java.io.Serializable;
import java.util.LinkedHashSet;
import java.util.Set;
import org.aopalliance.aop.Advice;
import org.jspecify.annotations.Nullable;
import org.springframework.aop.ClassFilter;
import org.springframework.aop.DynamicIntroductionAdvice;
import org.springframework.aop.IntroductionAdvisor;
import org.springframework.aop.IntroductionInfo;
import org.springframework.core.Ordered;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* Simple {@link org.springframework.aop.IntroductionAdvisor} implementation
* that by default applies to any class.
*
* @author Rod Johnson
* @author Juergen Hoeller
* @since 11.11.2003
*/
@SuppressWarnings("serial")
public class DefaultIntroductionAdvisor implements IntroductionAdvisor, ClassFilter, Ordered, Serializable {

	private final Advice advice;

	// Interfaces to introduce, in insertion order.
	private final Set<Class<?>> interfaces = new LinkedHashSet<>();

	private int order = Ordered.LOWEST_PRECEDENCE;


	/**
	 * Create a DefaultIntroductionAdvisor for the given advice.
	 * @param advice the Advice to apply (may implement the
	 * {@link org.springframework.aop.IntroductionInfo} interface)
	 * @see #addInterface
	 */
	public DefaultIntroductionAdvisor(Advice advice) {
		this(advice, (advice instanceof IntroductionInfo introductionInfo ? introductionInfo : null));
	}

	/**
	 * Create a DefaultIntroductionAdvisor for the given advice.
	 * @param advice the Advice to apply
	 * @param introductionInfo the IntroductionInfo that describes
	 * the interface to introduce (may be {@code null})
	 */
	public DefaultIntroductionAdvisor(Advice advice, @Nullable IntroductionInfo introductionInfo) {
		Assert.notNull(advice, "Advice must not be null");
		this.advice = advice;
		if (introductionInfo != null) {
			Class<?>[] introducedInterfaces = introductionInfo.getInterfaces();
			if (introducedInterfaces.length == 0) {
				throw new IllegalArgumentException(
						"IntroductionInfo defines no interfaces to introduce: " + introductionInfo);
			}
			for (Class<?> ifc : introducedInterfaces) {
				addInterface(ifc);
			}
		}
	}

	/**
	 * Create a DefaultIntroductionAdvisor for the given advice.
	 * @param advice the Advice to apply
	 * @param ifc the interface to introduce
	 */
	public DefaultIntroductionAdvisor(DynamicIntroductionAdvice advice, Class<?> ifc) {
		Assert.notNull(advice, "Advice must not be null");
		this.advice = advice;
		addInterface(ifc);
	}


	/**
	 * Add the specified interface to the list of interfaces to introduce.
	 * @param ifc the interface to introduce
	 */
	public void addInterface(Class<?> ifc) {
		Assert.notNull(ifc, "Interface must not be null");
		if (!ifc.isInterface()) {
			throw new IllegalArgumentException("Specified class [" + ifc.getName() + "] must be an interface");
		}
		this.interfaces.add(ifc);
	}

	@Override
	public Class<?>[] getInterfaces() {
		return ClassUtils.toClassArray(this.interfaces);
	}

	@Override
	public void validateInterfaces() throws IllegalArgumentException {
		// A DynamicIntroductionAdvice must claim to implement every
		// interface this advisor introduces.
		for (Class<?> ifc : this.interfaces) {
			if (this.advice instanceof DynamicIntroductionAdvice dynamicIntroductionAdvice &&
					!dynamicIntroductionAdvice.implementsInterface(ifc)) {
				throw new IllegalArgumentException("DynamicIntroductionAdvice [" + this.advice + "] " +
						"does not implement interface [" + ifc.getName() + "] specified for introduction");
			}
		}
	}

	/**
	 * Set the ordering value for this advisor
	 * (default is {@link Ordered#LOWEST_PRECEDENCE}).
	 */
	public void setOrder(int order) {
		this.order = order;
	}

	@Override
	public int getOrder() {
		return this.order;
	}


	@Override
	public Advice getAdvice() {
		return this.advice;
	}

	@Override
	public ClassFilter getClassFilter() {
		// This advisor acts as its own ClassFilter.
		return this;
	}

	@Override
	public boolean matches(Class<?> clazz) {
		// By default, the introduction applies to any target class.
		return true;
	}


	@Override
	public boolean equals(@Nullable Object other) {
		// Identity is defined by the wrapped advice plus the introduced interfaces.
		return (this == other || (other instanceof DefaultIntroductionAdvisor otherAdvisor &&
				this.advice.equals(otherAdvisor.advice) &&
				this.interfaces.equals(otherAdvisor.interfaces)));
	}

	@Override
	public int hashCode() {
		return this.advice.hashCode() * 13 + this.interfaces.hashCode();
	}

	@Override
	public String toString() {
		return getClass().getName() + ": advice [" + this.advice + "]; interfaces " +
				ClassUtils.classNamesToString(this.interfaces);
	}

}
/** @import { AST } from '#compiler' */
/** @import { Context } from '../types' */
import * as e from '../../../errors.js';
import { mark_subtree_dynamic } from './shared/fragment.js';
/** Attribute names a `<svelte:boundary>` element may carry. */
const allowed_names = new Set(['onerror', 'failed', 'pending']);

/**
 * Validates the attributes of a `<svelte:boundary>` element and marks its
 * subtree as dynamic.
 * @param {AST.SvelteBoundary} node
 * @param {Context} context
 */
export function SvelteBoundary(node, context) {
	for (const attr of node.attributes) {
		const is_known = attr.type === 'Attribute' && allowed_names.has(attr.name);
		if (!is_known) {
			e.svelte_boundary_invalid_attribute(attr);
		}

		// The value must be exactly one expression tag (`{...}`); a bare
		// attribute (`value === true`) or any other array shape is rejected.
		const value = attr.value;
		const is_single_expression =
			Array.isArray(value) && value.length === 1 && value[0].type === 'ExpressionTag';
		if (value === true || (Array.isArray(value) && !is_single_expression)) {
			e.svelte_boundary_invalid_attribute_value(attr);
		}
	}

	mark_subtree_dynamic(context.path);
	context.next();
}
from Components.Console import Console
from os import listdir as os_listdir, path as os_path
from re import compile as re_compile
from enigma import eEnv
class Keyboard:
def __init__(self):
self.keyboardmaps = []
self.readKeyboardMapFiles()
def readKeyboardMapFiles(self):
for keymapfile in os_listdir(eEnv.resolve('${datadir}/keymaps/')):
if (keymapfile.endswith(".info")):
mapfile = None
mapname = None
for line in open(eEnv.resolve('${datadir}/keymaps/') + keymapfile):
m = re_compile('^\s*(\w+)\s*=\s*(.*)\s*$').match(line)
if m:
key, val = m.groups()
if key == 'kmap':
mapfile = val
if key == 'name':
mapname = val
if (mapfile is not None) and (mapname is not None):
self.keyboardmaps.append(( mapfile,mapname))
def activateKeyboardMap(self, index):
try:
keymap = self.keyboardmaps[index]
print "Activating keymap:",keymap[1]
keymappath = eEnv.resolve('${datadir}/keymaps/') + keymap[0]
if os_path.exists(keymappath):
Console().ePopen(("loadkmap < " + str(keymappath)))
except:
print "Selected keymap does not exist!"
def getKeyboardMaplist(self):
return self.keyboardmaps
def getDefaultKeyboardMap(self):
return 'default.kmap'
keyboard = Keyboard() | unknown | codeparrot/codeparrot-clean | ||
'use strict';
const common = require('../common.js');
const { EventEmitter } = require('events');
const bench = common.createBenchmark(main, {
newListener: [0, 1],
removeListener: [0, 1],
n: [1e6],
});
// Benchmark body: repeatedly attach and detach a fixed set of listeners,
// optionally with 'newListener'/'removeListener' monitors installed.
function main({ newListener, removeListener, n }) {
  const emitter = new EventEmitter();

  // Ten distinct no-op handlers so every add/remove is a real entry.
  const handlers = [];
  for (let idx = 0; idx < 10; idx++)
    handlers.push(() => {});

  if (newListener === 1)
    emitter.on('newListener', (event, listener) => {});
  if (removeListener === 1)
    emitter.on('removeListener', (event, listener) => {});

  bench.start();
  for (let i = 0; i < n; i++) {
    // Alternate between two event names across iterations.
    const eventName = `dummy${i % 2}`;
    for (let idx = handlers.length - 1; idx >= 0; idx--) {
      emitter.on(eventName, handlers[idx]);
    }
    for (let idx = handlers.length - 1; idx >= 0; idx--) {
      emitter.removeListener(eventName, handlers[idx]);
    }
  }
  bench.end(n);
}
#### Note: this error code is no longer emitted by the compiler.
The type does not fulfill the required lifetime.
Erroneous code example:
```compile_fail
use std::sync::Mutex;
struct MyString<'a> {
data: &'a str,
}
fn i_want_static_closure<F>(a: F)
where F: Fn() + 'static {}
fn print_string<'a>(s: Mutex<MyString<'a>>) {
i_want_static_closure(move || { // error: this closure has lifetime 'a
// rather than 'static
println!("{}", s.lock().unwrap().data);
});
}
```
In this example, the closure does not satisfy the `'static` lifetime constraint.
To fix this error, double-check the lifetime of the type. Here, we can fix
the problem by giving `s` a `'static` lifetime:
```
use std::sync::Mutex;
struct MyString<'a> {
data: &'a str,
}
fn i_want_static_closure<F>(a: F)
where F: Fn() + 'static {}
fn print_string(s: Mutex<MyString<'static>>) {
i_want_static_closure(move || { // ok!
println!("{}", s.lock().unwrap().data);
});
}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0477.md |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.junit.jupiter.api.Test;
import java.util.Collections;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class GetTelemetrySubscriptionsResponseTest {
@Test
public void testErrorCountsReturnsNoneWhenNoErrors() {
GetTelemetrySubscriptionsResponseData data = new GetTelemetrySubscriptionsResponseData()
.setErrorCode(Errors.NONE.code());
GetTelemetrySubscriptionsResponse response = new GetTelemetrySubscriptionsResponse(data);
assertEquals(Collections.singletonMap(Errors.NONE, 1), response.errorCounts());
}
@Test
public void testErrorCountsReturnsOneError() {
GetTelemetrySubscriptionsResponseData data = new GetTelemetrySubscriptionsResponseData()
.setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code());
data.setErrorCode(Errors.INVALID_CONFIG.code());
GetTelemetrySubscriptionsResponse response = new GetTelemetrySubscriptionsResponse(data);
assertEquals(Collections.singletonMap(Errors.INVALID_CONFIG, 1), response.errorCounts());
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponseTest.java |
# coding: utf-8
"""
Harbor API
These APIs provide services for manipulating Harbor project.
OpenAPI spec version: 1.4.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
sys.path.append(os.environ["SWAGGER_CLIENT_PATH"])
import unittest
import testutils
from swagger_client.models.ldap_conf import LdapConf
from pprint import pprint
#Testcase
# Define a LDAP group with harbor admin
class TestLdapPing(unittest.TestCase):
    """Exercise the Harbor LDAP "ping" endpoint as an admin user."""
    # Admin API client; credentials match the test harness defaults.
    product_api = testutils.GetProductApi("admin", "Harbor12345")
    project_id = 0
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testLdapPing(self):
        """POST an LdapConf and print the server's ping response."""
        # NOTE(review): the LDAP host/DN/password below are environment-specific
        # fixtures — confirm they match the CI LDAP server before relying on this.
        result = self.product_api.ldap_ping_post(ldapconf=LdapConf(ldap_url="10.161.127.236", ldap_search_dn="cn=admin,dc=example,dc=com", ldap_search_password="admin", ldap_scope=2))
        pprint(result)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
import logging
from celery import task
from celery_utils.persist_on_failure import LoggedPersistOnFailureTask
from django.conf import settings
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
DEFAULT_ALL_COURSES = False
DEFAULT_CHUNK_SIZE = 50
DEFAULT_FORCE_UPDATE = False
def chunks(sequence, chunk_size):
    # Lazily yield successive slices of `sequence` of length `chunk_size`;
    # the final chunk may be shorter. (Python 2: xrange.)
    return (sequence[index: index + chunk_size] for index in xrange(0, len(sequence), chunk_size))
def _task_options(routing_key):
    # Build celery apply_async options: default to the high-memory queue when
    # one is configured; an explicit routing_key always wins over HIGH_MEM_QUEUE.
    task_options = {}
    if getattr(settings, 'HIGH_MEM_QUEUE', None):
        task_options['routing_key'] = settings.HIGH_MEM_QUEUE
    if routing_key:
        task_options['routing_key'] = routing_key
    return task_options
def enqueue_async_course_overview_update_tasks(
    course_ids,
    all_courses=False,
    force_update=False,
    chunk_size=DEFAULT_CHUNK_SIZE,
    routing_key=None
):
    """
    Fan out CourseOverview refreshes as celery tasks, `chunk_size` courses per task.

    Either updates every course in the modulestore (`all_courses=True`) or only
    the given `course_ids` (course key strings). `routing_key` optionally
    overrides the queue chosen by `_task_options`.
    """
    if all_courses:
        course_keys = [course.id for course in modulestore().get_course_summaries()]
    else:
        # `course_id`, not `id`, which would shadow the builtin.
        course_keys = [CourseKey.from_string(course_id) for course_id in course_ids]

    # Loop-invariant: compute the task options once rather than per chunk.
    options = _task_options(routing_key)
    for course_key_group in chunks(course_keys, chunk_size):
        course_key_strings = [unicode(key) for key in course_key_group]
        async_course_overview_update.apply_async(
            args=course_key_strings,
            kwargs={'force_update': force_update},
            **options
        )
@task(base=LoggedPersistOnFailureTask)
def async_course_overview_update(*args, **kwargs):
    # Celery task: positional args are course key strings; kwargs must carry
    # 'force_update' (a missing key raises KeyError by design — callers always
    # pass it via enqueue_async_course_overview_update_tasks).
    course_keys = [CourseKey.from_string(arg) for arg in args]
    CourseOverview.update_select_courses(course_keys, force_update=kwargs['force_update'])
//! [`TryIntoHeaderPair`] trait and implementations.
use super::{
Header, HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, TryIntoHeaderValue,
};
use crate::error::HttpError;
/// An interface for types that can be converted into a [`HeaderName`] + [`HeaderValue`] pair for
/// insertion into a [`HeaderMap`].
///
/// [`HeaderMap`]: super::HeaderMap
pub trait TryIntoHeaderPair: Sized {
    /// Error produced when conversion fails; must convert into an [`HttpError`].
    type Error: Into<HttpError>;

    /// Attempt to convert `self` into a `(HeaderName, HeaderValue)` pair.
    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error>;
}
/// Error from the pair conversions below, distinguishing whether the header
/// name or the header value failed to parse.
#[derive(Debug)]
pub enum InvalidHeaderPart {
    /// The header name was invalid.
    Name(InvalidHeaderName),
    /// The header value was invalid.
    Value(InvalidHeaderValue),
}
// Fold either part error into the crate-wide `HttpError`, enabling `?` in
// contexts that return `HttpError`.
impl From<InvalidHeaderPart> for HttpError {
    fn from(part_err: InvalidHeaderPart) -> Self {
        match part_err {
            InvalidHeaderPart::Name(err) => err.into(),
            InvalidHeaderPart::Value(err) => err.into(),
        }
    }
}
// Name is already a `HeaderName`; only the value needs converting.
impl<V> TryIntoHeaderPair for (HeaderName, V)
where
    V: TryIntoHeaderValue,
    V::Error: Into<InvalidHeaderValue>,
{
    type Error = InvalidHeaderPart;

    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        let (name, value) = self;
        let value = value
            .try_into_value()
            .map_err(|err| InvalidHeaderPart::Value(err.into()))?;
        Ok((name, value))
    }
}
// Borrowed name variant: clones the `HeaderName`, converts the value.
impl<V> TryIntoHeaderPair for (&HeaderName, V)
where
    V: TryIntoHeaderValue,
    V::Error: Into<InvalidHeaderValue>,
{
    type Error = InvalidHeaderPart;

    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        let (name, value) = self;
        let value = value
            .try_into_value()
            .map_err(|err| InvalidHeaderPart::Value(err.into()))?;
        Ok((name.clone(), value))
    }
}
// Byte-slice name variant: both name and value require fallible conversion.
impl<V> TryIntoHeaderPair for (&[u8], V)
where
    V: TryIntoHeaderValue,
    V::Error: Into<InvalidHeaderValue>,
{
    type Error = InvalidHeaderPart;

    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        let (name, value) = self;
        let name = HeaderName::try_from(name).map_err(InvalidHeaderPart::Name)?;
        let value = value
            .try_into_value()
            .map_err(|err| InvalidHeaderPart::Value(err.into()))?;
        Ok((name, value))
    }
}
// String-slice name variant: parses the name, then converts the value.
impl<V> TryIntoHeaderPair for (&str, V)
where
    V: TryIntoHeaderValue,
    V::Error: Into<InvalidHeaderValue>,
{
    type Error = InvalidHeaderPart;

    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        let (name, value) = self;
        let name = HeaderName::try_from(name).map_err(InvalidHeaderPart::Name)?;
        let value = value
            .try_into_value()
            .map_err(|err| InvalidHeaderPart::Value(err.into()))?;
        Ok((name, value))
    }
}
// Owned-`String` name variant: delegates to the `&str` implementation.
impl<V> TryIntoHeaderPair for (String, V)
where
    V: TryIntoHeaderValue,
    V::Error: Into<InvalidHeaderValue>,
{
    type Error = InvalidHeaderPart;

    #[inline]
    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        let (name, value) = self;
        (name.as_str(), value).try_into_pair()
    }
}
// Typed headers supply their own statically-known name via `T::name()`.
impl<T: Header> TryIntoHeaderPair for T {
    type Error = <T as TryIntoHeaderValue>::Error;

    #[inline]
    fn try_into_pair(self) -> Result<(HeaderName, HeaderValue), Self::Error> {
        Ok((T::name(), self.try_into_value()?))
    }
}
import numpy as np
def readProbeFile(filename):
with open (filename, 'rb') as csvfile:
data=np.loadtxt(csvfile, delimiter=",",skiprows=1)
time=data[:,0]
data = data[:,1:]
csvfile.seek(0)
header = csvfile.readline()
header = header.replace(","," ")
header = header.split()
probeType = []
for i in range(0,len(header)):
probeType.append(header[i])
datalist = [probeType,time,data]
return datalist
def costapValue(f,tail):
return 0.5*(1.-np.cos(np.pi*float(tail-f)/float(tail)))
def signalFilter(time,data,minfreq,maxfreq, cutoffMax, cutoffMin):
dt = (time[-1]-time[0])/(len(time)-1)
doInterp = False
nfft = len(time)
dt = (time[-1]-time[0])/(len(time)-1)
freq = np.fft.fftfreq(nfft,dt)
fft_x = np.fft.fft(data[:],nfft)
ii = -1
tailMax = cutoffMax - maxfreq
tailMin = minfreq - cutoffMin
if(tailMax < 0 or tailMin < 0):
print "cutoffMax is less than maxfreq or cutoffMin larger than minfreq, this should not be the case"
for ff in freq:
ii+=1
if ff > maxfreq:
if ff - maxfreq < tailMax:
f = ff -maxfreq
fft_x[ii] = costapValue(f,tailMax)*fft_x[ii]
else:
fft_x[ii] = 0.
if ff < -maxfreq:
if -ff - maxfreq < tailMax:
f = -ff -maxfreq
fft_x[ii] = costapValue(f,tailMax)*fft_x[ii]
else:
fft_x[ii] = 0.
if (ff < minfreq and ff > -minfreq and ff!=0.):
fd = abs(ff)
if minfreq - fd < tailMin:
f = minfreq - fd
fft_x[ii] = costapValue(f,tailMin)*fft_x[ii]
else:
fft_x[ii] = 0.
data1 = np.zeros(data.shape)
data1[:] = np.fft.ifft(fft_x)
return data1
def zeroCrossing(time,data,up=True):
trend = np.mean(data)
data = data - trend
data_temp=np.zeros(data.shape,)
data_temp[0:-1] = data[1:]
zc = data_temp*data
zcPoints = np.where(zc<0)[0]
if(up):
if(data[0]<0):
zcPoints = zcPoints[::2]
if(data[0]>0):
zcPoints = zcPoints[1::2]
else:
if(data[0]<0):
zcPoints = zcPoints[1::2]
if(data[0]>0):
zcPoints = zcPoints[::2]
zCH = []
period=[]
for zi in range(1,len(zcPoints)):
i1 = zcPoints[zi-1]
i2 = zcPoints[zi]
zCH.append(max(data[i1:i2])-min(data[i1:i2]))
period.append(time[i2]-time[i1])
zCH = np.array(zCH)
period = np.array(period)
height = np.sort(zCH)
#ii = len(height) - float(len(height))/float(mode)
height = np.mean(height)
period = np.mean(period)
return [period, height]
def reflStat(H1,H2,H3,dx,wavelength):
D = 2*np.pi*dx/wavelength
Amp =np.array([H1/2.,H2/2.,H3/2.])
A1 = Amp[0]*Amp[0]
A2 = Amp[1]*Amp[1]
A3 = Amp[2]*Amp[2]
Lamda = (A1 + A3 - 2.*A2*np.cos(2*D))/(4.*np.sin(D)*np.sin(D))
Gamma = 0.5*np.sqrt(
((2*A2-A1-A3)/(2.*np.sin(D)*np.sin(D)))**2+((A1-A3)/np.sin(2*D))**2)
Hi = np.sqrt(Lamda + Gamma) + np.sqrt(Lamda - Gamma)
Hr = np.sqrt(Lamda + Gamma) - np.sqrt(Lamda - Gamma)
Rf = Hr/(Hi+1e-15)
return [Hi,Hr,Rf] | unknown | codeparrot/codeparrot-clean | ||
{
"INCRBYFLOAT": {
"summary": "Increment the floating point value of a key by a number. Uses 0 as initial value if the key doesn't exist.",
"complexity": "O(1)",
"group": "string",
"since": "2.6.0",
"arity": 3,
"function": "incrbyfloatCommand",
"command_flags": [
"WRITE",
"DENYOOM",
"FAST"
],
"acl_categories": [
"STRING"
],
"key_specs": [
{
"flags": [
"RW",
"ACCESS",
"UPDATE"
],
"begin_search": {
"index": {
"pos": 1
}
},
"find_keys": {
"range": {
"lastkey": 0,
"step": 1,
"limit": 0
}
}
}
],
"reply_schema": {
"type": "string",
"description": "The value of the key after incrementing it."
},
"arguments": [
{
"name": "key",
"type": "key",
"key_spec_index": 0
},
{
"name": "increment",
"type": "double"
}
]
}
} | json | github | https://github.com/redis/redis | src/commands/incrbyfloat.json |
# -*- python -*-
#
# File : wireshark_be.py
#
# Author : Frank Singleton (frank.singleton@ericsson.com)
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at http://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Description:
#
# Omniidl Back-end which parses an IDL data structure provided by the frontend
# and generates packet-idl-xxx.[ch] for compiling as a dissector in
# Wireshark IP protocol anlayser.
#
#
#
#
# Strategy.
#
# Crawl all the way down all branches until I hit "Operation", "Enum", "Attribute",
# "Struct" and "Union" nodes. Then store these nodes in lists.
#
# Pass these lists (via an object ref) to the src code
# generator (wireshark_gen) class and let it do the hard work !
#
#
# Dont forget structs can contain embedded structs etc .. so dont forget
# to peek inside and check :-)
#
#
"""Wireshark IDL compiler back-end."""
from omniidl import idlast, idltype, idlvisitor, idlutil, output
import sys, string
from os import path
from wireshark_gen import wireshark_gen_C
#
# This class finds the "Operation" nodes ,Enum Nodes, "Attribute" nodes, Struct Nodes
# and Union Nodes. Then it hands them off to an instance of the source code generator
# class "wireshark_gen"
#
class WiresharkVisitor:
DEBUG = 0 # debug flag
def __init__(self, st):
self.st = st
self.oplist = [] # list of operation nodes
self.enlist = [] # list of enum nodes
self.atlist = [] # list of attribute nodes
self.stlist = [] # list of struct nodes
self.unlist = [] # list of union nodes
def visitAST(self, node):
if self.DEBUG:
print "XXX visitAST() node = ", node
for n in node.declarations():
if isinstance(n, idlast.Module):
self.visitModule(n)
if isinstance(n, idlast.Interface):
self.visitInterface(n)
if isinstance(n, idlast.Operation):
self.visitOperation(n)
if isinstance(n, idlast.Attribute):
self.visitAttribute(n)
if isinstance(n, idlast.Enum):
self.visitEnum(n)
if isinstance(n, idlast.Struct):
self.visitStruct(n)
if isinstance(n, idlast.Union):
self.visitUnion(n)
# Check for Typedef structs and unions
if isinstance(n, idlast.Typedef):
self.visitTypedef(n) # who are you ?
def visitModule(self, node):
if self.DEBUG:
print "XXX visitModule() node = ", node
for n in node.definitions():
if isinstance(n, idlast.Module):
self.visitModule(n)
if isinstance(n, idlast.Interface):
self.visitInterface(n)
if isinstance(n, idlast.Operation):
self.visitOperation(n)
if isinstance(n, idlast.Attribute):
self.visitAttribute(n)
if isinstance(n, idlast.Enum):
self.visitEnum(n)
if isinstance(n, idlast.Struct):
self.visitStruct(n)
if isinstance(n, idlast.Union):
self.visitUnion(n)
# Check for Typedef structs and unions
if isinstance(n, idlast.Typedef):
self.visitTypedef(n) # who are you ?
def visitInterface(self, node):
if self.DEBUG:
print "XXX visitInterface() node = ", node
for c in node.callables():
if isinstance(c, idlast.Operation):
self.visitOperation(c)
if isinstance(c, idlast.Attribute):
self.visitAttribute(c)
for d in node.contents():
if isinstance(d, idlast.Enum):
self.visitEnum(d)
if isinstance(d, idlast.Struct):
self.visitStruct(d)
if isinstance(d, idlast.Union):
self.visitUnion(d)
# Check for Typedef structs and unions
if isinstance(d, idlast.Typedef):
self.visitTypedef(d) # who are you ?
#
# visitOperation
#
# populates the operations node list "oplist"
#
#
def visitOperation(self,opnode):
if not opnode in self.oplist:
self.oplist.append(opnode) # store operation node
#
# visitAttribute
#
# populates the attribute node list "atlist"
#
#
def visitAttribute(self,atnode):
if not atnode in self.atlist:
self.atlist.append(atnode) # store attribute node
#
# visitEnum
#
# populates the Enum node list "enlist"
#
#
def visitEnum(self,enode):
if not enode in self.enlist:
self.enlist.append(enode) # store enum node if unique
#
# visitTypedef
#
# Search to see if its a typedef'd struct, union, or enum
#
# eg: typdef enum colors {red, green, blue } mycolors;
#
def visitTypedef(self,td):
d = td.aliasType() # get Type, possibly Declared
if isinstance(d,idltype.Declared):
self.visitDeclared(d)
#
# visitDeclared
#
# Search to see if its a struct, union, or enum
#
#
def visitDeclared(self,d):
if isinstance(d,idltype.Declared):
sue = d.decl() # grab the struct or union or enum
if isinstance(sue, idlast.Struct):
self.visitStruct(sue)
if isinstance(sue, idlast.Union):
self.visitUnion(sue)
if isinstance(sue, idlast.Enum):
self.visitEnum(sue)
#
# visitStruct
#
# populates the struct node list "stlist"
# and checks its members also
#
#
def visitStruct(self,stnode):
if not stnode in self.stlist:
self.stlist.append(stnode) # store struct node if unique and avoid recursive loops
# if we come across recursive structs
for m in stnode.members(): # find embedded struct definitions within this
mt = m.memberType()
if isinstance(mt,idltype.Declared):
self.visitDeclared(mt) # if declared, then check it out
#
# visitUnion
#
# populates the struct node list "unlist"
# and checks its members also
#
#
def visitUnion(self,unnode):
if not unnode in self.unlist:
self.unlist.append(unnode) # store union node if unique
if unnode.constrType(): # enum defined within switch type
if isinstance(unnode.switchType(),idltype.Declared):
self.visitDeclared(unnode.switchType())
for c in unnode.cases():
ct = c.caseType()
if isinstance(ct,idltype.Declared):
self.visitDeclared(ct) # if declared, then check it out
def run(tree, args):
    # omniidl back-end entry point: visit the AST, then hand the collected
    # node lists to the C code generator (wireshark_gen_C).
    st = output.Stream(sys.stdout, 4) # set indent for stream
    ev = WiresharkVisitor(st) # create visitor object
    ev.visitAST(tree) # go find some operations
    #
    # Derive the protocol name from the main IDL file's basename,
    # eg: CosNaming.idl -> "CosNaming".
    #
    fname = path.basename(tree.file()) # grab basename only, dont care about path
    nl = string.split(fname,".")[0] # split name of main IDL file using "." as separator
    # and grab first field (eg: CosNaming)
    if ev.DEBUG:
        for i in ev.oplist:
            print "XXX - Operation node ", i, " repoId() = ", i.repoId()
        for i in ev.atlist:
            print "XXX - Attribute node ", i, " identifiers() = ", i.identifiers()
        for i in ev.enlist:
            print "XXX - Enum node ", i, " repoId() = ", i.repoId()
        for i in ev.stlist:
            print "XXX - Struct node ", i, " repoId() = ", i.repoId()
        for i in ev.unlist:
            print "XXX - Union node ", i, " repoId() = ", i.repoId()
    # create a C generator object
    # and generate some C code
    eg = wireshark_gen_C(ev.st, string.upper(nl), string.lower(nl), string.capitalize(nl) + " Dissector Using GIOP API")
    eg.genCode(ev.oplist, ev.atlist, ev.enlist, ev.stlist, ev.unlist) # pass them onto the C generator
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the initial `Contact` model.
    # Do not edit applied migrations by hand — generate a follow-up instead.

    dependencies = [("orgs", "0014_auto_20150722_1419"), ("contacts", "0001_initial")]
    operations = [
        migrations.CreateModel(
            name="Contact",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                ("uuid", models.CharField(unique=True, max_length=36)),
                (
                    "gender",
                    models.CharField(
                        choices=[("M", "Male"), ("F", "Female")],
                        max_length=1,
                        blank=True,
                        help_text="Gender of the contact",
                        null=True,
                        verbose_name="Gender",
                    ),
                ),
                ("born", models.IntegerField(null=True, verbose_name="Born Field", blank=True)),
                (
                    "occupation",
                    models.CharField(max_length=255, null=True, verbose_name="Occupation Field", blank=True),
                ),
                ("registered_on", models.DateTimeField(null=True, verbose_name="Registration Date", blank=True)),
                ("state", models.CharField(max_length=255, null=True, verbose_name="State Field")),
                ("district", models.CharField(max_length=255, null=True, verbose_name="District Field")),
                (
                    "org",
                    models.ForeignKey(
                        related_name="contacts", on_delete=models.PROTECT, verbose_name="Organization", to="orgs.Org"
                    ),
                ),
            ],
        )
    ]
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_user_tacacsplus
short_description: Configure TACACS+ server entries in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify user feature and tacacsplus category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
user_tacacsplus:
description:
- Configure TACACS+ server entries.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
authen_type:
description:
- Allowed authentication protocols/methods.
type: str
choices:
- mschap
- chap
- pap
- ascii
- auto
authorization:
description:
- Enable/disable TACACS+ authorization.
type: str
choices:
- enable
- disable
key:
description:
- Key to access the primary server.
type: str
name:
description:
- TACACS+ server entry name.
required: true
type: str
port:
description:
- Port number of the TACACS+ server.
type: int
secondary_key:
description:
- Key to access the secondary server.
type: str
secondary_server:
description:
- Secondary TACACS+ server CN domain name or IP address.
type: str
server:
description:
- Primary TACACS+ server CN domain name or IP address.
type: str
source_ip:
description:
- source IP for communications to TACACS+ server.
type: str
tertiary_key:
description:
- Key to access the tertiary server.
type: str
tertiary_server:
description:
- Tertiary TACACS+ server CN domain name or IP address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure TACACS+ server entries.
fortios_user_tacacsplus:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
user_tacacsplus:
authen_type: "mschap"
authorization: "enable"
key: "<your_own_value>"
name: "default_name_6"
port: "7"
secondary_key: "<your_own_value>"
secondary_server: "<your_own_value>"
server: "192.168.100.40"
source_ip: "84.230.14.43"
tertiary_key: "<your_own_value>"
tertiary_server: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    # Authenticate against the FortiGate using legacy host/username/password
    # parameters. HTTPS stays enabled unless explicitly disabled via data['https'].
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_user_tacacsplus_data(json):
    """Return a dict containing only the recognised, non-None TACACS+ options."""
    option_list = ['authen_type', 'authorization', 'key',
                   'name', 'port', 'secondary_key',
                   'secondary_server', 'server', 'source_ip',
                   'tertiary_key', 'tertiary_server']

    # Keep every known option that is present and set.
    return dict((option, json[option]) for option in option_list
                if option in json and json[option] is not None)
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in dict keys.

    FortiOS API field names use hyphens while Ansible argument names use
    underscores. Fix: the original list branch rebound the loop variable
    (``elem = underscore_to_hyphen(elem)``), so elements of a list were
    never actually converted; build a new list instead.
    """
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return dict((key.replace('_', '-'), underscore_to_hyphen(value))
                    for key, value in data.items())
    return data
def user_tacacsplus(data, fos):
    # Apply the desired state for one TACACS+ server entry and return the raw
    # FortiOS API response. Implicitly returns None when state is neither
    # 'present' nor 'absent' (argument choices prevent that in practice).
    vdom = data['vdom']
    # State may come from the top level (preferred) or the legacy nested field.
    # NOTE(review): the elif checks membership before truthiness of the dict
    # itself — looks inverted, but matches the module's sibling generators.
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['user_tacacsplus'] and data['user_tacacsplus']:
        state = data['user_tacacsplus']['state']
    else:
        state = True
    user_tacacsplus_data = data['user_tacacsplus']
    # Drop unset options, then translate key names to the API's hyphenated form.
    filtered_data = underscore_to_hyphen(filter_user_tacacsplus_data(user_tacacsplus_data))
    if state == "present":
        return fos.set('user',
                       'tacacs+',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('user',
                          'tacacs+',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Treat a response as successful if its status is 'success', or if it was
    a DELETE that answered HTTP 404 (the object was already absent)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_user(data, fos):
    # Dispatch to the tacacsplus handler when its config section is supplied.
    # Returns (is_error, changed, raw_response).
    # NOTE(review): if data['user_tacacsplus'] is falsy, `resp` is unbound and
    # the return raises NameError — confirm the module always passes the section.
    if data['user_tacacsplus']:
        resp = user_tacacsplus(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible module entry point.

    Builds the argument spec, connects to the FortiGate either through the
    HTTPAPI connection plugin (preferred) or the legacy ``fortiosapi``
    library (when host/username/password are given), applies the change and
    exits the module with the result.
    """
    # Argument spec: top-level connection options plus the nested
    # user_tacacsplus option block mirroring the API attributes.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "user_tacacsplus": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "authen_type": {"required": False, "type": "str",
                                "choices": ["mschap", "chap", "pap",
                                            "ascii", "auto"]},
                "authorization": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "key": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "port": {"required": False, "type": "int"},
                "secondary_key": {"required": False, "type": "str"},
                "secondary_server": {"required": False, "type": "str"},
                "server": {"required": False, "type": "str"},
                "source_ip": {"required": False, "type": "str"},
                "tertiary_key": {"required": False, "type": "str"},
                "tertiary_server": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # NOTE: relies on the private ``_socket_path`` attribute, which the
        # HTTPAPI connection plugin populates when a persistent socket exists.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_user(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: import lazily so the module loads without fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_user(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" reads like leftover boilerplate;
        # a clearer message would help users, but changing it here would
        # alter module output.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import sys
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class PrepareChangeLog(AbstractStep):
    """Tool step that creates or updates ChangeLog entries for a patch.

    Runs the prepare-ChangeLog script, fills in the bug title/URL where
    missing, and — when --update-changelogs is used — merges the freshly
    regenerated entry with the pre-existing top entry of each ChangeLog.

    NOTE: this is Python 2 code (see the ``except ScriptError, e`` syntax
    in run()).
    """
    @classmethod
    def options(cls):
        # Command-line options this step understands, in addition to the
        # options shared by every AbstractStep.
        return AbstractStep.options() + [
            Options.quiet,
            Options.email,
            Options.git_commit,
            Options.update_changelogs,
        ]
    def _ensure_bug_url(self, state):
        """Insert the bug title and URL into each ChangeLog whose latest
        entry does not already carry a bug id."""
        if not state.get("bug_id"):
            return
        bug_id = state.get("bug_id")
        changelogs = self.cached_lookup(state, "changelogs")
        for changelog_path in changelogs:
            changelog = ChangeLog(changelog_path, self._tool.filesystem)
            if not changelog.latest_entry().bug_id():
                changelog.set_short_description_and_bug_url(
                    self.cached_lookup(state, "bug_title"),
                    self._tool.bugs.bug_url_for_bug_id(bug_id))
    def _resolve_existing_entry(self, changelog_path):
        """Merge the entry just written by prepare-ChangeLog with the
        previous top entry of the same ChangeLog file."""
        # When this is called, the top entry in the ChangeLog was just created
        # by prepare-ChangeLog, as a clean updated version of the one below it.
        with self._tool.filesystem.open_text_file_for_reading(changelog_path) as changelog_file:
            entries_gen = ChangeLog.parse_entries_from_file(changelog_file)
            # Take at most the first two entries; under Python 2, zip()
            # returns a list, so len() below is valid.
            entries = zip(entries_gen, range(2))
        if not len(entries):
            raise Exception("Expected to find at least two ChangeLog entries in %s but found none." % changelog_path)
        if len(entries) == 1:
            # If we get here, it probably means we've just rolled over to a
            # new CL file, so we don't have anything to resolve.
            return
        (new_entry, _), (old_entry, _) = entries
        final_entry = self._merge_entries(old_entry, new_entry)
        changelog = ChangeLog(changelog_path, self._tool.filesystem)
        changelog.delete_entries(2)
        changelog.prepend_text(final_entry)
    def _merge_entries(self, old_entry, new_entry):
        """Return the old entry's text updated with the new entry's date,
        bug description and touched-files section."""
        final_entry = old_entry.contents()
        # Only the first date occurrence is the entry header; replace once.
        final_entry = final_entry.replace(old_entry.date(), new_entry.date(), 1)
        new_bug_desc = new_entry.bug_description()
        old_bug_desc = old_entry.bug_description()
        if new_bug_desc and old_bug_desc and new_bug_desc != old_bug_desc:
            final_entry = final_entry.replace(old_bug_desc, new_bug_desc)
        new_touched = new_entry.touched_functions()
        old_touched = old_entry.touched_functions()
        if new_touched != old_touched:
            if old_entry.is_touched_files_text_clean():
                final_entry = final_entry.replace(old_entry.touched_files_text(), new_entry.touched_files_text())
            else:
                # The old touched-files list was hand-edited; append the
                # regenerated list rather than clobbering those edits.
                final_entry += "\n" + new_entry.touched_files_text()
        return final_entry + "\n"
    def run(self, state):
        """Execute the step: ensure bug URLs are present and, when
        requested, run prepare-ChangeLog and merge regenerated entries."""
        if self.cached_lookup(state, "changelogs"):
            self._ensure_bug_url(state)
            if not self._options.update_changelogs:
                return
        args = self._tool.port().prepare_changelog_command()
        if state.get("bug_id"):
            args.append("--bug=%s" % state["bug_id"])
            args.append("--description=%s" % self.cached_lookup(state, 'bug_title'))
        if self._options.email:
            args.append("--email=%s" % self._options.email)
        if self._tool.scm().supports_local_commits():
            args.append("--merge-base=%s" % self._tool.scm().merge_base(self._options.git_commit))
        args.extend(self._changed_files(state))
        try:
            output = self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
        except ScriptError, e:
            _log.error("Unable to prepare ChangeLogs.")
            sys.exit(1)
        # These are the ChangeLog entries added by prepare-Changelog
        changelogs = re.findall(r'Editing the (\S*/ChangeLog) file.', output)
        changelogs = set(self._tool.filesystem.join(self._tool.scm().checkout_root, f) for f in changelogs)
        for changelog in changelogs & set(self.cached_lookup(state, "changelogs")):
            self._resolve_existing_entry(changelog)
        self.did_modify_checkout(state)
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
import sys
sys.path.append('../')
import logging
import traceback as tb
import suds.metrics as metrics
from tests import *
from suds import *
from suds.client import Client
from datetime import datetime
# NOTE: Python 2 manual integration script; requires a local Axis2
# BasicService instance at the URL below.  Not a unit test.
errors = 0
setup_logging()
#logging.getLogger('suds.client').setLevel(logging.DEBUG)
url = 'http://localhost:8080/axis2/services/BasicService?wsdl'
print 'url=%s' % url
#
# create a service client using the wsdl.
#
client = Client(url)
#
# print the service (introspection)
#
print client
print 'printList()'
print client.service.printList(['a','b'])
#
# create a name object using the wsdl
#
print 'create name'
name = client.factory.create('ns2:Name')
# The non-ASCII character exercises unicode marshalling in suds.
name.first = u'jeff'+unichr(1234)
name.last = 'ortel'
print name
#
# create a phone object using the wsdl
#
print 'create phone'
phoneA = client.factory.create('ns2:Phone')
phoneA.npa = 410
phoneA.nxx = 822
phoneA.number = 5138
phoneB = client.factory.create('ns2:Phone')
phoneB.npa = 919
phoneB.nxx = 606
phoneB.number = 4406
#
# create a dog
#
dog = client.factory.create('ns2:Dog')
print dog
dog.name = 'Chance'
dog.trained = True
print dog
#
# create a person object using the wsdl
#
person = client.factory.create('ns2:Person')
#
# inspect empty person
#
print '{empty} person=\n%s' % person
person.name = name
person.age = None
person.birthday = datetime.now()
person.phone.append(phoneA)
person.phone.append(phoneB)
person.pets.append(dog)
#
# inspect person
#
print 'person=\n%s' % person
#
# add the person (using the webservice)
#
print 'addPersion()'
result = client.service.addPerson(person)
print '\nreply(\n%s\n)\n' % result.encode('utf-8')
#
# create a new name object used to update the person
#
newname = client.factory.create('ns2:Name')
newname.first = 'Todd'
newname.last = None
#
# update the person's name (using the webservice) and print return person object
#
print 'updatePersion()'
result = client.service.updatePerson(person, newname)
print '\nreply(\n%s\n)\n' % str(result)
result = client.service.updatePerson(person, None)
print '\nreply(\n%s\n)\n' % str(result)
#
# invoke the echo service
#
print 'echo()'
client.service.echo(None)
result = client.service.echo('this is cool')
print '\nreply( %s )\n' % str(result)
print 'echo() with {none}'
result = client.service.echo(None)
print '\nreply( %s )\n' % str(result)
#
# invoke the hello service
#
print 'hello()'
result = client.service.hello()
print '\nreply( %s )\n' % str(result)
#
# invoke the testVoid service
#
try:
    print 'getVoid()'
    result = client.service.getVoid()
    print '\nreply( %s )\n' % str(result)
except Exception, e:
    print e
#
# test list args
#
print 'getList(list)'
mylist = ['my', 'dog', 'likes', 'steak']
result = client.service.printList(mylist)
print '\nreply( %s )\n' % str(result)
# tuple
print 'testListArgs(tuple)'
mylist = ('my', 'dog', 'likes', 'steak')
result = client.service.printList(mylist)
print '\nreply( %s )\n' % str(result)
#
# test list returned
#
for n in range(0, 3):
    print 'getList(str, %d)' % n
    result = client.service.getList('hello', n)
    print '\nreply( %s )\n' % str(result)
    assert ( isinstance(result, list) and len(result) == n )
print 'addPet()'
dog = client.factory.create('ns2:Dog')
dog.name = 'Chance'
dog.trained = True
print dog
try:
    result = client.service.addPet(person, dog)
    print '\nreply( %s )\n' % str(result)
except Exception, e:
    print e
print '___________________ E X C E P T I O N S __________________________'
#
# test exceptions
#
try:
    print 'throwException() faults=True'
    result = client.service.throwException()
    print '\nreply( %s )\n' % tostr(result)
except Exception, e:
    print e
#
# test faults
#
# With faults=False, suds returns (status, reply) instead of raising.
try:
    print 'throwException() faults=False'
    client.set_options(faults=False)
    result = client.service.throwException()
    print '\nreply( %s )\n' % tostr(result)
except Exception, e:
    print e
print '\nfinished: errors=%d' % errors
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.scheduling.annotation;
import java.lang.reflect.InaccessibleObjectException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Supplier;
import io.micrometer.observation.Observation;
import io.micrometer.observation.ObservationRegistry;
import io.micrometer.observation.contextpropagation.ObservationThreadLocalAccessor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jspecify.annotations.Nullable;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import reactor.core.publisher.Flux;
import org.springframework.aop.support.AopUtils;
import org.springframework.core.CoroutinesUtils;
import org.springframework.core.KotlinDetector;
import org.springframework.core.ReactiveAdapter;
import org.springframework.core.ReactiveAdapterRegistry;
import org.springframework.scheduling.SchedulingAwareRunnable;
import org.springframework.scheduling.support.DefaultScheduledTaskObservationConvention;
import org.springframework.scheduling.support.ScheduledTaskObservationContext;
import org.springframework.scheduling.support.ScheduledTaskObservationConvention;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
import static org.springframework.scheduling.support.ScheduledTaskObservationDocumentation.TASKS_SCHEDULED_EXECUTION;
/**
* Helper class for @{@link ScheduledAnnotationBeanPostProcessor} to support reactive
* cases without a dependency on optional classes.
*
* @author Simon Baslé
* @author Brian Clozel
* @since 6.1
*/
abstract class ScheduledAnnotationReactiveSupport {

	// Both flags are computed once at class load; they gate the optional
	// Reactor / Kotlin-coroutines code paths below.
	static final boolean REACTOR_PRESENT = ClassUtils.isPresent(
			"reactor.core.publisher.Flux", ScheduledAnnotationReactiveSupport.class.getClassLoader());

	static final boolean COROUTINES_REACTOR_PRESENT = ClassUtils.isPresent(
			"kotlinx.coroutines.reactor.MonoKt", ScheduledAnnotationReactiveSupport.class.getClassLoader());

	private static final Log logger = LogFactory.getLog(ScheduledAnnotationReactiveSupport.class);

	/**
	 * Checks that if the method is reactive, it can be scheduled. Methods are considered
	 * eligible for reactive scheduling if they either return an instance of a type that
	 * can be converted to {@code Publisher} or are a Kotlin suspending function.
	 * If the method doesn't match these criteria, this check returns {@code false}.
	 * <p>For scheduling of Kotlin suspending functions, the Coroutine-Reactor bridge
	 * {@code kotlinx.coroutines.reactor} must be present at runtime (in order to invoke
	 * suspending functions as a {@code Publisher}). Provided that is the case, this
	 * method returns {@code true}. Otherwise, it throws an {@code IllegalStateException}.
	 * @throws IllegalStateException if the method is reactive but Reactor and/or the
	 * Kotlin coroutines bridge are not present at runtime
	 */
	public static boolean isReactive(Method method) {
		if (KotlinDetector.isSuspendingFunction(method)) {
			// Note that suspending functions declared without args have a single Continuation
			// parameter in reflective inspection
			Assert.isTrue(method.getParameterCount() == 1,
					"Kotlin suspending functions may only be annotated with @Scheduled if declared without arguments");
			Assert.isTrue(COROUTINES_REACTOR_PRESENT, "Kotlin suspending functions may only be annotated with " +
					"@Scheduled if the Coroutine-Reactor bridge (kotlinx.coroutines.reactor) is present at runtime");
			return true;
		}
		ReactiveAdapterRegistry registry = ReactiveAdapterRegistry.getSharedInstance();
		if (!registry.hasAdapters()) {
			return false;
		}
		Class<?> returnType = method.getReturnType();
		ReactiveAdapter candidateAdapter = registry.getAdapter(returnType);
		if (candidateAdapter == null) {
			return false;
		}
		Assert.isTrue(method.getParameterCount() == 0,
				"Reactive methods may only be annotated with @Scheduled if declared without arguments");
		Assert.isTrue(candidateAdapter.getDescriptor().isDeferred(),
				"Reactive methods may only be annotated with @Scheduled if the return type supports deferred execution");
		return true;
	}

	/**
	 * Create a {@link Runnable} for the Scheduled infrastructure, allowing for scheduled
	 * subscription to the publisher produced by a reactive method.
	 * <p>Note that the reactive method is invoked once, but the resulting {@code Publisher}
	 * is subscribed to repeatedly, once per each invocation of the {@code Runnable}.
	 * <p>In the case of a fixed-delay configuration, the subscription inside the
	 * {@link Runnable} is turned into a blocking call in order to maintain fixed-delay
	 * semantics (i.e. the task blocks until completion of the Publisher, and the
	 * delay is applied until the next iteration).
	 */
	public static Runnable createSubscriptionRunnable(Method method, Object targetBean, Scheduled scheduled,
			Supplier<ObservationRegistry> observationRegistrySupplier, List<Runnable> subscriptionTrackerRegistry) {

		// Fixed-delay semantics require the runnable to block on completion.
		boolean shouldBlock = (scheduled.fixedDelay() > 0 || StringUtils.hasText(scheduled.fixedDelayString()));
		Publisher<?> publisher = getPublisherFor(method, targetBean);
		Supplier<ScheduledTaskObservationContext> contextSupplier =
				() -> new ScheduledTaskObservationContext(targetBean, method);
		String displayName = targetBean.getClass().getName() + "." + method.getName();
		return new SubscribingRunnable(publisher, shouldBlock, scheduled.scheduler(),
				subscriptionTrackerRegistry, displayName, observationRegistrySupplier, contextSupplier);
	}

	/**
	 * Turn the invocation of the provided {@code Method} into a {@code Publisher},
	 * either by reflectively invoking it and converting the result to a {@code Publisher}
	 * via {@link ReactiveAdapterRegistry} or by converting a Kotlin suspending function
	 * into a {@code Publisher} via {@link CoroutinesUtils}.
	 * <p>The {@link #isReactive(Method)} check is a precondition to calling this method.
	 * If Reactor is present at runtime, the {@code Publisher} is additionally converted
	 * to a {@code Flux} with a checkpoint String, allowing for better debugging.
	 */
	static Publisher<?> getPublisherFor(Method method, Object bean) {
		if (KotlinDetector.isSuspendingFunction(method)) {
			// NOTE(review): the reflective Parameter[] is passed as the args array;
			// presumably CoroutinesUtils supplies the Continuation itself — confirm
			// against CoroutinesUtils.invokeSuspendingFunction documentation.
			return CoroutinesUtils.invokeSuspendingFunction(method, bean, (Object[]) method.getParameters());
		}
		ReactiveAdapterRegistry registry = ReactiveAdapterRegistry.getSharedInstance();
		Class<?> returnType = method.getReturnType();
		ReactiveAdapter adapter = registry.getAdapter(returnType);
		if (adapter == null) {
			throw new IllegalArgumentException("Cannot convert @Scheduled reactive method return type to Publisher");
		}
		if (!adapter.getDescriptor().isDeferred()) {
			throw new IllegalArgumentException("Cannot convert @Scheduled reactive method return type to Publisher: " +
					returnType.getSimpleName() + " is not a deferred reactive type");
		}
		Method invocableMethod = AopUtils.selectInvocableMethod(method, bean.getClass());
		try {
			ReflectionUtils.makeAccessible(invocableMethod);
			Object returnValue = invocableMethod.invoke(bean);
			Publisher<?> publisher = adapter.toPublisher(returnValue);
			// If Reactor is on the classpath, we could benefit from having a checkpoint for debuggability
			if (REACTOR_PRESENT) {
				return Flux.from(publisher).checkpoint(
						"@Scheduled '"+ method.getName() + "()' in '" + method.getDeclaringClass().getName() + "'");
			}
			else {
				return publisher;
			}
		}
		catch (InvocationTargetException ex) {
			// Unwrap to surface the user method's own exception as the cause.
			throw new IllegalArgumentException(
					"Cannot obtain a Publisher-convertible value from the @Scheduled reactive method",
					ex.getTargetException());
		}
		catch (IllegalAccessException | InaccessibleObjectException ex) {
			throw new IllegalArgumentException(
					"Cannot obtain a Publisher-convertible value from the @Scheduled reactive method", ex);
		}
	}

	/**
	 * Utility implementation of {@code Runnable} that subscribes to a {@code Publisher}
	 * or subscribes-then-blocks if {@code shouldBlock} is set to {@code true}.
	 */
	static final class SubscribingRunnable implements SchedulingAwareRunnable {

		private static final ScheduledTaskObservationConvention DEFAULT_CONVENTION =
				new DefaultScheduledTaskObservationConvention();

		private final Publisher<?> publisher;

		final boolean shouldBlock;

		final String displayName;

		private final @Nullable String qualifier;

		private final List<Runnable> subscriptionTrackerRegistry;

		final Supplier<ObservationRegistry> observationRegistrySupplier;

		final Supplier<ScheduledTaskObservationContext> contextSupplier;

		SubscribingRunnable(Publisher<?> publisher, boolean shouldBlock,
				@Nullable String qualifier, List<Runnable> subscriptionTrackerRegistry,
				String displayName, Supplier<ObservationRegistry> observationRegistrySupplier,
				Supplier<ScheduledTaskObservationContext> contextSupplier) {

			this.publisher = publisher;
			this.shouldBlock = shouldBlock;
			this.displayName = displayName;
			this.qualifier = qualifier;
			this.subscriptionTrackerRegistry = subscriptionTrackerRegistry;
			this.observationRegistrySupplier = observationRegistrySupplier;
			this.contextSupplier = contextSupplier;
		}

		@Override
		public @Nullable String getQualifier() {
			return this.qualifier;
		}

		@Override
		public void run() {
			// A fresh Observation per run; one run == one subscription.
			Observation observation = TASKS_SCHEDULED_EXECUTION.observation(null, DEFAULT_CONVENTION,
					this.contextSupplier, this.observationRegistrySupplier.get());
			if (this.shouldBlock) {
				// Fixed-delay: wait until the subscriber signals terminal state.
				CountDownLatch latch = new CountDownLatch(1);
				TrackingSubscriber subscriber = new TrackingSubscriber(this.subscriptionTrackerRegistry, observation, latch);
				subscribe(subscriber, observation);
				try {
					latch.await();
				}
				catch (InterruptedException ex) {
					Thread.currentThread().interrupt();
				}
			}
			else {
				TrackingSubscriber subscriber = new TrackingSubscriber(this.subscriptionTrackerRegistry, observation);
				subscribe(subscriber, observation);
			}
		}

		private void subscribe(TrackingSubscriber subscriber, Observation observation) {
			this.subscriptionTrackerRegistry.add(subscriber);
			if (REACTOR_PRESENT) {
				// Propagate the Observation through the Reactor Context so
				// downstream operators can restore it.
				observation.start();
				Flux.from(this.publisher)
						.contextWrite(context -> context.put(ObservationThreadLocalAccessor.KEY, observation))
						.subscribe(subscriber);
			}
			else {
				this.publisher.subscribe(subscriber);
			}
		}

		@Override
		public String toString() {
			return this.displayName;
		}
	}

	/**
	 * A {@code Subscriber} which keeps track of its {@code Subscription} and exposes the
	 * capacity to cancel the subscription as a {@code Runnable}. Can optionally support
	 * blocking if a {@code CountDownLatch} is supplied during construction.
	 */
	private static final class TrackingSubscriber implements Subscriber<Object>, Runnable {

		private final List<Runnable> subscriptionTrackerRegistry;

		private final Observation observation;

		private final @Nullable CountDownLatch blockingLatch;

		// Implementation note: since this is created last-minute when subscribing,
		// there shouldn't be a way to cancel the tracker externally from the
		// ScheduledAnnotationBeanProcessor before the #setSubscription(Subscription)
		// method is called.
		private @Nullable Subscription subscription;

		TrackingSubscriber(List<Runnable> subscriptionTrackerRegistry, Observation observation) {
			this(subscriptionTrackerRegistry, observation, null);
		}

		TrackingSubscriber(List<Runnable> subscriptionTrackerRegistry, Observation observation, @Nullable CountDownLatch latch) {
			this.subscriptionTrackerRegistry = subscriptionTrackerRegistry;
			this.observation = observation;
			this.blockingLatch = latch;
		}

		@Override
		public void run() {
			// Invoked as the cancellation hook registered in the tracker registry.
			if (this.subscription != null) {
				this.subscription.cancel();
				this.observation.stop();
			}
			if (this.blockingLatch != null) {
				this.blockingLatch.countDown();
			}
		}

		@Override
		public void onSubscribe(Subscription subscription) {
			this.subscription = subscription;
			// Request a large demand up front; onNext below discards elements anyway.
			subscription.request(Integer.MAX_VALUE);
		}

		@Override
		public void onNext(Object obj) {
			// no-op
		}

		@Override
		public void onError(Throwable ex) {
			this.subscriptionTrackerRegistry.remove(this);
			logger.warn("Unexpected error occurred in scheduled reactive task", ex);
			this.observation.error(ex);
			this.observation.stop();
			if (this.blockingLatch != null) {
				this.blockingLatch.countDown();
			}
		}

		@Override
		public void onComplete() {
			this.subscriptionTrackerRegistry.remove(this);
			if (this.observation.getContext() instanceof ScheduledTaskObservationContext context) {
				context.setComplete(true);
			}
			this.observation.stop();
			if (this.blockingLatch != null) {
				this.blockingLatch.countDown();
			}
		}
	}

}
"""Define tests for the OpenUV config flow."""
from regenmaschine.errors import RainMachineError
from homeassistant import data_entry_flow
from homeassistant.components.rainmachine import CONF_ZONE_RUN_TIME, DOMAIN, config_flow
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD, CONF_PORT, CONF_SSL
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_duplicate_error(hass):
    """Ensure a second entry for an already-configured controller aborts."""
    config_data = {
        CONF_IP_ADDRESS: "192.168.1.100",
        CONF_PASSWORD: "password",
        CONF_PORT: 8080,
        CONF_SSL: True,
    }
    existing = MockConfigEntry(
        domain=DOMAIN, unique_id="192.168.1.100", data=config_data
    )
    existing.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=config_data
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_invalid_password(hass):
    """Verify a failed controller login surfaces an invalid_auth error."""
    flow = config_flow.RainMachineFlowHandler()
    flow.hass = hass
    flow.context = {"source": SOURCE_USER}
    user_input = {
        CONF_IP_ADDRESS: "192.168.1.100",
        CONF_PASSWORD: "bad_password",
        CONF_PORT: 8080,
        CONF_SSL: True,
    }
    with patch(
        "regenmaschine.client.Client.load_local", side_effect=RainMachineError
    ):
        result = await flow.async_step_user(user_input=user_input)
    assert result["errors"] == {CONF_PASSWORD: "invalid_auth"}
async def test_options_flow(hass):
    """Exercise the options flow and confirm the zone run time is stored."""
    entry_data = {
        CONF_IP_ADDRESS: "192.168.1.100",
        CONF_PASSWORD: "password",
        CONF_PORT: 8080,
        CONF_SSL: True,
    }
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="abcde12345",
        data=entry_data,
        options={CONF_ZONE_RUN_TIME: 900},
    )
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.rainmachine.async_setup_entry", return_value=True
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"
        result = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input={CONF_ZONE_RUN_TIME: 600}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {CONF_ZONE_RUN_TIME: 600}
async def test_show_form(hass):
    """Check the user form is served when no input is supplied."""
    flow = config_flow.RainMachineFlowHandler()
    flow.hass = hass
    flow.context = {"source": SOURCE_USER}
    result = await flow.async_step_user(user_input=None)
    assert result["step_id"] == "user"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_step_user(hass):
    """Confirm a successful user step creates the config entry."""
    user_input = {
        CONF_IP_ADDRESS: "192.168.1.100",
        CONF_PASSWORD: "password",
        CONF_PORT: 8080,
        CONF_SSL: True,
    }
    flow = config_flow.RainMachineFlowHandler()
    flow.hass = hass
    flow.context = {"source": SOURCE_USER}
    with patch(
        "regenmaschine.client.Client.load_local", return_value=True
    ):
        result = await flow.async_step_user(user_input=user_input)
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "192.168.1.100"
    assert result["data"] == {
        CONF_IP_ADDRESS: "192.168.1.100",
        CONF_PASSWORD: "password",
        CONF_PORT: 8080,
        CONF_SSL: True,
        CONF_ZONE_RUN_TIME: 600,
    }
<?php
namespace Illuminate\Foundation\Console;
use Illuminate\Console\GeneratorCommand;
use Symfony\Component\Console\Attribute\AsCommand;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
use function Laravel\Prompts\confirm;
#[AsCommand(name: 'make:exception')]
class ExceptionMakeCommand extends GeneratorCommand
{
    /**
     * The console command name.
     *
     * @var string
     */
    protected $name = 'make:exception';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Create a new custom exception class';

    /**
     * The type of class being generated.
     *
     * @var string
     */
    protected $type = 'Exception';

    /**
     * Resolve the stub that matches the requested render/report methods.
     *
     * @return string
     */
    protected function getStub()
    {
        $withRender = $this->option('render');
        $withReport = $this->option('report');

        if ($withRender && $withReport) {
            return __DIR__.'/stubs/exception-render-report.stub';
        }

        if ($withRender) {
            return __DIR__.'/stubs/exception-render.stub';
        }

        if ($withReport) {
            return __DIR__.'/stubs/exception-report.stub';
        }

        return __DIR__.'/stubs/exception.stub';
    }

    /**
     * Determine whether an exception class with the given name already exists.
     *
     * @param  string  $rawName
     * @return bool
     */
    protected function alreadyExists($rawName)
    {
        $class = $this->rootNamespace().'Exceptions\\'.$rawName;

        return class_exists($class);
    }

    /**
     * Get the default namespace for the generated class.
     *
     * @param  string  $rootNamespace
     * @return string
     */
    protected function getDefaultNamespace($rootNamespace)
    {
        return $rootNamespace.'\Exceptions';
    }

    /**
     * Interact further with the user if they were prompted for missing arguments.
     *
     * @param  \Symfony\Component\Console\Input\InputInterface  $input
     * @param  \Symfony\Component\Console\Output\OutputInterface  $output
     * @return void
     */
    protected function afterPromptingForMissingArguments(InputInterface $input, OutputInterface $output)
    {
        if ($this->didReceiveOptions($input)) {
            return;
        }

        foreach ([
            'report' => 'Should the exception have a report method?',
            'render' => 'Should the exception have a render method?',
        ] as $option => $question) {
            $input->setOption($option, confirm($question, default: false));
        }
    }

    /**
     * Get the console command options.
     *
     * @return array
     */
    protected function getOptions()
    {
        return [
            ['force', 'f', InputOption::VALUE_NONE, 'Create the class even if the exception already exists'],
            ['render', null, InputOption::VALUE_NONE, 'Create the exception with an empty render method'],
            ['report', null, InputOption::VALUE_NONE, 'Create the exception with an empty report method'],
        ];
    }
}
#ifndef BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
#define BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
// Copyright (c) 2003 David Abrahams
// Copyright (c) 2003 Peter Dimov
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
// Deprecated forwarding header: emits a deprecation diagnostic pointing at
// the relocated header, then includes the implementation from its new home
// under boost/smart_ptr/detail/.
#include <boost/config/header_deprecated.hpp>
BOOST_HEADER_DEPRECATED("<boost/smart_ptr/detail/quick_allocator.hpp>")
#include <boost/smart_ptr/detail/quick_allocator.hpp>
#endif  // #ifndef BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
""" Python Character Mapping Codec generated from 'GREEK.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00b9, # SUPERSCRIPT ONE
0x0082: 0x00b2, # SUPERSCRIPT TWO
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00b3, # SUPERSCRIPT THREE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x0385, # GREEK DIALYTIKA TONOS
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x0384, # GREEK TONOS
0x008c: 0x00a8, # DIAERESIS
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00a3, # POUND SIGN
0x0093: 0x2122, # TRADE MARK SIGN
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x2022, # BULLET
0x0097: 0x00bd, # VULGAR FRACTION ONE HALF
0x0098: 0x2030, # PER MILLE SIGN
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00a6, # BROKEN BAR
0x009c: 0x00ad, # SOFT HYPHEN
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a2: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a3: 0x0398, # GREEK CAPITAL LETTER THETA
0x00a4: 0x039b, # GREEK CAPITAL LETTER LAMBDA
0x00a5: 0x039e, # GREEK CAPITAL LETTER XI
0x00a6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00ab: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00ac: 0x00a7, # SECTION SIGN
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x00b0, # DEGREE SIGN
0x00af: 0x0387, # GREEK ANO TELEIA
0x00b0: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x00a5, # YEN SIGN
0x00b5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00b6: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00b7: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00b8: 0x0397, # GREEK CAPITAL LETTER ETA
0x00b9: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ba: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00bb: 0x039c, # GREEK CAPITAL LETTER MU
0x00bc: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00bd: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00be: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00bf: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00c0: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00c1: 0x039d, # GREEK CAPITAL LETTER NU
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00c4: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00cc: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00cd: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00ce: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00cf: 0x0153, # LATIN SMALL LIGATURE OE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2015, # HORIZONTAL BAR
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00d8: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00d9: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00da: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00db: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00dc: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00dd: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00de: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00df: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00e0: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e1: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e2: 0x03b2, # GREEK SMALL LETTER BETA
0x00e3: 0x03c8, # GREEK SMALL LETTER PSI
0x00e4: 0x03b4, # GREEK SMALL LETTER DELTA
0x00e5: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00e6: 0x03c6, # GREEK SMALL LETTER PHI
0x00e7: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00e8: 0x03b7, # GREEK SMALL LETTER ETA
0x00e9: 0x03b9, # GREEK SMALL LETTER IOTA
0x00ea: 0x03be, # GREEK SMALL LETTER XI
0x00eb: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00ec: 0x03bb, # GREEK SMALL LETTER LAMBDA
0x00ed: 0x03bc, # GREEK SMALL LETTER MU
0x00ee: 0x03bd, # GREEK SMALL LETTER NU
0x00ef: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00f0: 0x03c0, # GREEK SMALL LETTER PI
0x00f1: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00f2: 0x03c1, # GREEK SMALL LETTER RHO
0x00f3: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00f4: 0x03c4, # GREEK SMALL LETTER TAU
0x00f5: 0x03b8, # GREEK SMALL LETTER THETA
0x00f6: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00f7: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00f8: 0x03c7, # GREEK SMALL LETTER CHI
0x00f9: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00fa: 0x03b6, # GREEK SMALL LETTER ZETA
0x00fb: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00fc: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fd: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00fe: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
//! Identifier for plugins.
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::num::NonZeroU8;
use thiserror::Error;
const IDENTIFIER_SEPARATOR: u8 = b':';
const PLUGIN_PREFIX: &str = "tauri-plugin-";
const CORE_PLUGIN_IDENTIFIER_PREFIX: &str = "core:";
// <https://doc.rust-lang.org/cargo/reference/manifest.html#the-name-field>
const MAX_LEN_PREFIX: usize = 64 - PLUGIN_PREFIX.len();
const MAX_LEN_BASE: usize = 64;
const MAX_LEN_IDENTIFIER: usize = MAX_LEN_PREFIX + 1 + MAX_LEN_BASE;
/// Plugin identifier.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Identifier {
inner: String,
separator: Option<NonZeroU8>,
}
#[cfg(feature = "schema")]
impl schemars::JsonSchema for Identifier {
fn schema_name() -> String {
"Identifier".to_string()
}
fn schema_id() -> std::borrow::Cow<'static, str> {
// Include the module, in case a type with the same name is in another module/crate
std::borrow::Cow::Borrowed(concat!(module_path!(), "::Identifier"))
}
fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema {
String::json_schema(gen)
}
}
impl AsRef<str> for Identifier {
#[inline(always)]
fn as_ref(&self) -> &str {
&self.inner
}
}
impl Identifier {
/// Get the identifier str.
#[inline(always)]
pub fn get(&self) -> &str {
self.as_ref()
}
/// Get the identifier without prefix.
pub fn get_base(&self) -> &str {
match self.separator_index() {
None => self.get(),
Some(i) => &self.inner[i + 1..],
}
}
/// Get the prefix of the identifier.
pub fn get_prefix(&self) -> Option<&str> {
self.separator_index().map(|i| &self.inner[0..i])
}
/// Set the identifier prefix.
pub fn set_prefix(&mut self) -> Result<(), ParseIdentifierError> {
todo!()
}
/// Get the identifier string and its separator.
pub fn into_inner(self) -> (String, Option<NonZeroU8>) {
(self.inner, self.separator)
}
fn separator_index(&self) -> Option<usize> {
self.separator.map(|i| i.get() as usize)
}
}
#[derive(Debug)]
enum ValidByte {
Separator,
Byte(u8),
}
impl ValidByte {
fn alpha_numeric(byte: u8) -> Option<Self> {
byte.is_ascii_alphanumeric().then_some(Self::Byte(byte))
}
fn alpha_numeric_hyphen(byte: u8) -> Option<Self> {
(byte.is_ascii_alphanumeric() || byte == b'-').then_some(Self::Byte(byte))
}
fn next(&self, next: u8) -> Option<ValidByte> {
match (self, next) {
(ValidByte::Byte(b'-'), IDENTIFIER_SEPARATOR) => None,
(ValidByte::Separator, b'-') => None,
(_, IDENTIFIER_SEPARATOR) => Some(ValidByte::Separator),
(ValidByte::Separator, next) => ValidByte::alpha_numeric(next),
(ValidByte::Byte(b'-'), next) => ValidByte::alpha_numeric_hyphen(next),
(ValidByte::Byte(b'_'), next) => ValidByte::alpha_numeric_hyphen(next),
(ValidByte::Byte(_), next) => ValidByte::alpha_numeric_hyphen(next),
}
}
}
/// Errors that can happen when parsing an identifier.
#[derive(Debug, Error)]
pub enum ParseIdentifierError {
/// Identifier start with the plugin prefix.
#[error("identifiers cannot start with {}", PLUGIN_PREFIX)]
StartsWithTauriPlugin,
/// Identifier empty.
#[error("identifiers cannot be empty")]
Empty,
/// Identifier is too long.
#[error("identifiers cannot be longer than {len}, found {0}", len = MAX_LEN_IDENTIFIER)]
Humongous(usize),
/// Identifier is not in a valid format.
#[error("identifiers can only include lowercase ASCII, hyphens which are not leading or trailing, and a single colon if using a prefix")]
InvalidFormat,
/// Identifier has multiple separators.
#[error(
"identifiers can only include a single separator '{}'",
IDENTIFIER_SEPARATOR
)]
MultipleSeparators,
/// Identifier has a trailing hyphen.
#[error("identifiers cannot have a trailing hyphen")]
TrailingHyphen,
/// Identifier has a prefix without a base.
#[error("identifiers cannot have a prefix without a base")]
PrefixWithoutBase,
}
impl TryFrom<String> for Identifier {
type Error = ParseIdentifierError;
fn try_from(value: String) -> Result<Self, Self::Error> {
if value.starts_with(PLUGIN_PREFIX) {
return Err(Self::Error::StartsWithTauriPlugin);
}
if value.is_empty() {
return Err(Self::Error::Empty);
}
if value.len() > MAX_LEN_IDENTIFIER {
return Err(Self::Error::Humongous(value.len()));
}
let is_core_identifier = value.starts_with(CORE_PLUGIN_IDENTIFIER_PREFIX);
let mut bytes = value.bytes();
// grab the first byte only before parsing the rest
let mut prev = bytes
.next()
.and_then(ValidByte::alpha_numeric)
.ok_or(Self::Error::InvalidFormat)?;
let mut idx = 0;
let mut separator = None;
for byte in bytes {
idx += 1; // we already consumed first item
match prev.next(byte) {
None => return Err(Self::Error::InvalidFormat),
Some(next @ ValidByte::Byte(_)) => prev = next,
Some(ValidByte::Separator) => {
if separator.is_none() || is_core_identifier {
// safe to unwrap because idx starts at 1 and cannot go over MAX_IDENTIFIER_LEN
separator = Some(idx.try_into().unwrap());
prev = ValidByte::Separator
} else {
return Err(Self::Error::MultipleSeparators);
}
}
}
}
match prev {
// empty base
ValidByte::Separator => return Err(Self::Error::PrefixWithoutBase),
// trailing hyphen
ValidByte::Byte(b'-') => return Err(Self::Error::TrailingHyphen),
_ => (),
}
Ok(Self {
inner: value,
separator,
})
}
}
impl<'de> Deserialize<'de> for Identifier {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Self::try_from(String::deserialize(deserializer)?).map_err(serde::de::Error::custom)
}
}
impl Serialize for Identifier {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.get())
}
}
#[cfg(test)]
mod tests {
use super::*;
fn ident(s: impl Into<String>) -> Result<Identifier, ParseIdentifierError> {
Identifier::try_from(s.into())
}
#[test]
fn max_len_fits_in_u8() {
assert!(MAX_LEN_IDENTIFIER < u8::MAX as usize)
}
#[test]
fn format() {
assert!(ident("prefix:base").is_ok());
assert!(ident("prefix3:base").is_ok());
assert!(ident("preFix:base").is_ok());
// bad
assert!(ident("tauri-plugin-prefix:base").is_err());
assert!(ident("-prefix-:-base-").is_err());
assert!(ident("-prefix:base").is_err());
assert!(ident("prefix-:base").is_err());
assert!(ident("prefix:-base").is_err());
assert!(ident("prefix:base-").is_err());
assert!(ident("pre--fix:base--sep").is_ok());
assert!(ident("prefix:base--sep").is_ok());
assert!(ident("pre--fix:base").is_ok());
assert!(ident("prefix::base").is_err());
assert!(ident(":base").is_err());
assert!(ident("prefix:").is_err());
assert!(ident(":prefix:base:").is_err());
assert!(ident("base:").is_err());
assert!(ident("").is_err());
assert!(ident("💩").is_err());
assert!(ident("a".repeat(MAX_LEN_IDENTIFIER + 1)).is_err());
}
#[test]
fn base() {
assert_eq!(ident("prefix:base").unwrap().get_base(), "base");
assert_eq!(ident("base").unwrap().get_base(), "base");
}
#[test]
fn prefix() {
assert_eq!(ident("prefix:base").unwrap().get_prefix(), Some("prefix"));
assert_eq!(ident("base").unwrap().get_prefix(), None);
}
}
#[cfg(feature = "build")]
mod build {
use proc_macro2::TokenStream;
use quote::{quote, ToTokens, TokenStreamExt};
use super::*;
impl ToTokens for Identifier {
fn to_tokens(&self, tokens: &mut TokenStream) {
let s = self.get();
tokens
.append_all(quote! { ::tauri::utils::acl::Identifier::try_from(#s.to_string()).unwrap() })
}
}
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-utils/src/acl/identifier.rs |
<!--
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# 如何创建自定义流水线?
在本指南中,我们将演示如何创建一个自定义流水线并分享到 [Hub](https://hf.co/models),或将其添加到 🤗 Transformers 库中。
首先,你需要决定流水线将能够接受的原始条目。它可以是字符串、原始字节、字典或任何看起来最可能是期望的输入。
尽量保持输入为纯 Python 语言,因为这样可以更容易地实现兼容性(甚至通过 JSON 在其他语言之间)。
这些将是流水线 (`preprocess`) 的 `inputs`。
然后定义 `outputs`。与 `inputs` 相同的策略。越简单越好。这些将是 `postprocess` 方法的输出。
首先继承基类 `Pipeline`,其中包含实现 `preprocess`、`_forward`、`postprocess` 和 `_sanitize_parameters` 所需的 4 个方法。
```python
from transformers import Pipeline
class MyPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, inputs, maybe_arg=2):
model_input = Tensor(inputs["input_ids"])
return {"model_input": model_input}
def _forward(self, model_inputs):
# model_inputs == {"model_input": model_input}
outputs = self.model(**model_inputs)
# Maybe {"logits": Tensor(...)}
return outputs
def postprocess(self, model_outputs):
best_class = model_outputs["logits"].softmax(-1)
return best_class
```
这种分解的结构旨在为 CPU/GPU 提供相对无缝的支持,同时支持在不同线程上对 CPU 进行预处理/后处理。
`preprocess` 将接受最初定义的输入,并将其转换为可供模型输入的内容。它可能包含更多信息,通常是一个 `Dict`。
`_forward` 是实现细节,不应直接调用。`forward` 是首选的调用方法,因为它包含保障措施,以确保一切都在预期的设备上运作。
如果任何内容与实际模型相关,它应该属于 `_forward` 方法,其他内容应该在 preprocess/postprocess 中。
`postprocess` 方法将接受 `_forward` 的输出,并将其转换为之前确定的最终输出。
`_sanitize_parameters` 存在是为了允许用户在任何时候传递任何参数,无论是在初始化时 `pipeline(...., maybe_arg=4)`
还是在调用时 `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`。
`_sanitize_parameters` 的返回值是将直接传递给 `preprocess`、`_forward` 和 `postprocess` 的 3 个关键字参数字典。
如果调用方没有使用任何额外参数调用,则不要填写任何内容。这样可以保留函数定义中的默认参数,这总是更"自然"的。
在分类任务中,一个经典的例子是在后处理中使用 `top_k` 参数。
```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```
为了实现这一点,我们将更新我们的 `postprocess` 方法,将默认参数设置为 `5`,
并编辑 `_sanitize_parameters` 方法,以允许这个新参数。
```python
def postprocess(self, model_outputs, top_k=5):
best_class = model_outputs["logits"].softmax(-1)
# Add logic to handle top_k
return best_class
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
postprocess_kwargs = {}
if "top_k" in kwargs:
postprocess_kwargs["top_k"] = kwargs["top_k"]
return preprocess_kwargs, {}, postprocess_kwargs
```
尽量保持简单输入/输出,最好是可 JSON 序列化的,因为这样可以使流水线的使用非常简单,而不需要用户了解新的对象类型。
通常也相对常见地支持许多不同类型的参数以便使用(例如音频文件,可以是文件名、URL 或纯字节)。
## 将其添加到支持的任务列表中
要将你的 `new-task` 注册到支持的任务列表中,你需要将其添加到 `PIPELINE_REGISTRY` 中:
```python
from transformers.pipelines import PIPELINE_REGISTRY
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
)
```
如果需要,你可以指定一个默认模型,此时它应该带有一个特定的修订版本(可以是分支名称或提交哈希,这里我们使用了 `"abcdef"`),以及类型:
```python
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
default={"pt": ("user/awesome_model", "abcdef")},
type="text", # current support type: text, audio, image, multimodal
)
```
## 在 Hub 上分享你的流水线
要在 Hub 上分享你的自定义流水线,你只需要将 `Pipeline` 子类的自定义代码保存在一个 Python 文件中。
例如,假设我们想使用一个自定义流水线进行句对分类,如下所示:
```py
import numpy as np
from transformers import Pipeline
def softmax(outputs):
maxes = np.max(outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
```
这个实现与框架无关,适用于 PyTorch 和 TensorFlow 模型。如果我们将其保存在一个名为
`pair_classification.py` 的文件中,然后我们可以像这样导入并注册它:
```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
tf_model=TFAutoModelForSequenceClassification,
)
```
完成这些步骤后,我们可以将其与预训练模型一起使用。例如,`sgugger/finetuned-bert-mrpc`
已经在 MRPC 数据集上进行了微调,用于将句子对分类为是释义或不是释义。
```py
from transformers import pipeline
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
```
然后,我们可以通过在 `Repository` 中使用 `save_pretrained` 方法将其分享到 Hub 上:
```py
from huggingface_hub import Repository
repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline")
classifier.save_pretrained("test-dynamic-pipeline")
repo.push_to_hub()
```
这将会复制包含你定义的 `PairClassificationPipeline` 的文件到文件夹 `"test-dynamic-pipeline"` 中,
同时保存流水线的模型和分词器,然后将所有内容推送到仓库 `{your_username}/test-dynamic-pipeline` 中。
之后,只要提供选项 `trust_remote_code=True`,任何人都可以使用它:
```py
from transformers import pipeline
classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
```
## 将流水线添加到 🤗 Transformers
如果你想将你的流水线贡献给 🤗 Transformers,你需要在 `pipelines` 子模块中添加一个新模块,
其中包含你的流水线的代码,然后将其添加到 `pipelines/__init__.py` 中定义的任务列表中。
然后,你需要添加测试。创建一个新文件 `tests/test_pipelines_MY_PIPELINE.py`,其中包含其他测试的示例。
`run_pipeline_test` 函数将非常通用,并在每种可能的架构上运行小型随机模型,如 `model_mapping` 和 `tf_model_mapping` 所定义。
这对于测试未来的兼容性非常重要,这意味着如果有人为 `XXXForQuestionAnswering` 添加了一个新模型,
流水线测试将尝试在其上运行。由于模型是随机的,所以不可能检查实际值,这就是为什么有一个帮助函数 `ANY`,它只是尝试匹配流水线的输出类型。
你还 **需要** 实现 2(最好是 4)个测试。
- `test_small_model_pt`:为这个流水线定义一个小型模型(结果是否合理并不重要),并测试流水线的输出。
结果应该与 `test_small_model_tf` 的结果相同。
- `test_small_model_tf`:为这个流水线定义一个小型模型(结果是否合理并不重要),并测试流水线的输出。
结果应该与 `test_small_model_pt` 的结果相同。
- `test_large_model_pt`(可选):在一个真实的流水线上测试流水线,结果应该是有意义的。
这些测试速度较慢,应该被如此标记。这里的目标是展示流水线,并确保在未来的发布中没有漂移。
- `test_large_model_tf`(可选):在一个真实的流水线上测试流水线,结果应该是有意义的。
这些测试速度较慢,应该被如此标记。这里的目标是展示流水线,并确保在未来的发布中没有漂移。 | unknown | github | https://github.com/huggingface/transformers | docs/source/zh/add_new_pipeline.md |
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection, PolyCollection
from mpl_toolkits.mplot3d.art3d import Line3DCollection, Poly3DCollection
from . import common
from . import callbacks
def _map_none(value):
if isinstance(value, str):
if value.lower() == 'none':
return 'None'
else:
return value
else:
# NOTE: including None - we want this to fallback on the cycler
return value
def _to_linebreak_list(thing, N=1):
if isinstance(thing, list):
return thing
else:
return [thing]*N
class CallGroup(common.Group):
    """A collection of <autofig.call.Call> children with group-wise accessors."""
    def __init__(self, items):
        super(CallGroup, self).__init__(Call, [], items)

    @property
    def callbacks(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.callbacks> for each child
        <autofig.call.Call>
        """
        return self._get_attrs('callbacks')

    def connect_callback(self, callback):
        """Connect *callback* to every child <autofig.call.Call>."""
        for child in self._items:
            child.connect_callback(callback)

    @property
    def i(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.i> for each child
        <autofig.call.Call>
        """
        return CallDimensionGroup(self._get_attrs('i'))

    @property
    def x(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.x> for each child
        <autofig.call.Call>
        """
        return CallDimensionGroup(self._get_attrs('x'))

    @property
    def y(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.y> for each child
        <autofig.call.Call>
        """
        return CallDimensionGroup(self._get_attrs('y'))

    @property
    def z(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.z> for each child
        <autofig.call.Call>
        """
        return CallDimensionGroup(self._get_attrs('z'))

    @property
    def consider_for_limits(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Call.consider_for_limits> for each
        child <autofig.call.Call>
        """
        return self._get_attrs('consider_for_limits')

    @consider_for_limits.setter
    def consider_for_limits(self, consider_for_limits):
        return self._set_attrs('consider_for_limits', consider_for_limits)

    def draw(self, *args, **kwargs):
        """
        Call the draw method (<autofig.call.Plot.draw> or
        <autofig.call.Mesh.draw>) of each child <autofig.call.Call>.

        See also:

        * <autofig.draw>
        * <autofig.figure.Figure.draw>
        * <autofig.axes.Axes.draw>
        * <autofig.call.Plot.draw>
        * <autofig.call.Mesh.draw>

        Arguments
        ------------
        * `*args`: all arguments are passed on to each <autofig.call.Call>.
        * `**kwargs`: all keyword arguments are passed on to each
            <autofig.call.Call>.

        Returns
        -----------
        * (list): list of all created matplotlib artists
        """
        # CallGroup.draw
        return_artists = []
        for child in self._items:
            return_artists.extend(child.draw(*args, **kwargs))

        return return_artists
class PlotGroup(CallGroup):
    """A <autofig.call.CallGroup> in which every child is a <autofig.call.Plot>."""
    @property
    def s(self):
        """
        Size dimension of each child.

        Returns
        ---------
        * (list) a list of <autofig.call.Plot.s> for each child
        <autofig.call.Plot>
        """
        return CallDimensionSGroup(self._get_attrs('s'))

    @property
    def c(self):
        """
        Color dimension of each child.

        Returns
        ---------
        * (list) a list of <autofig.call.Plot.c> for each child
        <autofig.call.Plot>
        """
        return CallDimensionCGroup(self._get_attrs('c'))

    @property
    def size_scale(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.Plot.size_scale> for each child
        <autofig.call.Plot>
        """
        return self._get_attrs('size_scale')

    @size_scale.setter
    def size_scale(self, size_scale):
        # broadcast the provided value(s) onto every child Plot
        return self._set_attrs('size_scale', size_scale)
class MeshGroup(CallGroup):
    """A <autofig.call.CallGroup> in which every child is a <autofig.call.Mesh>."""
    @property
    def fc(self):
        """
        Facecolor dimension of each child.

        Returns
        ---------
        * (list) a list of <autofig.call.Mesh.fc> for each child
        <autofig.call.Mesh>
        """
        return CallDimensionCGroup(self._get_attrs('fc'))

    @property
    def ec(self):
        """
        Edgecolor dimension of each child.

        Returns
        ---------
        * (list) a list of <autofig.call.Mesh.ec> for each child
        <autofig.call.Mesh>
        """
        return CallDimensionCGroup(self._get_attrs('ec'))
def make_callgroup(items):
    """Wrap *items* in the most specific group class that fits all of them.

    All-Plot lists become a PlotGroup, all-Mesh lists a MeshGroup, anything
    mixed a generic CallGroup.  (An empty list satisfies the first check and
    becomes a PlotGroup, matching the original truthiness of np.all([]).)
    """
    if all(isinstance(item, Plot) for item in items):
        return PlotGroup(items)
    if all(isinstance(item, Mesh) for item in items):
        return MeshGroup(items)
    return CallGroup(items)
class Call(object):
    def __init__(self, x=None, y=None, z=None, i=None,
                 xerror=None, xunit=None, xlabel=None, xnormals=None,
                 yerror=None, yunit=None, ylabel=None, ynormals=None,
                 zerror=None, zunit=None, zlabel=None, znormals=None,
                 iunit=None, itol=0.0,
                 axorder=None, axpos=None,
                 title=None,
                 label=None,
                 consider_for_limits=True,
                 uncover=False,
                 trail=False,
                 **kwargs):
        """
        Create a <autofig.call.Call> object which defines a single call to
        matplotlib.

        Arguments
        -------------
        * `x`, `y`, `z` (list/array, optional, default=None): values for the
            x/y/z axes.  Access via <autofig.call.Call.x>,
            <autofig.call.Call.y>, <autofig.call.Call.z>.
        * `i` (list/array or string, optional, default=None): values for the
            independent-variable.  If a string, can be one of: 'x', 'y', 'z'
            to reference an existing array.  Access via <autofig.call.Call.i>.
        * `xerror`, `yerror`, `zerror` (float or list/array, optional,
            default=None): errors for the corresponding dimension.  See
            e.g. <autofig.call.CallDimensionX.error>.
        * `xunit`, `yunit`, `zunit`, `iunit` (string or astropy unit,
            optional, default=None): units for the corresponding dimension.
            See e.g. <autofig.call.CallDimensionX.unit>.
        * `xlabel`, `ylabel`, `zlabel` (string, optional, default=None):
            labels for the corresponding dimension.  See e.g.
            <autofig.call.CallDimensionX.label>.
        * `xnormals`, `ynormals`, `znormals` (list/array, optional,
            default=None): normals for the corresponding dimension.
            `xnormals`/`ynormals` are currently ignored; `znormals` is
            currently only used for <autofig.call.Mesh>.
        * `itol` (float, optional, default=0.0): see <autofig.call.DimensionI.tol>.
        * `axorder` (int, optional, default=None): see <autofig.call.Call.axorder>.
        * `axpos` (tuple, optional, default=None): see <autofig.call.Call.axpos>.
        * `title` (string, optional, default=None): see <autofig.call.Call.title>.
        * `label` (string, optional, default=None): see <autofig.call.Call.label>.
        * `consider_for_limits` (bool, optional, default=True): see
            <autofig.call.Call.consider_for_limits>.
        * `uncover` (bool, optional, default=False): see <autofig.call.Call.uncover>.
        * `trail` (bool or Float, optional, default=False): see
            <autofig.call.Call.trail>.
        * `**kwargs`: additional keyword arguments are stored and passed on when
            attaching to a parent axes.  See <autofig.axes.Axes.add_call>.

        Returns
        ---------
        * the instantiated <autofig.call.Call> object.
        """
        # string tag used for isinstance-style checks without a circular import
        self._class = 'Call'
        self._axes = None
        self._backend_objects = []
        self._callbacks = []

        self._x = CallDimensionX(self, x, xerror, xunit, xlabel, xnormals)
        self._y = CallDimensionY(self, y, yerror, yunit, ylabel, ynormals)
        self._z = CallDimensionZ(self, z, zerror, zunit, zlabel, znormals)

        # the independent dimension must be created last: if `i` references
        # another dimension (e.g. 'x') it needs those objects (and their
        # units, etc) to already exist.
        self._i = CallDimensionI(self, i, iunit, itol)

        # these assignments all route through property setters, which
        # validate their input and raise TypeError/ValueError on bad values
        self.consider_for_limits = consider_for_limits
        self.uncover = uncover
        self.trail = trail

        self.axorder = axorder
        self.axpos = axpos
        self.title = title
        self.label = label

        self.kwargs = kwargs
# TODO: add style
def _get_backend_object():
return self._backend_artists
@property
def callbacks(self):
return self._callbacks
def connect_callback(self, callback):
if not isinstance(callback, str):
callback = callback.__name__
if callback not in self.callbacks:
self._callbacks.append(callback)
@property
def axes(self):
"""
Returns
--------
* (<autofig.axes.Axes> or None): the parent axes, if applicable.
"""
# no setter as this can only be set internally when attaching to an axes
return self._axes
@property
def figure(self):
"""
Returns
--------
* (<autofig.figure.Figure> or None): the parent figure, if applicable.
"""
# no setter as this can only be set internally when attaching to an axes
if self.axes is None:
return None
return self.axes.figure
    @property
    def i(self):
        """
        The independent-variable dimension of this call.

        Returns
        ----------
        * <autofig.call.CallDimensionI>
        """
        return self._i

    @property
    def indep(self):
        """
        Shortcut to <autofig.call.Call.i>

        Returns
        ----------
        * <autofig.call.CallDimensionI>
        """
        return self.i

    @property
    def x(self):
        """
        The x dimension of this call.

        Returns
        ----------
        * <autofig.call.CallDimensionX>
        """
        return self._x

    @property
    def y(self):
        """
        The y dimension of this call.

        Returns
        ----------
        * <autofig.call.CallDimensionY>
        """
        return self._y

    @property
    def z(self):
        """
        The z dimension of this call.

        Returns
        ----------
        * <autofig.call.CallDimensionZ>
        """
        return self._z
@property
def consider_for_limits(self):
"""
Returns
-----------
* (bool): whether the data in this <autofig.call.Call> should be considered
when determining axes limits.
"""
return self._consider_for_limits
@consider_for_limits.setter
def consider_for_limits(self, consider):
if not isinstance(consider, bool):
raise TypeError("consider_for_limits must be of type bool")
self._consider_for_limits = consider
@property
def uncover(self):
"""
Returns
---------
* (bool): whether uncover is enabled
"""
return self._uncover
@uncover.setter
def uncover(self, uncover):
if not isinstance(uncover, bool):
raise TypeError("uncover must be of type bool")
self._uncover = uncover
@property
def trail(self):
    """
    Returns
    ---------
    * (bool or Float): whether trail is enabled.  If a float, then a value
        between 0 and 1 indicating the length of the trail.
    """
    return self._trail

@trail.setter
def trail(self, trail):
    # bools pass through untouched, ints are promoted to float,
    # anything else is rejected
    if not isinstance(trail, (bool, float)):
        if not isinstance(trail, int):
            raise TypeError("trail must be of type bool or float")
        trail = float(trail)

    # note: booleans are valid here since True == 1 and False == 0
    if not (0 <= trail <= 1):
        raise ValueError("trail must be between 0 and 1")

    self._trail = trail
@property
def axorder(self):
    """
    See tutorial:

    * [Subplot/Axes Positioning](../../tutorials/subplot_positioning.md)

    Returns
    --------
    * (int or None)
    """
    return self._axorder

@axorder.setter
def axorder(self, axorder):
    # None unsets the ordering; otherwise only ints are accepted
    if axorder is None:
        self._axorder = None
    elif isinstance(axorder, int):
        self._axorder = axorder
    else:
        raise TypeError("axorder must be of type int")
@property
def axpos(self):
    """
    See tutorial:

    * [Subplot/Axes Positioning](../../tutorials/subplot_positioning.md)

    Returns
    --------
    * (tuple or None)
    """
    return self._axpos

@axpos.setter
def axpos(self, axpos):
    # Accepts: None (unset); a 3- or 6-length tuple/list/array of ints;
    # or an int in subplot (3-digit) / gridspec (6-digit) shorthand.
    if axpos is None:
        self._axpos = axpos
        return

    if isinstance(axpos, list) or isinstance(axpos, np.ndarray):
        axpos = tuple(axpos)

    # BUGFIX: the original used np.all(...) on a generator expression,
    # which is always truthy (it tests the generator object itself), so
    # non-integer tuple entries slipped through unvalidated.  Use the
    # builtin all() instead; np.integer is accepted so that tuples built
    # from numpy arrays still validate.
    if isinstance(axpos, tuple) and len(axpos) in (3, 6) and all(isinstance(ap, (int, np.integer)) for ap in axpos):
        self._axpos = axpos

    elif isinstance(axpos, int) and axpos >= 100 and axpos < 1000:
        # 3-digit subplot shorthand, e.g. 221 -> (2, 2, 1)
        self._axpos = (int(axpos/100), int(axpos/10 % 10), int(axpos % 10))

    elif isinstance(axpos, int) and axpos >= 110011 and axpos < 999999:
        # 6-digit gridspec shorthand, e.g. 112233 -> (1, 1, 2, 2, 3, 3)
        self._axpos = tuple([int(ap) for ap in str(axpos)])

    else:
        raise ValueError("axpos must be of type int or tuple between 100 and 999 (subplot syntax: ncols, nrows, ind) or 110011 and 999999 (gridspec syntax: ncols, nrows, indx, indy, widthx, widthy)")
@property
def title(self):
    """
    Returns
    -----------
    * (str): title used for axes title
    """
    return self._title

@title.setter
def title(self, title):
    # None clears the title; anything else must be a string
    if title is not None and not isinstance(title, str):
        raise TypeError("title must be of type str")
    self._title = title
@property
def label(self):
    """
    Returns
    -----------
    * (str): label used for legends
    """
    return self._label

@label.setter
def label(self, label):
    # None clears the label; anything else must be a string
    if label is not None and not isinstance(label, str):
        raise TypeError("label must be of type str")
    self._label = label
class Plot(Call):
    """
    A single plotting call (scatter/line) with x/y/z/c/s/i dimensions.
    See <autofig.call.Plot.__init__> for the full argument listing.
    """
    def __init__(self, x=None, y=None, z=None, c=None, s=None, i=None,
                 xerror=None, xunit=None, xlabel=None,
                 yerror=None, yunit=None, ylabel=None,
                 zerror=None, zunit=None, zlabel=None,
                 cunit=None, clabel=None, cmap=None,
                 sunit=None, slabel=None, smap=None, smode=None,
                 iunit=None, itol=0.0,
                 axorder=None, axpos=None,
                 title=None,
                 label=None,
                 marker=None,
                 linestyle=None, linebreak=None,
                 highlight=True, uncover=False, trail=False,
                 consider_for_limits=True,
                 **kwargs):
        """
        Create a <autofig.call.Plot> object which defines a single call to
        matplotlib.

        See also:

        * <autofig.call.Mesh>

        Note that the following keyword arguments are not allowed and will raise
        an error suggesting the appropriate autofig argument:

        * `markersize` or `ms`: use `size` or `s`
        * `linewidth` or `lw`: use `size` or `s`

        Arguments
        -------------
        * `x` (list/array, optional, default=None): array of values for the x-axes.
            Access via <autofig.call.Plot.x>.
        * `y` (list/array, optional, default=None): array of values for the y-axes.
            Access via <autofig.call.Plot.y>.
        * `z` (list/array, optional, default=None): array of values for the z-axes.
            Access via <autofig.call.Plot.z>
        * `c` or `color` (list/array, optional, default=None): array of values for the
            color-direction.  Access via <autofig.call.Plot.c>.  Note: `color`
            takes precedence over `c` if both are provided.
        * `s` or `size` (list/array, optional, default=None): array of values for the
            size-direction.  Access via <autofig.call.Plot.s>.  Note: `size` takes
            precedence over `s` if both are provided.
        * `i` (list/array or string, optional, default=None): array of values for
            the independent-variable.  If a string, can be one of: 'x', 'y', 'z',
            'c', 's' to reference an existing array.  Access via
            <autofig.call.Plot.i>.
        * `xerror` (float or list/array, optional, default=None): errors for `x`.
            See <autofig.call.Plot.x> and <autofig.call.CallDimensionX.error>.
        * `xunit` (string or astropy unit, optional, default=None): units for `x`.
            See <autofig.call.Plot.x> and <autofig.call.CallDimensionX.unit>.
        * `xlabel` (string, optional, default=None): label for `x`.
            See <autofig.call.Plot.x> and <autofig.call.CallDimensionX.label>.
        * `yerror` (float or list/array, optional, default=None): errors for `y`.
            See <autofig.call.Plot.y> and <autofig.call.CallDimensionY.error>.
        * `yunit` (string or astropy unit, optional, default=None): units for `y`.
            See <autofig.call.Plot.y> and <autofig.call.CallDimensionY.unit>.
        * `ylabel` (string, optional, default=None): label for `y`.
            See <autofig.call.Plot.y> and <autofig.call.CallDimensionY.label>.
        * `zerror` (float or list/array, optional, default=None): errors for `z`.
            See <autofig.call.Plot.z> and <autofig.call.CallDimensionZ.error>.
        * `zunit` (string or astropy unit, optional, default=None): units for `z`.
            See <autofig.call.Plot.z> and <autofig.call.CallDimensionZ.unit>.
        * `zlabel` (string, optional, default=None): label for `z`.
            See <autofig.call.Plot.z> and <autofig.call.CallDimensionZ.label>.
        * `cerror` (float or list/array, optional, default=None): errors for `c`.
            See <autofig.call.Plot.c> and <autofig.call.CallDimensionC.error>.
        * `cunit` (string or astropy unit, optional, default=None): units for `c`.
            See <autofig.call.Plot.c> and <autofig.call.CallDimensionC.unit>.
        * `clabel` (string, optional, default=None): label for `c`.
            See <autofig.call.Plot.c> and <autofig.call.CallDimensionC.label>.
        * `serror` (float or list/array, optional, default=None): errors for `s`.
            See <autofig.call.Plot.s> and <autofig.call.CallDimensionS.error>.
        * `sunit` (string or astropy unit, optional, default=None): units for `s`.
            See <autofig.call.Plot.s> and <autofig.call.CallDimensionS.unit>.
        * `slabel` (string, optional, default=None): label for `s`.
            See <autofig.call.Plot.s> and <autofig.call.CallDimensionS.label>.
        * `iunit` (string or astropy unit, optional, default=None): units for `i`.
            See <autofig.call.Plot.i> and <autofig.call.CallDimensionI.unit>.
        * `itol` (float, optional, default=0.0): see <autofig.call.DimensionI.tol>.
        * `axorder` (int, optional, default=None): see <autofig.call.Plot.axorder>.
        * `axpos` (tuple, optional, default=None): see <autofig.call.Plot.axpos>.
        * `title` (string, optional, default=None): see <autofig.call.Plot.title>.
        * `label` (string, optional, default=None): see <autofig.call.Plot.label>.
        * `marker` or `m` (string, optional, default=None): see <autofig.call.Plot.marker>.
            Note: `marker` takes precedence over `m` if both are provided.
        * `linestyle` or `ls` (string, optional, default=None): see
            <autofig.call.Plot.linestyle>. Note: `linestyle` takes precedence
            over `ls` if both are provided.
        * `linebreak` (string, optional, default=None): see <autofig.call.Plot.linebreak>.
        * `highlight` (bool, optional, default=True): see <autofig.call.Plot.highlight>.
        * `highlight_marker` (string, optional, default=None)
        * `highlight_linestyle` or `highlight_ls` (string, optional, default=None):
            Note: `highlight_linestyle` takes precedence over `highlight_ls` if
            both are provided.
        * `highlight_size` or `highlight_s` (float, optional, default=None):
            Note: `highlight_size` takes precedence over `highlight_s` if both
            are provided.
        * `highlight_color` or `highlight_c` (string, optional, default=None):
            Note: `highlight_color` takes precedence over `highlight_c` if both
            are provided.
        * `consider_for_limits` (bool, optional, default=True): see
            <autofig.call.Call.consider_for_limits>.
        * `uncover` (bool, optional, default=False): see <autofig.call.Call.uncover>.
        * `trail` (bool or Float, optional, default=False): see
            <autofig.call.Call.trail>.
        * `**kwargs`: additional keyword arguments are stored and passed on when
            attaching to a parent axes.  See <autofig.axes.Axes.add_call>.

        Returns
        ---------
        * the instantiated <autofig.call.Plot> object.
        """
        # reject matplotlib-style aliases that conflict with autofig's
        # size-direction handling
        if 'markersize' in kwargs.keys():
            raise ValueError("use 'size' or 's' instead of 'markersize'")
        if 'ms' in kwargs.keys():
            raise ValueError("use 'size' or 's' instead of 'ms'")
        if 'linewidth' in kwargs.keys():
            raise ValueError("use 'size' or 's' instead of 'linewidth'")
        if 'lw' in kwargs.keys():
            raise ValueError("use 'size' or 's' instead of 'lw'")

        # 'size'/'sizemap' (if provided) take precedence over 's'/'smap'
        size = kwargs.pop('size', None)
        s = size if size is not None else s
        smap = kwargs.pop('sizemap', smap)
        self._s = CallDimensionS(self, s, None, sunit, slabel,
                                 smap=smap, mode=smode)

        # 'color'/'colormap' (if provided) take precedence over 'c'/'cmap'
        color = kwargs.pop('color', None)
        c = color if color is not None else c
        cmap = kwargs.pop('colormap', cmap)
        self._c = CallDimensionC(self, c, None, cunit, clabel, cmap=cmap)

        self._axes = None  # super will do this again, but we need it for setting marker, etc
        self._axes_c = None
        self._axes_s = None

        self.highlight = highlight

        highlight_marker = kwargs.pop('highlight_marker', None)
        self.highlight_marker = highlight_marker

        # long-form highlight kwargs take precedence over their short aliases
        highlight_s = kwargs.pop('highlight_s', None)
        highlight_size = kwargs.pop('highlight_size', highlight_s)
        self.highlight_size = highlight_size

        highlight_c = kwargs.pop('highlight_c', None)
        highlight_color = kwargs.pop('highlight_color', highlight_c)
        self.highlight_color = highlight_color

        highlight_ls = kwargs.pop('highlight_ls', None)
        highlight_linestyle = kwargs.pop('highlight_linestyle', highlight_ls)
        self.highlight_linestyle = highlight_linestyle

        m = kwargs.pop('m', None)
        self.marker = marker if marker is not None else m

        ls = kwargs.pop('ls', None)
        self.linestyle = linestyle if linestyle is not None else ls

        self.linebreak = linebreak

        super(Plot, self).__init__(i=i, iunit=iunit, itol=itol,
                                   x=x, xerror=xerror, xunit=xunit, xlabel=xlabel,
                                   y=y, yerror=yerror, yunit=yunit, ylabel=ylabel,
                                   z=z, zerror=zerror, zunit=zunit, zlabel=zlabel,
                                   consider_for_limits=consider_for_limits,
                                   uncover=uncover, trail=trail,
                                   axorder=axorder, axpos=axpos,
                                   title=title, label=label,
                                   **kwargs
                                   )

        # keep marker/linewidth sizes in sync when the figure is resized
        self.connect_callback(callbacks.update_sizes)
def __repr__(self):
    """Short summary listing the dimensions that currently hold data."""
    populated = [d for d in ('i', 'x', 'y', 'z', 's', 'c')
                 if getattr(self, d).value is not None]
    return "<Call:Plot | dims: {}>".format(", ".join(populated))
@classmethod
def from_dict(cls, dict):
    """
    Instantiate a <autofig.call.Plot> from a dictionary, as produced by
    <autofig.call.Plot.to_dict>.

    NOTE: the parameter name shadows the builtin `dict`; kept as-is for
    backwards-compatibility with keyword callers.
    """
    return cls(**dict)
def to_dict(self):
    """Serialize this call to a plain dictionary (see `from_dict`)."""
    out = {'classname': self.__class__.__name__}
    # the dimensions serialize themselves
    for dim in ('x', 'y', 'z', 'c', 's', 'i'):
        out[dim] = getattr(self, dim).to_dict()
    # scalar settings are exported from their private backing attributes
    for attr in ('axorder', 'axpos', 'title', 'label',
                 'marker', 'linestyle', 'linebreak',
                 'highlight', 'highlight_linestyle', 'highlight_size',
                 'highlight_color', 'highlight_marker',
                 'uncover', 'trail', 'consider_for_limits'):
        out[attr] = getattr(self, '_' + attr)
    return out
@property
def axes_c(self):
    """The parent axes' color dimension this call maps onto (or None)."""
    # currently no setter as this really should be handle by axes.add_call
    return self._axes_c
@property
def axes_s(self):
    """The parent axes' size dimension this call maps onto (or None)."""
    # currently no setter as this really should be handle by axes.add_call
    return self._axes_s
@property
def do_sizescale(self):
    """
    Returns
    ---------
    * (bool): whether sizes need to be rescaled per-point, i.e. `s` is an
      array (not a scalar) and both `x` and `y` hold data.
    """
    x = self.x.get_value()
    y = self.y.get_value()
    s = self.s.get_value()
    # NOTE: the original also fetched z here but never used it; the
    # (potentially non-trivial) call was removed.
    # DETERMINE WHICH SCALINGS WE NEED TO USE
    if x is not None and y is not None:
        return s is not None and not (isinstance(s, float) or isinstance(s, int))
    else:
        return False
@property
def do_colorscale(self):
    """
    Returns
    ---------
    * (bool): whether colors need to be mapped per-point, i.e. `c` is an
      array (not a fixed color string) and both `x` and `y` hold data.
    """
    x = self.x.get_value()
    y = self.y.get_value()
    c = self.c.get_value()
    # NOTE: the original also fetched z here but never used it; the
    # (potentially non-trivial) call was removed.
    # DETERMINE WHICH SCALINGS WE NEED TO USE
    if x is not None and y is not None:
        return c is not None and not isinstance(c, str)
    else:
        return False
@property
def highlight(self):
    """Whether a highlight marker is drawn at the current value of `i`."""
    return self._highlight

@highlight.setter
def highlight(self, highlight):
    # strict bool check: reject truthy non-bools rather than coerce
    if isinstance(highlight, bool):
        self._highlight = highlight
    else:
        raise TypeError("highlight must be of type bool")
@property
def highlight_size(self):
    """
    Returns
    ---------
    * (float): size of the highlight marker.  When unset, defaults to
      twice the mean of the current sizes (so small markers still get a
      considerably larger highlight).
    """
    if self._highlight_size is None:
        # default to twice the non-highlight size.
        # NOTE: the original branched on self.s.mode == 'pt' here but both
        # branches returned the identical expression, so the dead branch
        # was removed.
        # TODO: can we make this dependent on i?
        return np.mean(self.get_sizes())*2
    return self._highlight_size

@highlight_size.setter
def highlight_size(self, highlight_size):
    if highlight_size is None:
        self._highlight_size = None
        return

    if not (isinstance(highlight_size, float) or isinstance(highlight_size, int)):
        raise TypeError("highlight_size must be of type float or int")
    if highlight_size <= 0:
        raise ValueError("highlight_size must be > 0")

    self._highlight_size = highlight_size
@property
def highlight_marker(self):
    """Marker used for the highlight point ('o' when unset)."""
    return 'o' if self._highlight_marker is None else self._highlight_marker

@highlight_marker.setter
def highlight_marker(self, highlight_marker):
    # None unsets; anything else must be a string
    if highlight_marker is not None and not isinstance(highlight_marker, str):
        raise TypeError("highlight_marker must be of type str")
    # TODO: make sure valid marker?
    self._highlight_marker = highlight_marker
@property
def highlight_color(self):
    """Explicit highlight color, or None (draw falls back to the call color)."""
    return self._highlight_color

@highlight_color.setter
def highlight_color(self, highlight_color):
    if highlight_color is None:
        self._highlight_color = None
    elif isinstance(highlight_color, str):
        # resolve any color alias to its canonical name
        self._highlight_color = common.coloralias.map(highlight_color)
    else:
        raise TypeError("highlight_color must be of type str")
@property
def highlight_linestyle(self):
    """Linestyle for the highlight line ('None' when unset)."""
    return 'None' if self._highlight_linestyle is None else self._highlight_linestyle

@highlight_linestyle.setter
def highlight_linestyle(self, highlight_linestyle):
    # None unsets; anything else must be a string
    if highlight_linestyle is not None and not isinstance(highlight_linestyle, str):
        raise TypeError("highlight_linestyle must be of type str")
    # TODO: make sure value ls?
    self._highlight_linestyle = highlight_linestyle
def get_sizes(self, i=None):
    """
    Return the size value(s) to use at independent-value `i`, normalized
    through the parent axes' size dimension when size-scaling applies.
    """
    axes_s_unit = self.axes_s.unit if self.axes_s is not None else None
    s = self.s.get_value(i=i, unit=axes_s_unit)

    if not self.do_sizescale:
        # static sizes: use the scalar as-is, otherwise mode-based defaults
        if s is not None:
            return s
        return 1 if self.s.mode == 'pt' else 0.02

    if self.axes_s is not None:
        return self.axes_s.normalize(s, i=i)

    # fallback on 0.01-0.05 mapping for just this call
    sall = self.s.get_value(unit=axes_s_unit)
    norm = plt.Normalize(np.nanmin(sall), np.nanmax(sall))
    return norm(s) * 0.04 + 0.01
@property
def s(self):
    """The size dimension (<autofig.call.CallDimensionS>) of this call."""
    return self._s
@property
def c(self):
    """The color dimension (<autofig.call.CallDimensionC>) of this call."""
    return self._c
def get_color(self, colorcycler=None):
    """Return the fixed color for this call, deferring to the cycler when unset."""
    value = self.c.value
    # a string means a fixed color; a non-string c means color-by-dimension
    # (call self.c directly for that) so we defer to the cycler here
    color = value if isinstance(value, str) else None
    if color is None and colorcycler is not None:
        color = colorcycler.next_tmp
    return color
@property
def color(self):
    """Shortcut to <autofig.call.Plot.get_color>."""
    return self.get_color()

@color.setter
def color(self, color):
    # TODO: type and cycler checks
    resolved = common.coloralias.map(_map_none(color))
    parent = self.axes
    if parent is not None:
        # keep the parent axes' color cycler in sync with the change
        parent._colorcycler.replace_used(self.get_color(), resolved)
    self._c.value = resolved
def get_cmap(self, cmapcycler=None):
    """Return the colormap for the c-dimension, deferring to the cycler when unset."""
    value = self.c.value
    # no colormap applies for a fixed color string or when c is unset
    if value is None or isinstance(value, str):
        return None

    cmap = self.c.cmap
    if cmap is None and cmapcycler is not None:
        cmap = cmapcycler.next_tmp
    return cmap
def get_marker(self, markercycler=None):
    """Return the marker, deferring to the cycler (or '.') when unset."""
    marker = self._marker
    if marker is not None:
        return marker
    return markercycler.next_tmp if markercycler is not None else '.'
@property
def marker(self):
    """Shortcut to <autofig.call.Plot.get_marker>."""
    return self.get_marker()

@marker.setter
def marker(self, marker):
    # TODO: type and cycler checks
    new_marker = _map_none(marker)
    parent = self.axes
    if parent is not None:
        # keep the parent axes' marker cycler in sync with the change
        parent._markercycler.replace_used(self.get_marker(), new_marker)
    self._marker = new_marker
def get_linestyle(self, linestylecycler=None):
    """Return the linestyle, deferring to the cycler when unset."""
    if self._linestyle is not None:
        return self._linestyle
    return linestylecycler.next_tmp if linestylecycler is not None else None
@property
def linestyle(self):
    """Shortcut to <autofig.call.Plot.get_linestyle>."""
    return self.get_linestyle()

@linestyle.setter
def linestyle(self, linestyle):
    # TODO: type and cycler checks
    new_ls = common.linestylealias.map(_map_none(linestyle))
    parent = self.axes
    if parent is not None:
        # keep the parent axes' linestyle cycler in sync with the change
        parent._linestylecycler.replace_used(self.get_linestyle(), new_ls)
    self._linestyle = new_ls
@property
def linebreak(self):
    """Two-character linebreak spec ('<dimension><+|->'), or False when unset."""
    return False if self._linebreak is None else self._linebreak

@linebreak.setter
def linebreak(self, linebreak):
    if linebreak is None:
        self._linebreak = linebreak
        return

    # must be a 2-character string: a dimension followed by '+' or '-'
    if not isinstance(linebreak, str):
        raise TypeError("linebreak must be of type str, found {} {}".format(type(linebreak), linebreak))
    if len(linebreak) != 2:
        raise ValueError("linebreak must be of length 2")
    if linebreak[0] not in common.dimensions:
        raise ValueError("linebreak must start with one of {}".format(common.dimensions))
    acceptable_ends = ['+', '-']
    if linebreak[1] not in acceptable_ends:
        raise ValueError("linebreak must end with one of {}".format(acceptable_ends))

    self._linebreak = linebreak
def draw(self, ax=None, i=None,
         colorcycler=None, markercycler=None, linestylecycler=None):
    """
    Draw this call onto a matplotlib axes.

    See also:

    * <autofig.draw>
    * <autofig.figure.Figure.draw>
    * <autofig.axes.Axes.draw>
    * <autofig.call.Mesh.draw>

    Arguments
    -----------
    * `ax` (matplotlib axes, optional, default=None): axes to draw onto;
        defaults to the current axes.
    * `i` (float/int/list/None, optional, default=None): current value of
        the independent-variable.
    * `colorcycler`: cycler consulted when no explicit color is set.
    * `markercycler`: cycler consulted when no explicit marker is set.
    * `linestylecycler`: cycler consulted when no explicit linestyle is set.

    Returns
    ----------
    * (list): the matplotlib artists that were drawn.
    """
    # Plot.draw
    if ax is None:
        ax = plt.gca()
    else:
        if not isinstance(ax, plt.Axes):
            raise TypeError("ax must be of type plt.Axes")

    if not (i is None or isinstance(i, float) or isinstance(i, int) or isinstance(i, u.Quantity) or isinstance(i, list) or isinstance(i, np.ndarray)):
        raise TypeError("i must be of type float/int/list/None")

    # NOTE(review): kwargs is copied here but does not appear to be used
    # below in this method — confirm before removing.
    kwargs = self.kwargs.copy()

    # determine 2D or 3D
    axes_3d = isinstance(ax, Axes3D)
    if (axes_3d and self.axes.projection=='2d') or (not axes_3d and self.axes.projection=='3d'):
        raise ValueError("axes and projection do not agree")

    # marker
    marker = self.get_marker(markercycler=markercycler)

    # linestyle - 'linestyle' has priority over 'ls'
    ls = self.get_linestyle(linestylecycler=linestylecycler)

    # color (NOTE: not necessarily the dimension c)
    color = self.get_color(colorcycler=colorcycler)

    # PREPARE FOR PLOTTING AND GET DATA
    return_artists = []
    # TODO: handle getting in correct units (possibly passed from axes?)
    x = self.x.get_value(i=i, unit=self.axes.x.unit)
    xerr = self.x.get_error(i=i, unit=self.axes.x.unit)
    y = self.y.get_value(i=i, unit=self.axes.y.unit)
    yerr = self.y.get_error(i=i, unit=self.axes.y.unit)
    z = self.z.get_value(i=i, unit=self.axes.z.unit)
    # zerr is handled below, only if axes_3ds
    c = self.c.get_value(i=i, unit=self.axes_c.unit if self.axes_c is not None else None)
    s = self.s.get_value(i=i, unit=self.axes_s.unit if self.axes_s is not None else None)

    # bail on cases where we can't plot.  This could possibly be due to
    # sending Nones or Nans
    # if x is None and y is None:
    #     return []
    # if x is None and len(y) > 1:
    #     return []
    # if y is None and len(x) > 1:
    #     return []

    if axes_3d:
        zerr = self.z.get_error(i=i, unit=self.axes.z.unit)
    else:
        zerr = None

    # then we need to loop over the linebreaks
    if isinstance(x, list) or isinstance(y, list):
        linebreak_n = len(x) if isinstance(x, list) else len(y)
    else:
        linebreak_n = 1

    # wrap everything into per-linebreak-segment lists of equal length
    xs = _to_linebreak_list(x, linebreak_n)
    xerrs = _to_linebreak_list(xerr, linebreak_n)
    ys = _to_linebreak_list(y, linebreak_n)
    yerrs = _to_linebreak_list(yerr, linebreak_n)
    zs = _to_linebreak_list(z, linebreak_n)
    # zerrs = _to_linebreak_list(zerr, linebreak_n)
    cs = _to_linebreak_list(c, linebreak_n)
    ss = _to_linebreak_list(s, linebreak_n)

    for loop1,(x,xerr,y,yerr,z,c,s) in enumerate(zip(xs, xerrs, ys, yerrs, zs, cs, ss)):
        if axes_3d:
            data = np.array([x, y, z])
            points = np.array([x, y, z]).T.reshape(-1, 1, 3)
        else:
            data = np.array([x, y])
            points = np.array([x, y]).T.reshape(-1, 1, 2)

        # segments are used for LineCollection
        segments = np.concatenate([points[:-1], points[1:]], axis=1)

        # DETERMINE WHICH SCALINGS WE NEED TO USE
        # (re-evaluated per-segment: the properties consider the full
        # arrays, but this segment may be missing data)
        do_colorscale = self.do_colorscale
        do_sizescale = self.do_sizescale
        if x is not None and y is not None:
            do_colorscale = c is not None and not isinstance(c, str)
            do_sizescale = s is not None and not (isinstance(s, float) or isinstance(s, int))
        else:
            do_colorscale = False
            do_sizescale = False

        # DETERMINE PER-DATAPOINT Z-ORDERS
        zorders, do_zorder = self.axes.z.get_zorders(z, i=i)

        if axes_3d:
            # TODO: we probably want to re-implement zorder, but then we need to
            # sort in the *projected* z rather than data-z.  We'll also need to
            # figure out why LineCollection is complaining about the input shape
            do_zorder = False

        # ALLOW ACCESS TO COLOR FOR I OR LOOP
        # TODO: in theory these could be exposed (maybe not the loop, but i)
        def get_color_i(i, default=color):
            if do_colorscale and self.axes_c is not None:
                cmap = self.axes_c.cmap
                norm = self.axes_c.get_norm(i=i)
                ci = self.axes.c.get_value(i=i)
                return plt.get_cmap(cmap)(norm(ci))
            else:
                return default

        def get_color_loop(loop, do_zorder, default=color):
            if do_colorscale and self.axes_c is not None:
                cmap = self.axes_c.cmap
                norm = self.axes_c.get_norm(i=i)
                if do_zorder:
                    cloop = c[loop]
                else:
                    cloop = c
                return plt.get_cmap(cmap)(norm(cloop))
            else:
                return default

        # BUILD KWARGS NEEDED FOR EACH CALL TO ERRORBAR
        def error_kwargs_loop(xerr, yerr, zerr, loop, do_zorder):
            def _get_error(errorarray, loop, do_zorder):
                if errorarray is None:
                    return None
                elif do_zorder:
                    return errorarray[loop]
                else:
                    return errorarray

            error_kwargs = {'xerr': _get_error(xerr, loop, do_zorder),
                            'yerr': _get_error(yerr, loop, do_zorder)}

            if axes_3d:
                error_kwargs['zerr'] = _get_error(zerr, loop, do_zorder)

            error_kwargs['ecolor'] = get_color_loop(loop, do_zorder)

            # not so sure that we want the errorbar linewidth to adjust based
            # on size-scaling... but in theory we could do something like this:
            # error_kwargs['elinewidth'] = sizes[loop]

            return error_kwargs

        # BUILD KWARGS NEEDED FOR EACH CALL TO LINECOLLECTION
        lc_kwargs_const = {}
        lc_kwargs_const['linestyle'] = ls
        if do_colorscale:
            lc_kwargs_const['norm'] = self.axes_c.get_norm(i=i) if self.axes_c is not None else None
            lc_kwargs_const['cmap'] = self.axes_c.cmap if self.axes_c is not None else None
        else:
            lc_kwargs_const['color'] = color

        # also set self._sizes so its accessible from the callback which
        # will actually handle setting the sizes
        sizes = self.get_sizes(i)
        self._sizes = sizes

        def sizes_loop(loop, do_zorder):
            if do_zorder:
                if isinstance(sizes, float):
                    return sizes
                return sizes[loop]
            else:
                return sizes

        def lc_kwargs_loop(lc_kwargs, loop, do_zorder):
            if do_colorscale:
                # nothing to do here, the norm and map are passed rather than values
                pass
            if do_sizescale:
                # linewidth is handled by the callback
                pass
            return lc_kwargs

        # BUILD KWARGS NEEDED FOR EACH CALL TO SCATTER
        sc_kwargs_const = {}
        sc_kwargs_const['marker'] = marker
        sc_kwargs_const['linewidths'] = 0 # linewidths = 0 removes the black edge
        sc_kwargs_const['edgecolors'] = 'none'
        if do_colorscale:
            sc_kwargs_const['norm'] = self.axes_c.get_norm(i=i) if self.axes_c is not None else None
            sc_kwargs_const['cmap'] = self.axes_c.cmap if self.axes_c is not None else None
            # we'll set sc_kwargs['cmap'] per-loop in the function below
        else:
            sc_kwargs_const['c'] = color

        def sc_kwargs_loop(sc_kwargs, loop, do_zorder):
            if do_colorscale:
                if do_zorder:
                    sc_kwargs['c'] = c[loop]
                else:
                    sc_kwargs['c'] = c
            # if do_sizescale:
            #     if do_zorder:
            #         sc_kwargs['s'] = self.get_markersize(sizes[loop], scatter=True)
            #     else:
            #         sc_kwargs['s'] = self.get_markersize(sizes, scatter=True)

            return sc_kwargs

        # DRAW IF X AND Y ARE ARRAYS
        if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            # LOOP OVER DATAPOINTS so that each can be drawn with its own zorder
            if do_zorder:
                datas = data.T
                segments = segments
                zorders = zorders
            else:
                # wrap in single-element lists so the loop below runs once
                datas = [data]
                zorders = [zorders]
                segments = [segments]

            for loop2, (datapoint, segment, zorder) in enumerate(zip(datas, segments, zorders)):
                return_artists_this_loop = []
                # DRAW ERRORBARS, if applicable
                # NOTE: we never pass a label here to avoid duplicate entries
                # the actual datapoints are handled and labeled separately.
                # Unfortunately this means the error bar will not be included
                # in the styling of the legend.
                if xerr is not None or yerr is not None or zerr is not None:
                    artists = ax.errorbar(*datapoint,
                                          fmt='', linestyle='None',
                                          zorder=zorder,
                                          label=None,
                                          **error_kwargs_loop(xerr, yerr, zerr, loop2, do_zorder))

                    # NOTE: these are currently not included in return_artists
                    # so they don't scale according to per-element sizes.
                    # But we may want to return them for completeness and may
                    # want some way of setting the size of the errobars,
                    # maybe similar to how highlight_size is handled

                    # errorbar actually returns a Container object of artists,
                    # so we need to cast to a list
                    # for artist_list in list(artists):
                    #     if isinstance(artist_list, tuple):
                    #         return_artists += list(artist_list)
                    #     else:
                    #         return_artists += [artist_list]

                if do_colorscale or do_sizescale or do_zorder or marker in ['x', '+']:
                    # DRAW LINECOLLECTION, if applicable
                    if ls.lower() != 'none':
                        # TODO: color and zorder are assigned from the LEFT point in
                        # the segment.  It may be nice to interpolate from LEFT-RIGHT
                        # by accessing zorder[loop+1] and c[loop+1]
                        if do_zorder:
                            segments = (segment,)
                        else:
                            segments = segment

                        if axes_3d:
                            lccall = Line3DCollection
                        else:
                            lccall = LineCollection

                        # we'll only include this in the legend for the first loop
                        # and if the marker isn't going to get its own entry.
                        # Unfortunately this means in these cases the
                        # marker will get precedent in the legend if both
                        # marker and linestyle are present
                        lc = lccall(segments,
                                    zorder=zorder,
                                    label=self.label if loop1==0 and loop2==0 and marker.lower()=='none' else None,
                                    **lc_kwargs_loop(lc_kwargs_const, loop2, do_zorder))

                        if do_colorscale:
                            if do_zorder:
                                lc.set_array(np.array([c[loop2]]))
                            else:
                                lc.set_array(c)

                        return_artists_this_loop.append(lc)
                        ax.add_collection(lc)

                    # DRAW SCATTER, if applicable
                    if marker.lower() != 'none':
                        artist = ax.scatter(*datapoint,
                                            zorder=zorder,
                                            label=self.label if loop1==0 and loop2==0 else None,
                                            **sc_kwargs_loop(sc_kwargs_const, loop2, do_zorder))

                        return_artists_this_loop.append(artist)

                else:
                    # let's use plot whenever possible... it'll be faster
                    # and will guarantee that the linestyle looks correct
                    artists = ax.plot(*datapoint,
                                      marker=marker,
                                      ls=ls,
                                      mec='none',
                                      color=color,
                                      label=self.label if loop1==0 and loop2==0 else None)

                    return_artists_this_loop += artists

                size_this_loop = sizes_loop(loop2, do_zorder)
                for artist in return_artists_this_loop:
                    # store the sizes so they can be rescaled appropriately by
                    # the callback
                    artist._af_sizes = size_this_loop

                return_artists += return_artists_this_loop

        # DRAW IF X OR Y ARE NOT ARRAYS
        if not (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)):
            # TODO: can we do anything in 3D?
            if x is not None:
                artist = ax.axvline(x, ls=ls, color=color, label=self.label)
                return_artists += [artist]

            if y is not None:
                artist = ax.axhline(y, ls=ls, color=color, label=self.label)
                return_artists += [artist]

    # DRAW HIGHLIGHT, if applicable (outside per-datapoint loop)
    if self.highlight and i is not None:
        if self.highlight_linestyle != 'None' and self.i.is_reference:
            i_direction = self.i.reference
            if i_direction == 'x':
                linefunc = 'axvline'
            elif i_direction == 'y':
                linefunc = 'axhline'
            else:
                # TODO: can we do anything if in z?
                linefunc = None

            if linefunc is not None:
                artist = getattr(ax, linefunc)(i,
                                               ls=self.highlight_linestyle,
                                               color=self.highlight_color if self.highlight_color is not None else color)

                artist._af_highlight = True
                return_artists += [artist]

        if axes_3d:
            # I do not understand why, but matplotlib requires these to be
            # iterable when in 3d projection
            highlight_data = ([self.x.highlight_at_i(i)],
                              [self.y.highlight_at_i(i)],
                              [self.z.highlight_at_i(i)])
        else:
            highlight_data = (self.x.highlight_at_i(i),
                              self.y.highlight_at_i(i))

        artists = ax.plot(*highlight_data,
                          marker=self.highlight_marker,
                          ls=self.highlight_linestyle,
                          color=self.highlight_color if self.highlight_color is not None else color)

        for artist in artists:
            artist._af_highlight=True
        return_artists += artists

    self._backend_objects = return_artists

    # attach artists to autofig and apply any registered callbacks
    for artist in return_artists:
        callbacks._connect_to_autofig(self, artist)

        for callback in self.callbacks:
            callback_callable = getattr(callbacks, callback)
            callback_callable(artist, self)

    return return_artists
class FillBetween(Call):
    """
    A call that fills the region between two (or three) y-curves over x.
    See <autofig.call.FillBetween.__init__>.
    """
    def __init__(self, x=None, y=None, c=None, i=None,
                 xunit=None, xlabel=None,
                 yunit=None, ylabel=None,
                 cunit=None, clabel=None, cmap=None,
                 iunit=None, itol=0.0,
                 axorder=None, axpos=None,
                 title=None,
                 label=None,
                 linebreak=None,
                 uncover=False, trail=False,
                 consider_for_limits=True,
                 **kwargs):
        """
        Create a <autofig.call.FillBetween> object which defines a single call to
        matplotlib.

        See also:

        * <autofig.call.Plot>
        * <autofig.call.Mesh>

        Arguments
        -------------
        * `x` (list/array, optional, default=None): array of values for the x-axes.
            Access via <autofig.call.FillBetween.x>.
        * `y` (list/array, optional, default=None): array of values for the y-axes.
            Must have shape (len(x), 2)
            Access via <autofig.call.FillBetween.y>.
        * `c` or `color` (list/array, optional, default=None): array of values for the
            color-direction.  Access via <autofig.call.FillBetween.c>.  Note: `color`
            takes precedence over `c` if both are provided.
        * `i` (list/array or string, optional, default=None): array of values for
            the independent-variable.  If a string, can be one of: 'x', 'y', 'z',
            'c', 's' to reference an existing array.  Access via
            <autofig.call.FillBetween.i>.
        * `xunit` (string or astropy unit, optional, default=None): units for `x`.
            See <autofig.call.FillBetween.x> and <autofig.call.CallDimensionX.unit>.
        * `xlabel` (string, optional, default=None): label for `x`.
            See <autofig.call.FillBetween.x> and <autofig.call.CallDimensionX.label>.
        * `yunit` (string or astropy unit, optional, default=None): units for `y`.
            See <autofig.call.FillBetween.y> and <autofig.call.CallDimensionY.unit>.
        * `ylabel` (string, optional, default=None): label for `y`.
            See <autofig.call.FillBetween.y> and <autofig.call.CallDimensionY.label>.
        * `iunit` (string or astropy unit, optional, default=None): units for `i`.
            See <autofig.call.FillBetween.i> and <autofig.call.CallDimensionI.unit>.
        * `itol` (float, optional, default=0.0): see <autofig.call.DimensionI.tol>.
        * `axorder` (int, optional, default=None): see <autofig.call.FillBetween.axorder>.
        * `axpos` (tuple, optional, default=None): see <autofig.call.FillBetween.axpos>.
        * `title` (string, optional, default=None): see <autofig.call.FillBetween.title>.
        * `label` (string, optional, default=None): see <autofig.call.FillBetween.label>.
        * `linebreak` (string, optional, default=None): see <autofig.call.FillBetween.linebreak>.
        * `consider_for_limits` (bool, optional, default=True): see
            <autofig.call.Call.consider_for_limits>.
        * `uncover` (bool, optional, default=False): see <autofig.call.Call.uncover>.
        * `trail` (bool or Float, optional, default=False): see
            <autofig.call.Call.trail>.
        * `**kwargs`: additional keyword arguments are stored and passed on when
            attaching to a parent axes.  See <autofig.axes.Axes.add_call>.

        Returns
        ---------
        * the instantiated <autofig.call.FillBetween> object.
        """
        # 'color'/'colormap' (if provided) take precedence over 'c'/'cmap'.
        # NOTE: the original repeated this entire pop/assign/construct block a
        # second time; the second kwargs.pop('color') always returned None
        # (already popped), so the duplicate was dead code and was removed.
        color = kwargs.pop('color', None)
        c = color if color is not None else c
        cmap = kwargs.pop('colormap', cmap)
        self._c = CallDimensionC(self, c, None, cunit, clabel, cmap=cmap)

        self._axes = None # super will do this again, but we need it for setting marker, etc
        self._axes_c = None

        self.linebreak = linebreak

        # x and y are mandatory and must be shape-consistent
        if x is None:
            raise TypeError("x cannot be None for FillBetween")
        x = np.asarray(x)
        if y is None:
            raise TypeError("y cannot be None for FillBetween")
        y = np.asarray(y)
        if y.shape not in [(len(x), 2), (len(x), 3)]:
            raise ValueError("y must be of shape ({}, 2) or ({}, 3), not {}".format(len(x), len(x), y.shape))

        super(FillBetween, self).__init__(i=i, iunit=iunit, itol=itol,
                                          x=x, xunit=xunit, xlabel=xlabel,
                                          y=y, yunit=yunit, ylabel=ylabel,
                                          consider_for_limits=consider_for_limits,
                                          uncover=uncover, trail=trail,
                                          axorder=axorder, axpos=axpos,
                                          title=title, label=label,
                                          **kwargs
                                          )

        # self.connect_callback(callbacks.update_sizes)
def __repr__(self):
    """Terse representation listing which dimensions currently hold data."""
    populated = [direction for direction in ('i', 'x', 'y', 'c')
                 if getattr(self, direction).value is not None]
    return "<Call:FillBetween | dims: {}>".format(", ".join(populated))
@classmethod
def from_dict(cls, dict):
    """Instantiate a FillBetween from its dictionary representation."""
    return cls(**dict)

def to_dict(self):
    """Serialize this call (and each of its dimensions) to a plain dict."""
    d = {'classname': self.__class__.__name__}
    # each dimension serializes itself
    for dim in ('x', 'y', 'c', 'i'):
        d[dim] = getattr(self, dim).to_dict()
    d['axorder'] = self._axorder
    d['axpos'] = self._axpos
    d['title'] = self._title
    d['label'] = self._label
    d['uncover'] = self._uncover
    d['trail'] = self._trail
    d['consider_for_limits'] = self._consider_for_limits
    return d
@property
def axes_c(self):
    """The color axes this call is attached to (read-only).

    There is intentionally no setter: attachment is handled by
    axes.add_call.
    """
    return self._axes_c
@property
def do_colorscale(self):
    """Whether drawing should map the c dimension through a colormap.

    True only when there is actual x and y data and c holds array data
    (a string c is a literal matplotlib color, not a scale).
    """
    xvalue = self.x.get_value()
    yvalue = self.y.get_value()
    cvalue = self.c.get_value()
    if xvalue is None or yvalue is None:
        return False
    return cvalue is not None and not isinstance(cvalue, str)

@property
def c(self):
    """The <autofig.call.CallDimensionC> for this call."""
    return self._c
def get_color(self, colorcycler=None):
    """Resolve the literal matplotlib color for this call.

    A string c value is used directly; a non-string c means "color by
    dimension", so we defer to the cycler (if provided) instead.
    """
    value = self.c.value
    color = value if isinstance(value, str) else None
    if color is None and colorcycler is not None:
        color = colorcycler.next_tmp
    return color

@property
def color(self):
    """Shortcut to <autofig.call.FillBetween.get_color>."""
    return self.get_color()

@color.setter
def color(self, color):
    # TODO: type and cycler checks
    color = common.coloralias.map(_map_none(color))
    # release the previously-used color back to the axes' cycler
    if self.axes is not None:
        self.axes._colorcycler.replace_used(self.get_color(), color)
    self._c.value = color
def get_cmap(self, cmapcycler=None):
    """Resolve the colormap for the c dimension.

    A string c is a literal color and None means no c data: no colormap
    applies in either case.  Otherwise fall back on the cycler when the
    dimension has no explicit cmap.
    """
    cvalue = self.c.value
    if cvalue is None or isinstance(cvalue, str):
        return None
    cmap = self.c.cmap
    if cmap is None and cmapcycler is not None:
        cmap = cmapcycler.next_tmp
    return cmap
@property
def linebreak(self):
    """(str or False) linebreak specification, e.g. 'x+'.

    None is stored internally before any value is set but is exposed as
    False.
    """
    return False if self._linebreak is None else self._linebreak

@linebreak.setter
def linebreak(self, linebreak):
    # None resets the stored value without validation
    if linebreak is None:
        self._linebreak = linebreak
        return

    if not isinstance(linebreak, str):
        raise TypeError("linebreak must be of type str, found {} {}".format(type(linebreak), linebreak))
    if len(linebreak) != 2:
        raise ValueError("linebreak must be of length 2")
    # first character: which dimension to watch; second: break on + or -
    if linebreak[0] not in common.dimensions:
        raise ValueError("linebreak must start with one of {}".format(common.dimensions))
    acceptable_ends = ['+', '-']
    if linebreak[1] not in acceptable_ends:
        raise ValueError("linebreak must end with one of {}".format(acceptable_ends))

    self._linebreak = linebreak
def draw(self, ax=None, i=None,
         colorcycler=None, markercycler=None, linestylecycler=None):
    """
    Draw this FillBetween onto a matplotlib axes.

    See also:

    * <autofig.draw>
    * <autofig.figure.Figure.draw>
    * <autofig.axes.Axes.draw>

    Arguments
    -----------
    * `ax` (matplotlib axes, optional, default=None): defaults to
        plt.gca() when not provided.
    * `i` (float/int/list or None, optional, default=None): value of the
        independent-variable at which to draw.
    * `colorcycler`: used to resolve the color when c is not a literal color.
    * `markercycler`: ignored
    * `linestylecycler`: ignored

    Returns
    ----------
    * (list) the created matplotlib artists
    """
    # Plot.draw
    if ax is None:
        ax = plt.gca()
    else:
        if not isinstance(ax, plt.Axes):
            raise TypeError("ax must be of type plt.Axes")

    if not (i is None or isinstance(i, float) or isinstance(i, int) or isinstance(i, u.Quantity) or isinstance(i, list) or isinstance(i, np.ndarray)):
        raise TypeError("i must be of type float/int/list/None")

    kwargs = self.kwargs.copy()

    # determine 2D or 3D
    axes_3d = isinstance(ax, Axes3D)

    if (axes_3d and self.axes.projection=='2d') or (not axes_3d and self.axes.projection=='3d'):
        raise ValueError("axes and projection do not agree")

    # color (NOTE: not necessarily the dimension c)
    color = self.get_color(colorcycler=colorcycler)

    # PREPARE FOR PLOTTING AND GET DATA
    return_artists = []
    # TODO: handle getting in correct units (possibly passed from axes?)
    x = self.x.get_value(i=i, unit=self.axes.x.unit)
    y = self.y.get_value(i=i, unit=self.axes.y.unit)
    # y arrives with shape (n, 2) or (n, 3); transpose so rows are the
    # lower/(middle)/upper boundary curves
    if isinstance(y, list):
        # a list means linebreaks were applied: transpose each segment
        y = [yi.T for yi in y]
    else:
        y = y.T
    c = self.c.get_value(i=i, unit=self.axes_c.unit if self.axes_c is not None else None)

    # then we need to loop over the linebreaks
    if isinstance(x, list) or isinstance(y, list):
        linebreak_n = len(x) if isinstance(x, list) else len(y)
    else:
        linebreak_n = 1

    # normalize x/y/c into equal-length lists of segments
    xs = _to_linebreak_list(x, linebreak_n)
    ys = _to_linebreak_list(y, linebreak_n)
    cs = _to_linebreak_list(c, linebreak_n)

    for loop1,(x,y,c) in enumerate(zip(xs, ys, cs)):
        # fill between the first (lower) and last (upper) boundary rows
        data = np.array([x, y[0], y[-1]])
        if len(y) == 3:
            # three rows: the middle one is drawn as a line on top of the fill
            data_middle = np.array([x, y[1]])
        else:
            data_middle = None

        # points = np.array([x, y1, y2]).T.reshape(-1, 1, 3)
        # segments are used for LineCollection
        # segments = np.concatenate([points[:-1], points[1:]], axis=1)

        # DETERMINE WHICH SCALINGS WE NEED TO USE
        # NOTE: recomputed per-segment (c may differ across linebreak segments)
        do_colorscale = self.do_colorscale
        if x is not None and y is not None:
            do_colorscale = c is not None and not isinstance(c, str)
        else:
            do_colorscale = False

        # ALLOW ACCESS TO COLOR FOR I OR LOOP
        # TODO: in theory these could be exposed (maybe not the loop, but i)
        # def get_color_i(i, default=color):
        #     if do_colorscale and self.axes_c is not None:
        #         cmap = self.axes_c.cmap
        #         norm = self.axes_c.get_norm(i=i)
        #         ci = self.axes.c.get_value(i=i)
        #         return plt.get_cmap(cmap)(norm(ci))
        #     else:
        #         return default
        #
        # def get_color_loop(loop, do_zorder, default=color):
        #     if do_colorscale and self.axes_c is not None:
        #         cmap = self.axes_c.cmap
        #         norm = self.axes_c.get_norm(i=i)
        #         if do_zorder:
        #             cloop = c[loop]
        #         else:
        #             cloop = c
        #         return plt.get_cmap(cmap)(norm(cloop))
        #     else:
        #         return default

        fb_kwargs = {}
        fb_kwargs['color'] = color
        fb_kwargs['alpha'] = 0.6 # TODO: make this an option
        if do_colorscale:
            fb_kwargs['norm'] = self.axes_c.get_norm(i=i) if self.axes_c is not None else None
            fb_kwargs['cmap'] = self.axes_c.cmap if self.axes_c is not None else None

        artist = ax.fill_between(*data, **fb_kwargs)
        return_artists += [artist]

        if data_middle is not None:
            # plot the middle curve as an opaque solid line over the fill
            _ = fb_kwargs.pop('alpha')
            fb_kwargs['linestyle'] = 'solid'
            fb_kwargs['marker'] = 'None'
            artists = ax.plot(*data_middle, **fb_kwargs)
            return_artists += artists

    self._backend_objects = return_artists

    # register autofig callbacks on each created artist
    for artist in return_artists:
        callbacks._connect_to_autofig(self, artist)
        for callback in self.callbacks:
            callback_callable = getattr(callbacks, callback)
            callback_callable(artist, self)

    return return_artists
class Mesh(Call):
    """A single call to draw a polygon mesh (2D PolyCollection or 3D
    Poly3DCollection) with independent face and edge color dimensions.
    """
    def __init__(self, x=None, y=None, z=None, fc=None, ec=None, i=None,
                 xerror=None, xunit=None, xlabel=None, xnormals=None,
                 yerror=None, yunit=None, ylabel=None, ynormals=None,
                 zerror=None, zunit=None, zlabel=None, znormals=None,
                 fcunit=None, fclabel=None, fcmap=None,
                 ecunit=None, eclabel=None, ecmap=None,
                 iunit=None, itol=0.0,
                 axorder=None, axpos=None,
                 title=None, label=None,
                 linestyle=None,
                 consider_for_limits=True,
                 uncover=True,
                 trail=0,
                 exclude_back=False,
                 **kwargs):
        """
        Create a <autofig.call.Mesh> object which defines a single call to
        matplotlib.

        See also:

        * <autofig.call.Plot>

        Arguments
        -------------
        * `x` (list/array, optional, default=None): array of values for the x-axes.
            Access via <autofig.call.Mesh.x>.
        * `y` (list/array, optional, default=None): array of values for the y-axes.
            Access via <autofig.call.Mesh.y>.
        * `z` (list/array, optional, default=None): array of values for the z-axes.
            Access via <autofig.call.Mesh.z>
        * `fc` or `facecolor` (list/array, optional, default=None): array of values for the
            facecolor-direction.  Access via <autofig.call.Mesh.fc>.  Note: `facecolor`
            takes precedence over `fc` if both are provided.
        * `ec` or `edgecolor` (list/array, optional, default=None): array of values for the
            edgecolor-direction.  Access via <autofig.call.Mesh.ec>.  Note: `edgecolor`
            takes precedence over `ec` if both are provided.
        * `i` (list/array or string, optional, default=None): array of values for
            the independent-variable.  If a string, can be one of: 'x', 'y', 'z',
            'fc', 'ec' to reference an existing array.  Access via
            <autofig.call.Mesh.i>.
        * `xerror` (float or list/array, optional, default=None): errors for `x`.
            See <autofig.call.Mesh.x> and <autofig.call.CallDimensionX.error>.
        * `xunit` (string or astropy unit, optional, default=None): units for `x`.
            See <autofig.call.Mesh.x> and <autofig.call.CallDimensionX.unit>.
        * `xlabel` (strong, optional, default=None): label for `x`.
            See <autofig.call.Mesh.x> and <autofig.call.CallDimensionX.label>.
        * `xnormals` (list/array, optional, default=None): normals for `x`.
            Currently ignored.
            See <autofig.call.Mesh.x> and <autofig.call.CallDimensionX.normals>.
        * `yerror` (float or list/array, optional, default=None): errors for `y`.
            See <autofig.call.Mesh.y> and <autofig.call.CallDimensionY.error>.
        * `yunit` (string or astropy unit, optional, default=None): units for `y`.
            See <autofig.call.Mesh.y> and <autofig.call.CallDimensionY.unit>.
        * `ylabel` (strong, optional, default=None): label for `y`.
            See <autofig.call.Mesh.y> and <autofig.call.CallDimensionY.label>.
        * `ynormals` (list/array, optional, default=None): normals for `y`.
            Currently ignored.
            See <autofig.call.Mesh.y> and <autofig.call.CallDimensionY.normals>.
        * `zerror` (float or list/array, optional, default=None): errors for `z`.
            See <autofig.call.Mesh.z> and <autofig.call.CallDimensionZ.error>.
        * `zunit` (string or astropy unit, optional, default=None): units for `z`.
            See <autofig.call.Mesh.z> and <autofig.call.CallDimensionZ.unit>.
        * `zlabel` (strong, optional, default=None): label for `x`.
            See <autofig.call.Mesh.z> and <autofig.call.CallDimensionZ.label>.
        * `znormals` (list/array, optional, default=None): normals for `z`.
            If provided then the back of the mesh can be ignored by setting
            `exclude_back=True`.
            See <autofig.call.Mesh.z> and <autofig.call.CallDimensionZ.normals>.
        * `fcerror` (float or list/array, optional, default=None): errors for `fc`.
            See <autofig.call.Mesh.fc> and <autofig.call.CallDimensionC.error>.
            NOTE(review): not currently accepted by this signature — verify.
        * `fcunit` (string or astropy unit, optional, default=None): units for `fc`.
            See <autofig.call.Mesh.fc> and <autofig.call.CallDimensionC.unit>.
        * `fclabel` (strong, optional, default=None): label for `fc`.
            See <autofig.call.Mesh.fc> and <autofig.call.CallDimensionC.label>.
        * `ecerror` (float or list/array, optional, default=None): errors for `ec`.
            See <autofig.call.Mesh.ec> and <autofig.call.CallDimensionC.error>.
            NOTE(review): not currently accepted by this signature — verify.
        * `ecunit` (string or astropy unit, optional, default=None): units for `ec`.
            See <autofig.call.Mesh.ec> and <autofig.call.CallDimensionC.unit>.
        * `eclabel` (strong, optional, default=None): label for `ec`.
            See <autofig.call.Mesh.ec> and <autofig.call.CallDimensionC.label>.
        * `iunit` (string or astropy unit, optional, default=None): units for `i`.
            See <autofig.call.Mesh.i> and <autofig.call.CallDimensionI.unit>.
        * `itol` (float, optional, default=0.0): see <autofig.call.DimensionI.tol>.
        * `axorder` (int, optional, default=None): see <autofig.call.Mesh.axorder>.
        * `axpos` (tuple, optional, default=None): see <autofig.call.Mesh.axpos>.
        * `title` (string, optional, default=None): see <autofig.call.Mesh.title>.
        * `label` (string, optional, default=None): see <autofig.call.Mesh.label>.
        * `linestyle` or `ls` (string, optional, default='solid'): see
            <autofig.call.Mesh.linestyle>.  Note: `linestyle` takes precedence
            over `ls` if both are provided.  So technically `ls` defaults
            to 'solid' and `linestyle` defaults to None.
        * `consider_for_limits` (bool, optional, default=True): see
            <autofig.call.Call.consider_for_limits>.
        * `exclude_back` (bool, optional, default=False): whether to exclude
            any elements pointing away from the screen.  This will be ignored
            for 3d projections or if `znormals` is not provided.  Setting this
            to True can save significant time in drawing the mesh in matplotlib,
            and is especially useful for closed surfaces if `fc` is not 'none'.
        * `**kwargs`: additional keyword arguments are stored and passed on when
            attaching to a parent axes.  See <autofig.axes.Axes.add_call>.

        Returns
        ---------
        * the instantiated <autofig.call.Mesh> object.
        """
        self._axes_fc = None
        self._axes_ec = None

        # `facecolor` alias takes precedence over `fc`
        facecolor = kwargs.pop('facecolor', None)
        fc = facecolor if facecolor is not None else fc
        self._fc = CallDimensionC(self, fc, None, fcunit, fclabel, cmap=fcmap)

        # `edgecolor` alias takes precedence over `ec`
        edgecolor = kwargs.pop('edgecolor', None)
        ec = edgecolor if edgecolor is not None else ec
        self._ec = CallDimensionC(self, ec, None, ecunit, eclabel, cmap=ecmap)

        # `linestyle` takes precedence over the `ls` alias (which defaults to 'solid')
        ls = kwargs.pop('ls', 'solid')
        self.linestyle = linestyle if linestyle is not None else ls

        # meshes never use linebreaks
        self.linebreak = False

        self.exclude_back = exclude_back

        if hasattr(i, '__iter__') and not isinstance(i, u.Quantity):
            raise ValueError("i as an iterable not supported for Meshes, make separate calls for each value of i")

        # NOTE: meshes default to uncover=True, trail=0 (unlike Plot)
        super(Mesh, self).__init__(i=i, iunit=iunit, itol=itol,
                                   x=x, xerror=xerror, xunit=xunit, xlabel=xlabel, xnormals=xnormals,
                                   y=y, yerror=yerror, yunit=yunit, ylabel=ylabel, ynormals=ynormals,
                                   z=z, zerror=zerror, zunit=zunit, zlabel=zlabel, znormals=znormals,
                                   consider_for_limits=consider_for_limits,
                                   uncover=uncover, trail=trail,
                                   axorder=axorder, axpos=axpos,
                                   title=title, label=label,
                                   **kwargs
                                   )

    def __repr__(self):
        """Terse representation listing which dimensions currently hold data."""
        dirs = []
        for direction in ['i', 'x', 'y', 'z', 'fc', 'ec']:
            if getattr(self, direction).value is not None:
                dirs.append(direction)

        return "<Call:Mesh | dims: {}>".format(", ".join(dirs))

    @classmethod
    def from_dict(cls, dict):
        """Instantiate a Mesh from its dictionary representation."""
        return cls(**dict)

    def to_dict(self):
        """Serialize this call (and each of its dimensions) to a plain dict."""
        return {'classname': self.__class__.__name__,
                'x': self.x.to_dict(),
                'y': self.y.to_dict(),
                'z': self.z.to_dict(),
                'fc': self.fc.to_dict(),
                'ec': self.ec.to_dict(),
                'i': self.i.to_dict(),
                'axorder': self._axorder,
                'axpos': self._axpos,
                'title': self._title,
                'label': self._label,
                'uncover': self._uncover,
                'trail': self._trail,
                'consider_for_limits': self._consider_for_limits,
                'exclude_back': self._exclude_back}

    @property
    def axes_fc(self):
        # currently no setter as this really should be handle by axes.add_call
        return self._axes_fc

    @property
    def axes_ec(self):
        # currently no setter as this really should be handle by axes.add_call
        return self._axes_ec

    @property
    def c(self):
        """
        Returns
        ---------
        * <autofig.call.CallDimensionCGroup> of <autofig.call.Mesh.fc> and
            <autofig.call.Mesh.ec>
        """
        return CallDimensionCGroup([self.fc, self.ec])

    @property
    def fc(self):
        """
        See also:

        * <autofig.call.Mesh.get_facecolor>

        Returns
        ----------
        * <autofig.call.CallDimensionC>
        """
        return self._fc

    def get_facecolor(self, colorcycler=None):
        """
        See also:

        * <autofig.call.Mesh.fc>

        Arguments
        -----------
        * `colorcycler` (optional, default=None): **IGNORED** (only included
            to have a similar calling signature as other methods that do
            account for color cyclers)

        Returns
        ----------
        * (string): 'none' if <autofig.call.Mesh.fc> is not a string.
        """
        if isinstance(self.fc.value, str):
            color = self.fc.value
        else:
            # then we'll default to 'none'.  If we want to color by
            # the dimension, we should call self.c directly
            color = 'none'

        # we won't use the colorcycler for facecolor

        return color

    @property
    def facecolor(self):
        """
        Shortcut to <autofig.call.Mesh.get_facecolor>.

        See also:

        * <autofig.call.Mesh.fc>

        Returns
        ----------
        * (string)
        """
        return self.get_facecolor()

    @facecolor.setter
    def facecolor(self, facecolor):
        # TODO: type and cycler checks
        facecolor = common.coloralias.map(_map_none(facecolor))
        # release the previously-used color back to the axes' cycler
        if self.axes is not None:
            self.axes._colorcycler.replace_used(self.get_facecolor(), facecolor)
        self._fc.value = facecolor

    def get_fcmap(self, cmapcycler=None):
        # a string fc is a literal color and None means no data: no cmap applies
        if isinstance(self.fc.value, str):
            return None
        if self.fc.value is None:
            return None

        cmap = self.fc.cmap
        if cmap is None and cmapcycler is not None:
            cmap = cmapcycler.next_tmp

        return cmap

    @property
    def ec(self):
        """
        See also:

        * <autofig.call.Mesh.get_edgecolor>

        Returns
        ----------
        * <autofig.call.CallDimensionC>
        """
        return self._ec

    def get_edgecolor(self, colorcycler=None):
        """
        See also:

        * <autofig.call.Mesh.ec>

        Arguments
        -----------
        * `colorcycler` (optional, default=None): **IGNORED** (only included
            to have a similar calling signature as other methods that do
            account for color cyclers)

        Returns
        ----------
        * (string): 'black' if <autofig.call.Mesh.ec> is not a string.
        """
        if isinstance(self.ec.value, str):
            color = self.ec.value
        else:
            # then we'll default to black.  If we want to color by
            # the dimension, we should call self.c directly
            color = 'black'

        # we won't use the colorcycler for edgecolor

        return color

    @property
    def edgecolor(self):
        """
        Shortcut to <autofig.call.Mesh.get_edgecolor>.

        See also:

        * <autofig.call.Mesh.ec>

        Returns
        ----------
        * (string)
        """
        return self.get_edgecolor()

    @edgecolor.setter
    def edgecolor(self, edgecolor):
        # TODO: type and cycler checks
        # 'face' is a valid matplotlib value meaning "match the facecolor"
        if edgecolor in ['face']:
            self._ec.value = edgecolor
            return

        edgecolor = common.coloralias.map(_map_none(edgecolor))
        # release the previously-used color back to the axes' cycler
        if self.axes is not None:
            self.axes._colorcycler.replace_used(self.get_edgecolor(), edgecolor)
        self._ec.value = edgecolor

    def get_ecmap(self, cmapcycler=None):
        # a string ec is a literal color and None means no data: no cmap applies
        if isinstance(self.ec.value, str):
            return None
        if self.ec.value is None:
            return None

        cmap = self.ec.cmap
        if cmap is None and cmapcycler is not None:
            cmap = cmapcycler.next_tmp

        return cmap

    @property
    def exclude_back(self):
        # (bool) whether to drop elements facing away from the screen (2d only)
        return self._exclude_back

    @exclude_back.setter
    def exclude_back(self, exclude_back):
        if not isinstance(exclude_back, bool):
            raise TypeError("exclude back must be of type bool")

        self._exclude_back = exclude_back

    def draw(self, ax=None, i=None,
             colorcycler=None, markercycler=None, linestylecycler=None):
        """
        Draw this Mesh onto a matplotlib axes as a Poly(3D)Collection.

        See also:

        * <autofig.draw>
        * <autofig.figure.Figure.draw>
        * <autofig.axes.Axes.draw>
        * <autofig.call.Plot.draw>

        Arguments
        ----------
        * `ax` (matplotlib axes, optional, default=None): defaults to plt.gca().
        * `i` (float/int or None, optional, default=None)
        * `colorcycler`: passed through to get_edgecolor/get_facecolor
            (which currently ignore it)
        * `markercycler`: unused here
        * `linestylecycler`: unused here
        """
        # Mesh.draw
        if ax is None:
            ax = plt.gca()
        else:
            if not isinstance(ax, plt.Axes):
                raise TypeError("ax must be of type plt.Axes")

        if not (i is None or isinstance(i, float) or isinstance(i, int) or isinstance(i, u.Quantity)):
            raise TypeError("i must be of type float/int/None")

        # determine 2D or 3D
        axes_3d = isinstance(ax, Axes3D)
        kwargs = self.kwargs.copy()

        # PLOTTING
        return_artists = []
        # exclude_back is applied consistently to every dimension so the
        # per-element arrays stay aligned
        x = self.x.get_value(i=i, sort_by_indep=False, exclude_back=self.exclude_back, unit=self.axes.x.unit)
        y = self.y.get_value(i=i, sort_by_indep=False, exclude_back=self.exclude_back, unit=self.axes.y.unit)
        z = self.z.get_value(i=i, sort_by_indep=False, exclude_back=self.exclude_back, unit=self.axes.z.unit)
        fc = self.fc.get_value(i=i, sort_by_indep=False, exclude_back=self.exclude_back, unit=self.axes_fc.unit if self.axes_fc is not None else None)
        ec = self.ec.get_value(i=i, sort_by_indep=False, exclude_back=self.exclude_back, unit=self.axes_ec.unit if self.axes_ec is not None else None)

        # DETERMINE PER-DATAPOINT Z-ORDERS
        zorders, do_zorder = self.axes.z.get_zorders(z, i=i)

        if do_zorder:
            # we can perhaps skip doing the zorder loop if there are no other
            # calls within the axes
            if len(self.axes.calls) == 1:
                do_zorder = False
                zorders = np.mean(zorders)

        if axes_3d:
            if x is not None and y is not None and z is not None:
                # stack into (nelements, nvertices, 3)
                polygons = np.concatenate((x[:,:,np.newaxis], y[:,:,np.newaxis], z[:,:,np.newaxis]), axis=2)
            else:
                # there isn't anything to plot here, the current i probably
                # filtered this call out
                return []

            pccall = Poly3DCollection
        else:
            if x is not None and y is not None:
                # stack into (nelements, nvertices, 2)
                polygons = np.concatenate((x[:,:,np.newaxis], y[:,:,np.newaxis]), axis=2)

                if not do_zorder and z is not None:
                    # then we'll handle zorder within this Mesh call by
                    # sorting instead of looping.  This is MUCH quicking
                    # and less memory instensive
                    sortinds = np.mean(z, axis=1).argsort()
                    polygons = polygons[sortinds, :, :]
                    # keep per-element color arrays aligned with the new order
                    if isinstance(fc, np.ndarray):
                        fc = fc[sortinds]
                    if isinstance(ec, np.ndarray):
                        ec = ec[sortinds]
            else:
                # there isn't anything to plot here, the current i probably
                # filtered this call out
                return []

            pccall = PolyCollection

        # array-valued fc/ec are mapped through a colormap; string values are literal colors
        do_facecolorscale = fc is not None and not isinstance(fc, str)
        do_edgecolorscale = ec is not None and not isinstance(ec, str)

        if do_edgecolorscale:
            if self.axes_ec is None:
                raise NotImplementedError("currently only support edgecolor once attached to axes")
            else:
                edgenorm = self.axes_ec.get_norm(i=i)
                edgecmap = self.axes_ec.cmap
                edgecolors = plt.get_cmap(edgecmap)(edgenorm(ec))
        else:
            edgecolors = self.get_edgecolor(colorcycler=colorcycler)

        if do_facecolorscale:
            if self.axes_fc is None:
                raise NotImplementedError("currently only support facecolor once attached to axes")
            else:
                facenorm = self.axes_fc.get_norm(i=i)
                facecmap = self.axes_fc.cmap
                facecolors = plt.get_cmap(facecmap)(facenorm(fc))
        else:
            facecolors = self.get_facecolor(colorcycler=colorcycler)

        if do_zorder:
            # LOOP THROUGH POLYGONS so each can be assigned its own zorder
            if isinstance(edgecolors, str):
                edgecolors = [edgecolors] * len(zorders)
            if isinstance(facecolors, str):
                facecolors = [facecolors] * len(zorders)

            for loop, (polygon, zorder, edgecolor, facecolor) in enumerate(zip(polygons, zorders, edgecolors, facecolors)):
                # only the first polygon carries the legend label
                pc = pccall((polygon,),
                            linestyle=self.linestyle,
                            edgecolors=edgecolor,
                            facecolors=facecolor,
                            zorder=zorder,
                            label=self.label if loop==0 else None)
                ax.add_collection(pc)

                return_artists += [pc]
        else:
            # DON'T LOOP as all have the same zorder, this should be faster
            pc = pccall(polygons,
                        linestyle=self.linestyle,
                        edgecolors=edgecolors,
                        facecolors=facecolors,
                        zorder=zorders,
                        label=self.label)

            ax.add_collection(pc)

            return_artists += [pc]

        self._backend_objects = return_artists

        for artist in return_artists:
            callbacks._connect_to_autofig(self, artist)

        return return_artists
class CallDimensionGroup(common.Group):
    """A collection of <autofig.call.CallDimension> objects, exposing their
    values/units/labels collectively."""
    def __init__(self, items):
        super(CallDimensionGroup, self).__init__(CallDimension, [], items)

    @property
    def value(self):
        """
        Returns
        ---------
        * (array) flattened concatenation of <autofig.call.CallDimension.value>
            for each child <autofig.call.CallDimension>
        """
        values = [dim.value for dim in self._items]
        return np.array(values).flatten()

    @property
    def units(self):
        """List of the unit of each child dimension."""
        return [dim.unit for dim in self._items]

    @property
    def unit(self):
        """The single unit shared by all children (ValueError if they disagree)."""
        distinct = list(set(self.units))
        if len(distinct) > 1:
            raise ValueError("more than 1 units, see units")
        return distinct[0]

    @property
    def labels(self):
        """List of the label of each child dimension."""
        return [dim.label for dim in self._items]

    @property
    def label(self):
        """The single label shared by all children (ValueError if they disagree)."""
        distinct = list(set(self.labels))
        if len(distinct) > 1:
            raise ValueError("more than 1 labels, see labels")
        return distinct[0]
class CallDimensionCGroup(CallDimensionGroup):
    """Group of color (c) dimensions, adding collective access to the cmap."""
    @property
    def cmap(self):
        """
        Returns
        ---------
        * (list) a list of <autofig.call.CallDimensionC.cmap> for each child
            <autofig.call.CallDimensionC>
        """
        return self._get_attrs('cmap')

    @cmap.setter
    def cmap(self, cmap):
        # BUGFIX: the setter previously accepted `smap` but forwarded the
        # undefined name `cmap`, raising NameError whenever assigned; forward
        # the provided value (mirrors CallDimensionSGroup.smap).
        return self._set_attrs('cmap', cmap)
class CallDimensionSGroup(CallDimensionGroup):
    """Group of size (s) dimensions, adding collective access to smap and mode."""
    @property
    def smap(self):
        """
        Returns
        ---------
        * (list) the <autofig.call.CallDimensionS.smap> of each child
            <autofig.call.CallDimensionS>
        """
        return self._get_attrs('smap')

    @smap.setter
    def smap(self, smap):
        # broadcast the new smap to every child dimension
        return self._set_attrs('smap', smap)

    @property
    def mode(self):
        """
        Returns
        ---------
        * (list) the <autofig.call.CallDimensionS.mode> of each child
            <autofig.call.CallDimensionS>
        """
        return self._get_attrs('mode')

    @mode.setter
    def mode(self, mode):
        # broadcast the new mode to every child dimension
        return self._set_attrs('mode', mode)
def make_calldimensiongroup(items):
    """Build the most specific CallDimensionGroup subclass for `items`.

    Homogeneous color dimensions get a CallDimensionCGroup, homogeneous
    size dimensions a CallDimensionSGroup, anything else the generic group.
    """
    if all(isinstance(item, CallDimensionC) for item in items):
        return CallDimensionCGroup(items)
    if all(isinstance(item, CallDimensionS) for item in items):
        return CallDimensionSGroup(items)
    return CallDimensionGroup(items)
class CallDimension(object):
    """A single dimension (i/x/y/z/s/c) of data belonging to a parent Call."""
    def __init__(self, direction, call, value, error=None, unit=None, label=None, normals=None):
        """Initialize a dimension.

        `value` may be a dict bundling value/error/unit/label/normals, in
        which case the explicit keyword arguments act only as fallbacks for
        keys missing from the dict.
        """
        if isinstance(value, dict):
            bundle = value
            error = bundle.get('error', error)
            unit = bundle.get('unit', unit)
            label = bundle.get('label', label)
            normals = bundle.get('normals', normals)
            value = bundle.get('value')

        self._call = call
        self.direction = direction
        # unit must be set before value as setting value pulls the appropriate
        # unit for CallDimensionI
        self.unit = unit
        self.value = value
        self.error = error
        self.label = label
        self.normals = normals
        # self.lim = lim
def __repr__(self):
    """Terse representation: direction, length/value, physical type, label."""
    if isinstance(self.value, np.ndarray):
        summary = "len: {}".format(len(self.value))
    else:
        summary = "value: {}".format(self.value)

    return "<{} | {} | type: {} | label: {}>".format(
        self.direction, summary, self.unit.physical_type, self.label)
@classmethod
def from_dict(cls, dict):
    """Instantiate a CallDimension from its dictionary representation."""
    return cls(**dict)

def to_dict(self):
    """Serialize to a plain dictionary (arrays converted to nested lists)."""
    return {'direction': self.direction,
            'unit': self.unit.to_string(),
            'value': common.arraytolistrecursive(self._value),
            'error': common.arraytolistrecursive(self._error),
            'label': self._label,
            'normals': common.arraytolistrecursive(self._normals)}
@property
def call(self):
    """
    Returns
    ---------
    * <autofig.call.Call> (<autofig.call.Plot> or <autofig.call.Mesh>): the
        parent call object.
    """
    return self._call

@property
def direction(self):
    """
    Returns
    -------------
    * (str) one of 'i', 'x', 'y', 'z', 's', 'c'
    """
    return self._direction

@direction.setter
def direction(self, direction):
    # validate type first, then membership in the known directions
    if not isinstance(direction, str):
        raise TypeError("direction must be of type str")

    accepted_values = ['i', 'x', 'y', 'z', 's', 'c']
    if direction not in accepted_values:
        raise ValueError("must be one of: {}".format(accepted_values))

    self._direction = direction
def _to_unit(self, value, unit=None):
    """Convert `value` from this dimension's unit into `unit`.

    Strings and None pass through untouched; no conversion happens when
    `unit` is None or dimensionless.
    """
    if value is None or isinstance(value, str):
        return value
    if unit is None or unit == u.dimensionless_unscaled:
        return value

    unit = common._convert_unit(unit)
    return value * self.unit.to(unit)
def interpolate_at_i(self, i, unit=None):
    """
    Access the interpolated value at a given value of `i` (independent-variable).

    Arguments
    -----------
    * `i` (float): value of the independent-variable at which to interpolate.
    * `unit` (unit or string, optional, default=None): unit in which to
        return the result.

    Returns
    -------------
    * (float): the interpolated value

    Raises
    ------------
    * ValueError: if there is a length mismatch
    """
    if isinstance(self.call.i._value, float):
        # scalar independent-variable: only an exact match qualifies
        if self.call.i._value==i:
            return self._to_unit(self._value, unit)
        else:
            return None

    # we can't call i._value here because that may point to a string, and
    # we want this to resolve the array
    i_value = self.call.i.get_value(linebreak=False, sort_by_indep=False)
    if len(i_value) != len(self._value):
        raise ValueError("length mismatch with independent-variable")

    # np.interp requires monotonically-increasing sample points
    sort_inds = i_value.argsort()
    indep_value = i_value[sort_inds]
    this_value = self._value[sort_inds]
    if len(self._value.shape) > 1:
        # multi-column value (e.g. FillBetween's y): interpolate each column
        return np.asarray([self._to_unit(np.interp(i, indep_value, this_value_col, left=np.nan, right=np.nan), unit) for this_value_col in this_value.T]).T
    return self._to_unit(np.interp(i, indep_value, this_value, left=np.nan, right=np.nan), unit)
def highlight_at_i(self, i, unit=None):
    """
    Value(s) to draw as the highlight marker at independent-value `i`.

    For a flat array with an array independent-variable this interpolates;
    otherwise it selects the points passing the uncover filter at `i`
    (with no trail).
    """
    if len(self._value.shape)==1 and isinstance(self.call.i.value, np.ndarray):
        return self.interpolate_at_i(i, unit=unit)
    else:
        # exact selection: everything uncovered at i, trail disabled
        return self._to_unit(self._value[self._filter_at_i(i,
                                                           uncover=True,
                                                           trail=0)].T,
                             unit)
def _do_linebreak(self, func='get_value', i=None, unit=None,
                  uncover=None, trail=None, linebreak=None,
                  sort_by_indep=None):
    """
    Split the array returned by `func` into a list of segments, breaking
    wherever the linebreak-direction increases ('+') or decreases ('-')
    between consecutive points (e.g. linebreak='x+').
    """
    if linebreak is None:
        linebreak = self.linebreak

    # fetch the full (unbroken) array first
    this_array = getattr(self, func)(i=i,
                                     unit=unit,
                                     uncover=uncover,
                                     trail=trail,
                                     linebreak=False)

    if linebreak is False:
        return this_array

    break_direction = linebreak[0]
    # NOTE: we don't need the unit here since we just use it to find
    # breakpoints
    break_array = getattr(self.call, break_direction).get_value(i=i,
                                                                unit=None,
                                                                uncover=uncover,
                                                                trail=trail,
                                                                linebreak=False,
                                                                sort_by_indep=sort_by_indep)

    if linebreak[1] == '+':
        # break after each point where the break-direction increases
        split_inds = np.where(break_array[1:]-break_array[:-1]>0)[0]
    elif linebreak[1] == '-':
        # break after each point where the break-direction decreases
        split_inds = np.where(break_array[1:]-break_array[:-1]<0)[0]
    else:
        raise NotImplementedError("linebreak='{}' not supported".format(linebreak))

    return np.split(this_array, split_inds+1)
def _sort_by_indep(self, func='get_value', i=None, iunit=None, unit=None,
                   uncover=None, trail=None, linebreak=None,
                   sort_by_indep=None):
    """
    Return the array produced by `func` re-ordered so that points are
    sorted by the independent-variable.

    must be called before (or within) _do_linebreak
    """
    if sort_by_indep is None:
        # TODO: add property of the call?
        sort_by_indep = True

    indep_array = self.call.i.get_value(i=i,
                                        unit=iunit,
                                        uncover=uncover,
                                        trail=trail,
                                        linebreak=False,
                                        sort_by_indep=False)

    this_array = getattr(self, func)(i=i,
                                     unit=unit,
                                     uncover=uncover,
                                     trail=trail,
                                     linebreak=False,
                                     sort_by_indep=False)

    # can't meaningfully sort against a scalar or length-mismatched indep array
    if not (isinstance(indep_array, np.ndarray) and len(indep_array)==len(this_array)):
        sort_by_indep = False

    if sort_by_indep:
        # TODO: it might be nice to buffer this at the call level, so making
        # multiple get_value calls doesn't have to recompute the sort-order
        sort_inds = indep_array.argsort()
        return this_array[sort_inds]
    else:
        return this_array
def _get_trail_min(self, i, trail=None):
    """
    Lower bound of the independent-variable window implied by `trail` at
    current value `i`; None when trail is disabled.
    """
    trail = self.call.trail if trail is None else trail

    # determine length of the trail (if applicable)
    if trail is not False:
        if trail is True:
            # then fallback on 10% default
            trail_perc = 0.1
        else:
            trail_perc = float(trail)

        if trail_perc == 0.0:
            # zero-length trail: the window collapses onto i itself
            trail_i = i
        else:
            # trail length is a fraction of the full i-range across ALL calls
            # on the parent axes
            all_i = np.hstack(self.call.axes.calls.i.value)
            trail_i = i - trail_perc*(np.nanmax(all_i) - np.nanmin(all_i))
            if trail_i < np.nanmin(self.call.i.get_value(linebreak=False, sort_by_indep=False)):
                # don't allow extraploating below the lower range
                trail_i = np.nanmin(self.call.i.get_value(linebreak=False, sort_by_indep=False))
    else:
        trail_i = None

    return trail_i
def _filter_at_i(self, i, uncover=None, trail=None):
    """Boolean mask selecting the points visible at independent-value `i`.

    trail bounds the window from below, uncover from above; either side
    defaults to this call's settings, and a disabled side passes everything.
    """
    if uncover is None:
        uncover = self.call.uncover
    if trail is None:
        trail = self.call.trail

    # resolve through get_value since i._value may point at another dimension
    i_value = self.call.i.get_value(linebreak=False, sort_by_indep=False)

    if isinstance(i_value, np.ndarray):
        everything = np.ones(i_value.shape, dtype=bool)
    else:
        everything = True

    tol = self.call.i.tol
    if trail is not False:
        lower = i_value >= self._get_trail_min(i=i, trail=trail) - tol
    else:
        lower = everything

    if uncover is not False:
        upper = i_value <= i + tol
    else:
        upper = everything

    return (lower & upper)
def get_value(self, i=None, unit=None,
              uncover=None, trail=None,
              linebreak=None, sort_by_indep=None,
              exclude_back=False,
              attr='_value'):
    """
    Access the value for a given value of `i` (independent-variable) depending
    on which effects (i.e. uncover) are enabled.

    If `uncover`, `trail`, or `linebreak` are None (default), then the value from
    the parent <autofig.call.Call> from <autofig.call.CallDimension.call>
    (probably (<autofig.call.Plot>) will be used.  See <autofig.call.Plot.uncover>,
    <autofig.call.Plot.trail>, <autofig.call.Plot.linebreak>.

    Arguments
    -----------
    * `i` (float or None): independent-variable value at which to filter.
    * `unit` (unit or string, optional): unit for the returned values.
    * `uncover` (bool or None): hide points beyond `i`.
    * `trail` (bool/float or None): hide points before the trailing window.
    * `linebreak` (string/False or None): split into segments (see _do_linebreak).
    * `sort_by_indep` (bool or None): sort by the independent-variable.
    * `exclude_back` (bool): drop elements with negative z-normals (2d meshes).
    * `attr` (string): which underlying attribute to read ('_value' or '_error').

    Returns
    ----------
    * (array or None)
    """
    value = getattr(self, attr)  # could be self._value or self._error

    if value is None:
        return None

    if uncover is None:
        uncover = self.call.uncover

    if trail is None:
        trail = self.call.trail

    if linebreak is None:
        linebreak = self.call.linebreak

    if sort_by_indep is None:
        # TODO: make this a property of the call?
        sort_by_indep = True

    # scalar/string values (e.g. a literal color or an axhline level)
    if isinstance(value, str) or isinstance(value, float):
        if i is None:
            return self._to_unit(value, unit)
        elif isinstance(self.call.i.value, float):
            # then we still want to "select" based on the value of i
            if self._filter_at_i(i):
                return value
            else:
                return None
        else:
            # then we should show either way.  For example - a color or
            # axhline even with i given won't change in i
            return self._to_unit(value, unit)

    if isinstance(value, list) or isinstance(value, tuple):
        value = np.asarray(value)

    # from here on we're assuming the value is an array, so let's just check
    # to be sure
    if not isinstance(value, np.ndarray):
        raise NotImplementedError("value/error must be a numpy array")

    # drop elements facing away from the screen (2d meshes only)
    if exclude_back and self.call.z.normals is not None and self.call.axes.projection == '2d':
        value = value[self.call.z.normals >= 0]

    # linebreak and sorting each recurse back into get_value with the flag
    # disabled, then post-process the returned array
    if linebreak is not False:
        return self._do_linebreak(func='get{}'.format(attr),
                                  i=i,
                                  unit=unit,
                                  uncover=uncover,
                                  trail=trail,
                                  linebreak=linebreak,
                                  sort_by_indep=sort_by_indep)

    if sort_by_indep is not False:
        # if we've made it here, linebreak should already be False (if
        # linebreak was True, then we'd be within _do_linebreak and those
        # get_value calls pass linebreak=False)
        return self._sort_by_indep(func='get{}'.format(attr),
                                   i=i,
                                   unit=unit,
                                   uncover=uncover,
                                   trail=trail,
                                   linebreak=False,
                                   sort_by_indep=sort_by_indep)

    # from here on, linebreak==False and sort_by_indep==False (if either
    # were True, then we're within those functions and asking for the original
    # array)
    if i is None:
        if len(value.shape)==1:
            return self._to_unit(value, unit)
        else:
            # multi-dimensional values are transposed for Plot calls only
            if isinstance(self.call, Plot):
                return self._to_unit(value.T, unit)
            else:
                return self._to_unit(value, unit)

    # filter the data as necessary
    filter_ = self._filter_at_i(i, uncover=uncover, trail=trail)

    if isinstance(self.call.i.value, float):
        # scalar independent-variable: all-or-nothing
        if filter_:
            return self._to_unit(value, unit)
        else:
            return None

    if len(value.shape)==1 or isinstance(self.call, FillBetween):
        # then we're dealing with a flat 1D array
        if attr == '_value':
            # pad the filtered data with interpolated endpoints so the drawn
            # line reaches exactly the trail/uncover boundaries
            if trail is not False:
                trail_i = self._get_trail_min(i)
                first_point = self.interpolate_at_i(trail_i)

            if uncover:
                last_point = self.interpolate_at_i(i)

        else:
            # errors are not interpolated; pad with nan instead
            first_point = np.nan
            last_point = np.nan

        if uncover and trail is not False:
            concat = (np.array([first_point]),
                      value[filter_],
                      np.array([last_point]))
        elif uncover:
            concat = (value[filter_],
                      np.array([last_point]))
        elif trail:
            concat = (np.array([first_point]),
                      value[filter_])
        else:
            return self._to_unit(value[filter_], unit)

        return self._to_unit(np.concatenate(concat), unit)
    else:
        # then we need to "select" based on the indep and the value
        if isinstance(self.call, Plot):
            return self._to_unit(value[filter_].T, unit)
        else:
            return self._to_unit(value[filter_], unit)
# for value we need to define the property without decorators because of
# this: https://stackoverflow.com/questions/13595607/using-super-in-a-propertys-setter-method-when-using-the-property-decorator-r
# and the need to override these in the CallDimensionI class
def _get_value(self):
    """
    access the value (no filtering applied)
    """
    return self.get_value(i=None, unit=None)

def _set_value(self, value):
    """
    set the value, casting lists/tuples/ints and resolving Quantity units
    """
    if value is None:
        self._value = value
        return

    # handle casting to acceptable types
    if isinstance(value, list) or isinstance(value, tuple):
        value = np.asarray(value)
    elif isinstance(value, int):
        value = float(value)

    if isinstance(value, u.Quantity):
        if self.unit == u.dimensionless_unscaled:
            # then take the unit from quantity and apply it
            self.unit = value.unit
            value = value.value
        else:
            # then convert to the requested unit
            value = value.to(self.unit).value

    # handle setting based on type
    if isinstance(value, np.ndarray):
        # if len(value.shape) != 1:
        #     raise ValueError("value must be a flat array")

        self._value = value
    elif isinstance(value, float):
        # TODO: do we want to cast to np.array([value])??
        # this will most likely be used for axhline/axvline
        self._value = value
    elif self.direction=='c' and isinstance(value, str):
        # color dimensions accept a literal color name; resolve aliases
        self._value = common.coloralias.map(value)
    else:
        raise TypeError("value must be of type array (or similar), found {} {}".format(type(value), value))

value = property(_get_value, _set_value)
def get_error(self, i=None, unit=None,
uncover=None, trail=None,
linebreak=None, sort_by_indep=None):
"""
access the error for a given value of i (independent-variable) depending
on which effects (i.e. uncover) are enabled.
"""
return self.get_value(i=i, unit=unit,
uncover=uncover, trail=trail,
linebreak=linebreak, sort_by_indep=sort_by_indep,
attr='_error')
@property
def error(self):
"""
access the error
"""
return self._error
@error.setter
def error(self, error):
"""
set the error
"""
# TODO: check length with value?
# TODO: type checks (similar to value)
if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
if isinstance(error, list) or isinstance(error, tuple):
error = np.asarray(error)
self._error = error
@property
def unit(self):
"""
access the unit
"""
return self._unit
@unit.setter
def unit(self, unit):
"""
set the unit
"""
unit = common._convert_unit(unit)
self._unit = unit
@property
def label(self):
"""
access the label
"""
return self._label
@label.setter
def label(self, label):
"""
set the label
"""
if self.direction in ['i'] and label is not None:
raise ValueError("label not accepted for indep dimension")
if label is None:
self._label = label
return
if not isinstance(label, str):
try:
label = str(label)
except:
raise TypeError("label must be of type str")
self._label = label
@property
def normals(self):
"""
access the normals
"""
return self._normals
@normals.setter
def normals(self, normals):
"""
set the normals
"""
if self.direction not in ['x', 'y', 'z'] and normals is not None:
raise ValueError("normals only accepted for x, y, z dimensions")
if normals is None:
self._normals = None
return
if not (isinstance(normals, list) or isinstance(normals, np.ndarray)):
raise TypeError("normals must be of type list or array")
self._normals = normals
class CallDimensionI(CallDimension):
    """The independent-variable (i) dimension of a call.

    The value may either be its own array, or a string ('x', 'y', 'z')
    referencing one of the other dimensions of the same call.
    """
    def __init__(self, call, value, unit, tol):
        if isinstance(value, dict):
            tol = value.get('tol', tol)

        self.tol = tol

        super(CallDimensionI, self).__init__('i', call, value, unit)

    @classmethod
    def from_dict(cls, dict):
        """rebuild from the output of to_dict"""
        return cls(**dict)

    def to_dict(self):
        """serialize to a json-safe dictionary"""
        return {'direction': self.direction,
                'unit': self.unit.to_string(),
                'value': common.arraytolistrecursive(self._value),
                'tol': self._tol}

    @property
    def tol(self):
        """
        Returns
        -----------
        * (float) tolerance to use when selecting/uncover/trail
        """
        if self._tol is None:
            return 0.0

        return self._tol

    @tol.setter
    def tol(self, tol):
        # accept ints for convenience (cast to float, consistent with how
        # _set_value handles ints); allow None since the getter above
        # already falls back to 0.0 for a None _tol
        if isinstance(tol, int) and not isinstance(tol, bool):
            tol = float(tol)
        if tol is not None and not isinstance(tol, float):
            raise TypeError("tol must be of type float")

        # TODO: handle units?
        self._tol = tol

    @property
    def value(self):
        """
        access the value (resolving a reference to another dimension)
        """
        if isinstance(self._value, str):
            dimension = self._value
            return getattr(self.call, dimension).value

        return super(CallDimensionI, self)._get_value()

    @value.setter
    def value(self, value):
        """
        set the value
        """
        # for the indep direction we also allow a string which points to one
        # of the other available dimensions
        # TODO: support c, fc, ec?
        if isinstance(value, common.basestring) and value in ['x', 'y', 'z']:
            # we'll cast just to get rid of any python2 unicodes
            self._value = str(value)
            dimension = value
            self._unit = getattr(self.call, dimension).unit
            return

        # NOTE: cannot do super on setter directly, see this python
        # bug: https://bugs.python.org/issue14965 and discussion:
        # https://mail.python.org/pipermail/python-dev/2010-April/099672.html
        super(CallDimensionI, self)._set_value(value)

    def get_value(self, *args, **kwargs):
        """access the value, delegating to the referenced dimension if set"""
        if isinstance(self._value, str):
            dimension = self._value
            return getattr(self.call, dimension).get_value(*args, **kwargs)

        return super(CallDimensionI, self).get_value(*args, **kwargs)

    @property
    def is_reference(self):
        """
        whether referencing another dimension or its own
        """
        return isinstance(self._value, str)

    @property
    def reference(self):
        """
        reference (will return None if not is_reference)
        """
        if self.is_reference:
            return self._value
        else:
            return None
class CallDimensionX(CallDimension):
    # thin subclass that pins the direction to 'x'
    def __init__(self, *args):
        super(CallDimensionX, self).__init__('x', *args)
class CallDimensionY(CallDimension):
    # thin subclass that pins the direction to 'y'
    def __init__(self, *args):
        super(CallDimensionY, self).__init__('y', *args)
class CallDimensionZ(CallDimension):
    # thin subclass that pins the direction to 'z'
    def __init__(self, *args):
        super(CallDimensionZ, self).__init__('z', *args)
class CallDimensionS(CallDimension):
    """The size (s) dimension of a call, with size-map and mode support."""
    def __init__(self, call, value, error=None, unit=None, label=None,
                 smap=None, mode=None):
        if isinstance(value, dict):
            error = value.get('error', error)
            smap = value.get('smap', smap)
            mode = value.get('mode', mode)

        if error is not None:
            raise ValueError("error not supported for 's' dimension")

        self.smap = smap
        self.mode = mode
        super(CallDimensionS, self).__init__('s', call, value, error, unit,
                                             label)

    @classmethod
    def from_dict(cls, dict):
        """rebuild from the output of to_dict"""
        return cls(**dict)

    def to_dict(self):
        """serialize to a json-safe dictionary"""
        return {'direction': self.direction,
                'unit': self.unit.to_string(),
                'value': common.arraytolistrecursive(self._value),
                'error': common.arraytolistrecursive(self._error),
                'label': self._label,
                'smap': self._smap,
                'mode': self._mode}

    @property
    def smap(self):
        """access the (min, max) mapping for sizes"""
        return self._smap

    @smap.setter
    def smap(self, smap):
        if smap is None:
            self._smap = smap
            return

        if not isinstance(smap, tuple):
            try:
                smap = tuple(smap)
            # only catch conversion failures (e.g. non-iterables); a bare
            # except would also swallow KeyboardInterrupt/SystemExit
            except Exception:
                raise TypeError('smap must be of type tuple')

        if not len(smap)==2:
            raise ValueError('smap must have length 2')

        self._smap = smap

    def _mode_split(self, mode=None):
        """split a mode string into its (dims, obj, mode) components,
        filling in the defaults ('axes', 'fixed') for missing parts"""
        if mode is None:
            mode = self.mode

        split = mode.split(':')
        mode_dims = split[0]
        mode_obj = split[1] if len(split) > 1 else 'axes'
        mode_mode = split[2] if len(split) > 2 else 'fixed'

        return mode_dims, mode_obj, mode_mode

    @property
    def mode(self):
        """access the mode string, defaulting to 'xy:figure:fixed'"""
        if self._mode is None:
            return 'xy:figure:fixed'

        return self._mode

    @mode.setter
    def mode(self, mode):
        if mode is None:
            self._mode = None
            return

        if not isinstance(mode, str):
            raise TypeError("mode must be of type str")

        split = mode.split(':')
        mode_dims, mode_obj, mode_mode = self._mode_split(mode)

        # validate each component against its allowed values; 'pt' takes no
        # obj/mode components
        if len(split) > 3:
            raise ValueError("mode not recognized")
        if mode_dims == 'pt' and len(split) > 1:
            raise ValueError("mode not recognized")
        if mode_dims not in ['x', 'y', 'xy', 'pt']:
            raise ValueError("mode not recognized")
        if mode_obj not in ['axes', 'figure']:
            raise ValueError("mode not recognized")
        if mode_mode not in ['fixed', 'current', 'original']:
            raise ValueError("mode not recognized")

        if mode_dims == 'pt':
            self._mode = mode
        else:
            # store in fully-expanded form so defaults are explicit
            self._mode = '{}:{}:{}'.format(mode_dims, mode_obj, mode_mode)
class CallDimensionC(CallDimension):
    """The color (c) dimension of a call, with colormap support."""
    def __init__(self, call, value, error=None, unit=None, label=None, cmap=None):
        if isinstance(value, dict):
            error = value.get('error', error)
            cmap = value.get('cmap', cmap)

        if error is not None:
            raise ValueError("error not supported for 'c' dimension")

        self.cmap = cmap
        super(CallDimensionC, self).__init__('c', call, value, error, unit,
                                             label)

    @classmethod
    def from_dict(cls, dict):
        """rebuild from the output of to_dict"""
        return cls(**dict)

    def to_dict(self):
        """serialize to a json-safe dictionary"""
        return {'direction': self.direction,
                'unit': self.unit.to_string(),
                'value': common.arraytolistrecursive(self._value),
                'error': common.arraytolistrecursive(self._error),
                'label': self._label,
                'cmap': self._cmap}

    @property
    def cmap(self):
        """access the colormap (name or None)"""
        return self._cmap

    @cmap.setter
    def cmap(self, cmap):
        # validate against matplotlib's registry; the returned colormap
        # object is intentionally discarded — only the requested name is
        # stored (get_cmap(None) returns the default map, so None passes)
        try:
            plt.get_cmap(cmap)
        # only catch lookup failures; a bare except would also swallow
        # KeyboardInterrupt/SystemExit
        except Exception:
            raise TypeError("could not find cmap")

        self._cmap = cmap
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/platform/platform.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/arch/memory_sm75.h>
#include <cutlass/arch/mma_sm75.h>
#include <cutlass/arch/mma_sm80.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/gemm/warp/mma.h>
#include <cutlass/gemm/warp/mma_tensor_op_policy.h>
#include <cutlass/gemm/warp/mma_tensor_op_tile_iterator.h>
#include <cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the warp-level matrix product targeting Tensor Cores,
/// where the B operand is staged with a wider K extent per shared-memory load
/// (SharedMemoryInstructionShape) than a single math instruction consumes.
template<
    /// Size of the Gemm problem - concept: gemm::GemmShape<>
    typename Shape_,
    /// Data type of A elements
    typename ElementA_,
    /// Layout of A matrix (concept: MatrixLayout)
    typename LayoutA_,
    /// Data type of B elements
    typename ElementB_,
    /// Layout of B matrix (concept: MatrixLayout)
    typename LayoutB_,
    /// Element type of C matrix
    typename ElementC_,
    /// Layout of C matrix (concept: MatrixLayout)
    typename LayoutC_,
    /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
    typename Policy_,
    /// Instruction shape to override shared memory iterators with
    typename SharedMemoryInstructionShape_,
    /// Number of partitions along K dimension
    int PartitionsK_ = 1,
    /// Store the accumulators in row major or column major.  Row major is used
    /// when output layout is interleaved.
    bool AccumulatorsInRowMajor = false,
    /// Used for partial specialization
    typename Enable = bool>
class MmaTensorOpComputeBWithF16 {
public:
    /// Shape of warp-level matrix operation (concept: GemmShape)
    using Shape = Shape_;

    /// Data type of multiplicand A
    using ElementA = ElementA_;

    /// Layout of multiplicand A
    using LayoutA = LayoutA_;

    /// Data type of multiplicand B
    using ElementB = ElementB_;

    /// Layout of multiplicand B
    using LayoutB = LayoutB_;

    /// Data type of accumulator matrix C
    using ElementC = ElementC_;

    /// Layout of accumulator matrix C
    using LayoutC = LayoutC_;

    /// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
    using Policy = Policy_;

    /// Underlying matrix multiply operator (concept: arch::Mma)
    using ArchMmaOperator = typename Policy::Operator;

    /// Indicates math operator
    using MathOperator = typename ArchMmaOperator::Operator;

    /// Architecture tag from underlying instruction
    using ArchTag = typename ArchMmaOperator::ArchTag;

    // The underlying instruction must be a half-precision HMMA (fp16 on any
    // supported arch, or bf16 on SM80+ only).
    static_assert((platform::is_same<typename ArchMmaOperator::ElementA, half_t>::value
                   && platform::is_same<typename ArchMmaOperator::ElementB, half_t>::value)
                      || (platform::is_same<typename ArchMmaOperator::ElementA, bfloat16_t>::value
                          && platform::is_same<typename ArchMmaOperator::ElementB, bfloat16_t>::value
                          && ArchTag::kMinComputeCapability >= 80),
                  "MmaTensorOpCvtBToA only supports underlying HMMA");

    static_assert(platform::is_same<ElementA, half_t>::value
                      || (platform::is_same<ElementA, bfloat16_t>::value && ArchTag::kMinComputeCapability >= 80),
                  "MmaTensorOpCvtBToA only supports Fp16 A or Bf16 A on Ampere+");

    /// Indicates class of matrix operator
    using OperatorClass = arch::OpClassTensorOp;

    /// Shape of underlying instruction
    using InstructionShape = typename ArchMmaOperator::Shape;

    /// Instruction shape to override shared memory iterators with
    using SharedMemoryInstructionShape = SharedMemoryInstructionShape_;

    static_assert(SharedMemoryInstructionShape::kM == InstructionShape::kM,
                  "M dimension of compute instruction must match load");
    static_assert(SharedMemoryInstructionShape::kN == InstructionShape::kN,
                  "N dimension of compute instruction must match load");

    // How many math-instruction K-steps are covered by one shared-memory
    // load of B; each loaded B fragment holds this many mma-K slices.
    static constexpr int kExpansionFactor = SharedMemoryInstructionShape::kK / InstructionShape::kK;

    static_assert(!(Shape::kK % SharedMemoryInstructionShape::kK), "");

    /// Complex transform on A operand
    static ComplexTransform const kTransformA = ComplexTransform::kNone;

    /// Complex transform on B operand
    static ComplexTransform const kTransformB = ComplexTransform::kNone;

    /// Number of threads participating in warp-level matrix product
    static int const kThreadCount = 32;

    /// Number of partitions along K dimension
    static int const kPartitionsK = PartitionsK_;

public:
    /// Iterates over the A operand in memory
    using IteratorA = MmaTensorOpMultiplicandTileIterator<MatrixShape<Shape::kM, Shape::kK>,
                                                          Operand::kA,
                                                          ElementA,
                                                          LayoutA,
                                                          MatrixShape<InstructionShape::kM, InstructionShape::kK>,
                                                          Policy::OpDelta::kRow,
                                                          kThreadCount,
                                                          kPartitionsK>;

    /// Storage for A tile
    using FragmentA = typename IteratorA::Fragment;

    /// Storage for transformed A tile
    using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;

    /// Iterates over the B operand in memory
    // NOTE: the B iterator uses SharedMemoryInstructionShape::kK (the
    // expanded K extent), unlike the A iterator above.
    using IteratorB =
        MmaTensorOpMultiplicandTileIterator<MatrixShape<Shape::kK, Shape::kN>,
                                            Operand::kB,
                                            ElementB,
                                            LayoutB,
                                            MatrixShape<SharedMemoryInstructionShape::kK, InstructionShape::kN>,
                                            Policy::OpDelta::kRow,
                                            kThreadCount,
                                            kPartitionsK>;

    /// Storage for B tile
    using FragmentB = typename IteratorB::Fragment;

    /// Storage for transformed B tile
    using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;

    /// Iterates over the C operand in memory
    using IteratorC = MmaTensorOpAccumulatorTileIterator<MatrixShape<Shape::kM, Shape::kN>,
                                                         ElementC,
                                                         LayoutC,
                                                         typename ArchMmaOperator::Shape,
                                                         typename Policy::OpDelta>;

    /// Storage for C tile
    using FragmentC = typename IteratorC::Fragment;

    /// Number of mma operations performed
    using MmaIterations = MatrixShape<(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
                                      (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN>;

public:
    /// Underlying matrix multiply operator (concept: arch::Mma)
    ArchMmaOperator mma;

public:
    //
    // Methods
    //

    /// Ctor
    CUTLASS_DEVICE
    MmaTensorOpComputeBWithF16() {}

    /// Performs a warp-level matrix multiply-accumulate operation.
    /// warp_tileB_k_offset selects which K-slice of the expanded B fragment
    /// the math instructions consume on this call.
    CUTLASS_DEVICE
    void operator()(FragmentC& D,
                    TransformedFragmentA const& A,
                    TransformedFragmentB const& B,
                    FragmentC const& C,
                    const int warp_tileB_k_offset) const
    {
        using MmaOperandA = typename ArchMmaOperator::FragmentA;
        using MmaOperandB = typename ArchMmaOperator::FragmentB;
        using MmaOperandC = typename ArchMmaOperator::FragmentC;

        static_assert(
            TransformedFragmentB::kElements == MmaOperandB::kElements * kExpansionFactor * MmaIterations::kColumn,
            "Each thread should have a pack of mma registers for each column iteration AND for the expanded K dim of B");

        // initialize accumulators from C; the MMAs below accumulate in place
        D = C;

        MmaOperandA const* ptr_A = reinterpret_cast<MmaOperandA const*>(&A);
        MmaOperandB const* ptr_B = reinterpret_cast<MmaOperandB const*>(&B);
        MmaOperandC* ptr_D = reinterpret_cast<MmaOperandC*>(&D);

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
        // Serpentine visitation order maximizing reuse of Rb
        CUTLASS_PRAGMA_UNROLL
        for (int n = 0; n < MmaIterations::kColumn; ++n) {
            CUTLASS_PRAGMA_UNROLL
            for (int m = 0; m < MmaIterations::kRow; ++m) {
                // zig-zag through rows so adjacent iterations share B operands
                int m_serpentine = ((n % 2) ? (MmaIterations::kRow - 1 - m) : m);

                // index into the expanded-K B fragments for this column
                int n_offsetB = warp_tileB_k_offset + kExpansionFactor * n;
                if (AccumulatorsInRowMajor) {  // matrix B is reordered
                    mma(ptr_D[n + m_serpentine * MmaIterations::kColumn],
                        ptr_A[m_serpentine],
                        ptr_B[n_offsetB],
                        ptr_D[n + m_serpentine * MmaIterations::kColumn]);
                }
                else {
                    mma(ptr_D[m_serpentine + n * MmaIterations::kRow],
                        ptr_A[m_serpentine],
                        ptr_B[n_offsetB],
                        ptr_D[m_serpentine + n * MmaIterations::kRow]);
                }
            }
        }
#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
        // Serpentine visitation order maximizing reuse of Ra
        CUTLASS_PRAGMA_UNROLL
        for (int m = 0; m < MmaIterations::kRow; ++m) {
            CUTLASS_PRAGMA_UNROLL
            for (int n = 0; n < MmaIterations::kColumn; ++n) {
                // zig-zag through columns so adjacent iterations share A operands
                int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);

                // index into the expanded-K B fragments for this column
                int n_serpentine_offsetB = warp_tileB_k_offset + kExpansionFactor * n_serpentine;
                if (AccumulatorsInRowMajor) {  // matrix B is reordered
                    mma(ptr_D[n_serpentine + m * MmaIterations::kColumn],
                        ptr_A[m],
                        ptr_B[n_serpentine_offsetB],
                        ptr_D[n_serpentine + m * MmaIterations::kColumn]);
                }
                else {
                    mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
                        ptr_A[m],
                        ptr_B[n_serpentine_offsetB],
                        ptr_D[m + n_serpentine * MmaIterations::kRow]);
                }
            }
        }
#else
        // no tensor-core path for this architecture
        assert(0);
#endif
    }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////// | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/cuda/cutlass_extensions/gemm/warp/mma_tensorop_compute_B_with_f16.h |
//// [tests/cases/conformance/es6/variableDeclarations/VariableDeclaration1_es6.ts] ////
//// [VariableDeclaration1_es6.ts]
const
//// [VariableDeclaration1_es6.js]
"use strict";
const ; | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/VariableDeclaration1_es6.js |
import hotshot
import hotshot.log
import os
import pprint
import unittest
from test import test_support
from hotshot.log import ENTER, EXIT, LINE
def shortfilename(fn):
    # Reduce a path to its bare module name (no directory, no extension),
    # since the log may record either a .py source file or a pre-compiled
    # bytecode file.  Falsy inputs (None, "") are returned unchanged.
    return os.path.splitext(os.path.basename(fn))[0] if fn else fn
class UnlinkingLogReader(hotshot.log.LogReader):
    """Extend the LogReader so the log file is unlinked when we're
    done with it."""

    def __init__(self, logfn):
        # remember the path so it can be removed once iteration finishes
        self.__logfn = logfn
        hotshot.log.LogReader.__init__(self, logfn)

    def next(self, index=None):
        # Delegate to the base reader; on exhaustion, close the reader and
        # delete the on-disk log before re-raising StopIteration.
        try:
            return hotshot.log.LogReader.next(self)
        except StopIteration:
            self.close()
            os.unlink(self.__logfn)
            raise
class HotShotTestCase(unittest.TestCase):
    """Exercise the hotshot profiler's log write/read round-trip."""

    def new_profiler(self, lineevents=0, linetimings=1):
        # Profile into the regrtest scratch filename; the log is deleted
        # later by UnlinkingLogReader once fully consumed.
        self.logfn = test_support.TESTFN
        return hotshot.Profile(self.logfn, lineevents, linetimings)

    def get_logreader(self):
        return UnlinkingLogReader(self.logfn)

    def get_events_wotime(self):
        # Strip the (non-deterministic) timing deltas so event streams can
        # be compared for exact equality.
        L = []
        for event in self.get_logreader():
            what, (filename, lineno, funcname), tdelta = event
            L.append((what, (shortfilename(filename), lineno, funcname)))
        return L

    def check_events(self, expected):
        events = self.get_events_wotime()
        if events != expected:
            self.fail(
                "events did not match expectation; got:\n%s\nexpected:\n%s"
                % (pprint.pformat(events), pprint.pformat(expected)))

    def run_test(self, callable, events, profiler=None):
        # Profile `callable` and verify the exact sequence of logged events,
        # checking the profiler's closed state around each step.
        if profiler is None:
            profiler = self.new_profiler()
        self.failUnless(not profiler._prof.closed)
        profiler.runcall(callable)
        self.failUnless(not profiler._prof.closed)
        profiler.close()
        self.failUnless(profiler._prof.closed)
        self.check_events(events)

    def test_addinfo(self):
        # addinfo() key/value pairs must round-trip through the log header
        def f(p):
            p.addinfo("test-key", "test-value")
        profiler = self.new_profiler()
        profiler.runcall(f, profiler)
        profiler.close()
        log = self.get_logreader()
        info = log._info
        # consume the log so the file gets unlinked
        list(log)
        self.failUnless(info["test-key"] == ["test-value"])

    def test_line_numbers(self):
        # With lineevents enabled, each executed line of f and g should be
        # logged between the ENTER/EXIT pairs.
        def f():
            y = 2
            x = 1
        def g():
            f()
        f_lineno = f.func_code.co_firstlineno
        g_lineno = g.func_code.co_firstlineno
        events = [(ENTER, ("test_hotshot", g_lineno, "g")),
                  (LINE, ("test_hotshot", g_lineno+1, "g")),
                  (ENTER, ("test_hotshot", f_lineno, "f")),
                  (LINE, ("test_hotshot", f_lineno+1, "f")),
                  (LINE, ("test_hotshot", f_lineno+2, "f")),
                  (EXIT, ("test_hotshot", f_lineno, "f")),
                  (EXIT, ("test_hotshot", g_lineno, "g")),
                  ]
        self.run_test(g, events, self.new_profiler(lineevents=1))

    def test_start_stop(self):
        # Make sure we don't return NULL in the start() and stop()
        # methods when there isn't an error.  Bug in 2.2 noted by
        # Anthony Baxter.
        profiler = self.new_profiler()
        profiler.start()
        profiler.stop()
        profiler.close()
        os.unlink(self.logfn)
def test_main():
    # entry point used by Python's regrtest framework
    test_support.run_unittest(HotShotTestCase)
if __name__ == "__main__":
    test_main()
# Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import wsgiref.handlers
import cgi
from google.appengine.ext import webapp
import logging
from serverside import constants
class NotFound(webapp.RequestHandler):
    """Catch-all handler that redirects unknown URLs to the static 404 page."""
    def get(self):
        self.redirect('/html/404.html')
# Route every otherwise-unmatched URL to the NotFound handler.
application = webapp.WSGIApplication([
    ('/.*', NotFound),
], debug=constants.DEBUG)

def main():
    # CGI entry point invoked by the App Engine runtime
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
#!/usr/bin/env python
import time
import logging
from boto3.session import Session
from helpers.sit_helper import SITHelper
from helpers.log import Log
class CFHelper(object):
    """Thin wrapper around the boto3 CloudFormation client used by SIT."""

    # Terminal statuses.  DELETE_COMPLETE counts as failed because
    # create_stack below uses OnFailure='DELETE' (a failed create rolls
    # back to deletion).
    FAILED_STATES = ['CREATE_FAILED', 'DELETE_FAILED', 'DELETE_COMPLETE']
    COMPLETE_STATES = ['CREATE_COMPLETE', 'UPDATE_COMPLETE']

    def __init__(self, configs_directory=None, session=None):
        # A caller-supplied boto3 session wins; otherwise build one from the
        # profile named in the SIT config files.
        if session is None:
            sit_configs = SITHelper(configs_directory).get_configs('sit')
            session = Session(profile_name=sit_configs['profile_name'])
        self.cf_client = session.client('cloudformation')

    def validate_template(self, template_body):
        """Validate a template body; logs (fatally via Log.error) on failure."""
        logging.info('Validating template')
        try:
            self.cf_client.validate_template(TemplateBody=template_body)
        except Exception as e:
            Log.error('stack validation error', e)

    def create_stack(self, stack_name, template_body, tag_value):
        """Create a stack named stack_name, tagged Name=tag_value."""
        logging.info('Creating stack: {0}'.format(stack_name))
        try:
            # NOTE(review): the trailing ['StackId'] subscript is evaluated
            # but the id is discarded — confirm no caller expects a return.
            self.cf_client.create_stack(
                StackName=stack_name,
                TemplateBody=template_body,
                OnFailure='DELETE',
                Capabilities=['CAPABILITY_IAM'],
                Tags=[
                    {
                        'Key': 'Name',
                        'Value': tag_value
                    }
                ]
            )['StackId']
        except Exception as e:
            Log.error('Failed to create stack {0}'.format(stack_name), e)

    def update_stack(self, stack_name, template_body, tag_value):
        """Update an existing stack in place with a new template body."""
        logging.info('Updating stack: {0}'.format(stack_name))
        try:
            # NOTE(review): as in create_stack, the stack id is discarded.
            self.cf_client.update_stack(
                StackName=stack_name,
                TemplateBody=template_body,
                Capabilities=['CAPABILITY_IAM'],
                Tags=[
                    {
                        'Key': 'Name',
                        'Value': tag_value
                    }
                ]
            )['StackId']
        except Exception as e:
            Log.error('Failed to update stack {0}'.format(stack_name), e)

    def stack_exists(self, stack_name):
        # Returns the stack description dict (truthy) if found, False if not.
        return self.get_stack_info(stack_name)

    def stack_was_created_successfully(self, stack_name, attempt=1, sleep_time=20):
        """Poll (recursively) until the stack reaches a terminal state.

        Gives up after 25 attempts of sleep_time seconds each and returns
        False; returns True only on a COMPLETE state.
        """
        if attempt > 25:
            logging.info('Stack was not created/updated in the alotted time')
            return False
        try:
            stack_info = self.get_stack_info(stack_name)
            stack_status = stack_info['StackStatus']
            if stack_status in self.COMPLETE_STATES:
                return True
            if stack_status in self.FAILED_STATES:
                return False
        except Exception as e:
            # transient describe failures are treated like "still pending"
            logging.info('There was a problem checking status of stack: {0}'.format(e))
        logging.info('Stack creation/update still in progress. Waiting {0} seconds'.format(sleep_time))
        time.sleep(sleep_time)
        return self.stack_was_created_successfully(stack_name, attempt+1)

    def get_stack_info(self, stack_name):
        """Return the describe_stacks record for stack_name, or False."""
        try:
            return self.cf_client.describe_stacks(StackName=stack_name)['Stacks'][0]
        except Exception as e:
            logging.info('stack info not found for: {0}. Error: {1}'.format(stack_name, e))
            return False

    def get_resource_name(self, stack_name, logical_name):
        """Map a template logical id to the physical resource name."""
        try:
            return self.cf_client.describe_stack_resource(
                StackName=stack_name,
                LogicalResourceId=logical_name
            )['StackResourceDetail']['PhysicalResourceId']
        except Exception as e:
            Log.error('resource {0} in stack {1} not found'.format(logical_name, stack_name), e)
# frozen_string_literal: true
require "test_helper"
require_relative "common"
class InlineAdapterTest < ActionCable::TestCase
  include CommonSubscriptionAdapterTest

  def setup
    super
    # The inline adapter delivers broadcasts in-process, so the transmit
    # side can simply reuse the receive adapter instead of a second one.
    @tx_adapter.shutdown
    @tx_adapter = @rx_adapter
  end

  def cable_config
    { adapter: "inline" }
  end
end
import unittest
from scrapy.http import Request
from scrapy.spider import Spider
from scrapy.utils.reqser import request_to_dict, request_from_dict
class RequestSerializationTest(unittest.TestCase):
    """Round-trip Requests through request_to_dict()/request_from_dict()."""

    def setUp(self):
        self.spider = TestSpider()

    def test_basic(self):
        r = Request("http://www.example.com")
        self._assert_serializes_ok(r)

    def test_all_attributes(self):
        # exercise every serializable Request attribute at once
        r = Request("http://www.example.com",
            callback='parse_item',
            errback='handle_error',
            method="POST",
            body="some body",
            headers={'content-encoding': 'text/html; charset=latin-1'},
            cookies={'currency': 'usd'},
            encoding='latin-1',
            priority=20,
            meta={'a': 'b'})
        self._assert_serializes_ok(r)

    def test_latin1_body(self):
        # latin-1 encoded byte-string body
        r = Request("http://www.example.com", body="\xa3")
        self._assert_serializes_ok(r)

    def test_utf8_body(self):
        # utf-8 encoded byte-string body
        r = Request("http://www.example.com", body="\xc2\xa3")
        self._assert_serializes_ok(r)

    def _assert_serializes_ok(self, request, spider=None):
        # serialize, deserialize, then compare attribute-by-attribute
        d = request_to_dict(request, spider=spider)
        request2 = request_from_dict(d, spider=spider)
        self._assert_same_request(request, request2)

    def _assert_same_request(self, r1, r2):
        self.assertEqual(r1.url, r2.url)
        self.assertEqual(r1.callback, r2.callback)
        self.assertEqual(r1.errback, r2.errback)
        self.assertEqual(r1.method, r2.method)
        self.assertEqual(r1.body, r2.body)
        self.assertEqual(r1.headers, r2.headers)
        self.assertEqual(r1.cookies, r2.cookies)
        self.assertEqual(r1.meta, r2.meta)
        self.assertEqual(r1._encoding, r2._encoding)
        self.assertEqual(r1.priority, r2.priority)
        self.assertEqual(r1.dont_filter, r2.dont_filter)

    def test_callback_serialization(self):
        # bound spider methods serialize by name when a spider is supplied
        r = Request("http://www.example.com", callback=self.spider.parse_item, \
            errback=self.spider.handle_error)
        self._assert_serializes_ok(r, spider=self.spider)

    def test_unserializable_callback1(self):
        # lambdas can never be serialized, with or without a spider
        r = Request("http://www.example.com", callback=lambda x: x)
        self.assertRaises(ValueError, request_to_dict, r)
        self.assertRaises(ValueError, request_to_dict, r, spider=self.spider)

    def test_unserializable_callback2(self):
        # a bound method cannot be serialized without its owning spider
        r = Request("http://www.example.com", callback=self.spider.parse_item)
        self.assertRaises(ValueError, request_to_dict, r)
class TestSpider(Spider):
    """Minimal spider whose bound methods serve as serializable callbacks."""
    name = 'test'

    def parse_item(self, response):
        # intentionally empty: only the method's identity matters here
        pass

    def handle_error(self, failure):
        # intentionally empty: only the method's identity matters here
        pass
/*
* PSA hashing layer on top of Mbed TLS software crypto
*/
/*
* Copyright The Mbed TLS Contributors
* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
*/
#ifndef PSA_CRYPTO_HASH_H
#define PSA_CRYPTO_HASH_H
#include <psa/crypto.h>
/** Calculate the hash (digest) of a message using Mbed TLS routines.
*
* \note The signature of this function is that of a PSA driver hash_compute
* entry point. This function behaves as a hash_compute entry point as
* defined in the PSA driver interface specification for transparent
* drivers.
*
* \param alg The hash algorithm to compute (\c PSA_ALG_XXX value
* such that #PSA_ALG_IS_HASH(\p alg) is true).
* \param[in] input Buffer containing the message to hash.
* \param input_length Size of the \p input buffer in bytes.
* \param[out] hash Buffer where the hash is to be written.
* \param hash_size Size of the \p hash buffer in bytes.
* \param[out] hash_length On success, the number of bytes
* that make up the hash value. This is always
* #PSA_HASH_LENGTH(\p alg).
*
* \retval #PSA_SUCCESS
* Success.
* \retval #PSA_ERROR_NOT_SUPPORTED
* \p alg is not supported
* \retval #PSA_ERROR_BUFFER_TOO_SMALL
* \p hash_size is too small
* \retval #PSA_ERROR_INSUFFICIENT_MEMORY \emptydescription
* \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
*/
psa_status_t mbedtls_psa_hash_compute(
psa_algorithm_t alg,
const uint8_t *input,
size_t input_length,
uint8_t *hash,
size_t hash_size,
size_t *hash_length);
/** Set up a multipart hash operation using Mbed TLS routines.
*
* \note The signature of this function is that of a PSA driver hash_setup
* entry point. This function behaves as a hash_setup entry point as
* defined in the PSA driver interface specification for transparent
* drivers.
*
* If an error occurs at any step after a call to mbedtls_psa_hash_setup(), the
* operation will need to be reset by a call to mbedtls_psa_hash_abort(). The
* core may call mbedtls_psa_hash_abort() at any time after the operation
* has been initialized.
*
* After a successful call to mbedtls_psa_hash_setup(), the core must
* eventually terminate the operation. The following events terminate an
* operation:
* - A successful call to mbedtls_psa_hash_finish() or mbedtls_psa_hash_verify().
* - A call to mbedtls_psa_hash_abort().
*
* \param[in,out] operation The operation object to set up. It must have
* been initialized to all-zero and not yet be in use.
* \param alg The hash algorithm to compute (\c PSA_ALG_XXX value
* such that #PSA_ALG_IS_HASH(\p alg) is true).
*
* \retval #PSA_SUCCESS
* Success.
* \retval #PSA_ERROR_NOT_SUPPORTED
* \p alg is not supported
* \retval #PSA_ERROR_BAD_STATE
* The operation state is not valid (it must be inactive).
* \retval #PSA_ERROR_INSUFFICIENT_MEMORY \emptydescription
* \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
*/
psa_status_t mbedtls_psa_hash_setup(
mbedtls_psa_hash_operation_t *operation,
psa_algorithm_t alg);
/** Clone an Mbed TLS hash operation.
 *
 * \note The signature of this function is that of a PSA driver hash_clone
 *       entry point. This function behaves as a hash_clone entry point as
 *       defined in the PSA driver interface specification for transparent
 *       drivers.
 *
 * This function copies the state of an ongoing hash operation to
 * a new operation object. In other words, this function is equivalent
 * to calling mbedtls_psa_hash_setup() on \p target_operation with the same
 * algorithm that \p source_operation was set up for, then
 * mbedtls_psa_hash_update() on \p target_operation with the same input
 * that was passed to \p source_operation. After this function returns, the
 * two objects are independent, i.e. subsequent calls involving one of
 * the objects do not affect the other object.
 *
 * \param[in] source_operation       The active hash operation to clone.
 * \param[in,out] target_operation   The operation object to set up.
 *                                   It must be initialized but not active.
 *
 * \retval #PSA_SUCCESS \emptydescription
 * \retval #PSA_ERROR_BAD_STATE
 *         The \p source_operation state is not valid (it must be active).
 * \retval #PSA_ERROR_BAD_STATE
 *         The \p target_operation state is not valid (it must be inactive).
 * \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
 * \retval #PSA_ERROR_INSUFFICIENT_MEMORY \emptydescription
 */
psa_status_t mbedtls_psa_hash_clone(
    const mbedtls_psa_hash_operation_t *source_operation,
    mbedtls_psa_hash_operation_t *target_operation);
/** Add a message fragment to a multipart Mbed TLS hash operation.
 *
 * \note The signature of this function is that of a PSA driver hash_update
 *       entry point. This function behaves as a hash_update entry point as
 *       defined in the PSA driver interface specification for transparent
 *       drivers.
 *
 * The application must call mbedtls_psa_hash_setup() before calling this function.
 * This function may be called repeatedly: mbedtls_psa_hash_finish() computes
 * the hash of the message formed by concatenating the fragments passed to
 * all preceding calls.
 *
 * If this function returns an error status, the operation enters an error
 * state and must be aborted by calling mbedtls_psa_hash_abort().
 *
 * \param[in,out] operation Active hash operation.
 * \param[in] input         Buffer containing the message fragment to hash.
 * \param input_length      Size of the \p input buffer in bytes.
 *
 * \retval #PSA_SUCCESS
 *         Success.
 * \retval #PSA_ERROR_BAD_STATE
 *         The operation state is not valid (it must be active).
 * \retval #PSA_ERROR_INSUFFICIENT_MEMORY \emptydescription
 * \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
 */
psa_status_t mbedtls_psa_hash_update(
    mbedtls_psa_hash_operation_t *operation,
    const uint8_t *input,
    size_t input_length);
/** Finish the calculation of the Mbed TLS-calculated hash of a message.
 *
 * \note The signature of this function is that of a PSA driver hash_finish
 *       entry point. This function behaves as a hash_finish entry point as
 *       defined in the PSA driver interface specification for transparent
 *       drivers.
 *
 * The application must call mbedtls_psa_hash_setup() before calling this function.
 * This function calculates the hash of the message formed by concatenating
 * the inputs passed to preceding calls to mbedtls_psa_hash_update().
 *
 * When this function returns successfully, the operation becomes inactive.
 * If this function returns an error status, the operation enters an error
 * state and must be aborted by calling mbedtls_psa_hash_abort().
 *
 * \param[in,out] operation Active hash operation.
 * \param[out] hash         Buffer where the hash is to be written.
 * \param hash_size         Size of the \p hash buffer in bytes. It should
 *                          be at least #PSA_HASH_LENGTH(\c alg) where
 *                          \c alg is the hash algorithm that is calculated.
 * \param[out] hash_length  On success, the number of bytes
 *                          that make up the hash value. This is always
 *                          #PSA_HASH_LENGTH(\c alg) where \c alg is the
 *                          hash algorithm that is calculated.
 *
 * \retval #PSA_SUCCESS
 *         Success.
 * \retval #PSA_ERROR_BAD_STATE
 *         The operation state is not valid (it must be active).
 * \retval #PSA_ERROR_BUFFER_TOO_SMALL
 *         The size of the \p hash buffer is too small. You can determine a
 *         sufficient buffer size by calling #PSA_HASH_LENGTH(\c alg)
 *         where \c alg is the hash algorithm that is calculated.
 * \retval #PSA_ERROR_INSUFFICIENT_MEMORY \emptydescription
 * \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
 */
psa_status_t mbedtls_psa_hash_finish(
    mbedtls_psa_hash_operation_t *operation,
    uint8_t *hash,
    size_t hash_size,
    size_t *hash_length);
/** Abort an Mbed TLS hash operation.
 *
 * \note The signature of this function is that of a PSA driver hash_abort
 *       entry point. This function behaves as a hash_abort entry point as
 *       defined in the PSA driver interface specification for transparent
 *       drivers.
 *
 * Aborting an operation frees all associated resources except for the
 * \p operation structure itself. Once aborted, the operation object
 * can be reused for another operation by calling
 * mbedtls_psa_hash_setup() again.
 *
 * You may call this function any time after the operation object has
 * been initialized by one of the methods described in #psa_hash_operation_t.
 *
 * In particular, calling mbedtls_psa_hash_abort() after the operation has been
 * terminated by a call to mbedtls_psa_hash_abort(), mbedtls_psa_hash_finish() or
 * mbedtls_psa_hash_verify() is safe and has no effect.
 *
 * \param[in,out] operation Initialized hash operation. It may be in any
 *                          state (active, inactive or error).
 *
 * \retval #PSA_SUCCESS \emptydescription
 * \retval #PSA_ERROR_CORRUPTION_DETECTED \emptydescription
 */
psa_status_t mbedtls_psa_hash_abort(
    mbedtls_psa_hash_operation_t *operation);
#endif /* PSA_CRYPTO_HASH_H */ | c | github | https://github.com/nodejs/node | deps/LIEF/third-party/mbedtls/library/psa_crypto_hash.h |
#!/usr/bin/env python
#
# This example introduces the concepts of user interaction with VTK.
# First, a different interaction style (than the default) is defined.
# Second, the interaction is started.
#
#
import vtk
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
#
# Create an actor to represent the cone. The actor orchestrates rendering of
# the mapper's graphics primitives. An actor also refers to properties via a
# vtkProperty instance, and includes an internal transformation matrix. We
# set this actor's mapper to be coneMapper which we created above.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
#
# Create the Renderer and assign actors to it. A renderer is like a
# viewport. It is part or all of a window on the screen and it is responsible
# for drawing the actors it has. We also set the background color here.
#
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
#
# Finally we create the render window which will show up on the screen
# We put our renderer into the render window using AddRenderer. We also
# set the size to be 300 pixels by 300.
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)
#
# The vtkRenderWindowInteractor class watches for events (e.g., keypress,
# mouse) in the vtkRenderWindow. These events are translated into
# event invocations that VTK understands (see VTK/Common/vtkCommand.h
# for all events that VTK processes). Then observers of these VTK
# events can process them as appropriate.
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#
# By default the vtkRenderWindowInteractor instantiates an instance
# of vtkInteractorStyle. vtkInteractorStyle translates a set of events
# it observes into operations on the camera, actors, and/or properties
# in the vtkRenderWindow associated with the vtkRenderWindowInteractor.
# Here we specify a particular interactor style.
style = vtk.vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
#
# Unlike the previous scripts where we performed some operations and then
# exited, here we leave an event loop running. The user can use the mouse
# and keyboard to perform the operations on the scene according to the
# current interaction style.
#
#
# Initialize and start the event loop. Once the render window appears, mouse
# in the window to move the camera. The Start() method executes an event
# loop which listens to user mouse and keyboard events. Note that keypress-e
# exits the event loop. (Look in vtkInteractorStyle.h for a summary of events, or
# the appropriate Doxygen documentation.)
#
iren.Initialize()
iren.Start()
#!/usr/bin/env python
'''
Created on Jul 12, 2011
@author: rafik
'''
from gnuradio import gr, gru, modulation_utils
from gnuradio import usrp
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random, time, struct, sys
import binascii
from grc_gnuradio import wxgui as grc_wxgui
import wx
import transmit_path
import os
class my_top_block(grc_wxgui.top_block_gui):
    """Top-level GNU Radio flow graph.

    A GUI top block whose only component is the USRP transmit path built
    from the parsed command-line options.
    """

    def __init__(self, options):
        grc_wxgui.top_block_gui.__init__(self, title="Top Block")
        # Kept from original: the plain, non-GUI top-block alternative.
        #gr.top_block.__init__(self)
        # Build the USRP transmit path and wire it into the flow graph.
        self.txpath = transmit_path.transmit_path(self, options)
        self.connect(self.txpath)
def get_options():
    """Parse the transmitter's command-line options.

    Prints help and exits with status 1 when positional arguments are
    given or when the required ``--freq`` option is missing.

    :return: ``(options, args)`` tuple as produced by optparse.
    """
    parser = OptionParser(option_class=eng_option, conflict_handler = "resolve")
    expert_grp = parser.add_option_group("Expert")
    parser.add_option("-w", "--which", type="int", default=0,
                      help="select which USRP (0, 1, ...) default is %default",
                      metavar="NUM")
    parser.add_option("-T", "--tx-subdev-spec", type="subdev", default=None,
                      help="select USRP Tx side A or B (default=first one with a daughterboard)")
    parser.add_option("-f", "--freq", type="eng_float", default=None,
                      help="set frequency to FREQ", metavar="FREQ")
    parser.add_option("-a", "--amplitude", type="eng_float", default=2000,
                      help="set Tx amplitude (0-32767) (default=%default)")
    parser.add_option("-r", "--rate", type="eng_float", default=250e3,
                      help="Select modulation symbol rate (default=%default)")
    parser.add_option("", "--sps", type="int", default=2,
                      help="Select samples per symbol (default=%default)")
    parser.add_option("", "--excess-bw", type="eng_float", default=0.35,
                      help="Select RRC excess bandwidth (default=%default)")
    # Packet size; the header adds 19 bytes to the packet -- TODO confirm
    # against transmit_path.
    parser.add_option("-s", "--size", type="eng_float", default=114,
                      help="set packet size [default=%default]")
    # Let the transmit path register its own (expert) options too.
    transmit_path.add_options(parser, expert_grp)
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.print_help()
        sys.exit(1)
    if options.freq is None:
        sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
        parser.print_help(sys.stderr)
        sys.exit(1)
    return (options, args)
def main():
    """Build the flow graph and transmit ten test packets, one per second.

    NOTE: this script is Python 2 only (it uses print statements).
    """
    def send_pkt(payload='', eof=False):
        # 0xe5 and the four packed 16-bit header fields presumably encode
        # packet type and addressing -- meaning defined by
        # transmit_path.send_pkt, TODO confirm.
        return tb.txpath.send_pkt(0xe5, struct.pack("HHHH", 0xFFFF,0xFFFF, 0x10, 0x10), payload, eof)
    def rx_callback(ok, payload):
        # NOTE(review): defined but never registered or called in this script.
        print "ok = %r, payload =%s "% (ok, payload)
    # Parse options for the transmission (BPSK modulation).
    (options, args) = get_options()
    # Begin construction of the flow graph.
    tb = my_top_block(options)
    # Enable real-time scheduling; typically only possible as root.
    r = gr.enable_realtime_scheduling()
    if r != gr.RT_OK:
        print "Warning: failed to enable realtime scheduling"
    tb.start() # Begin the execution of the flow graph.
    #tb.Run() # With the graphical Sink
    # As in the IEEE 802.15.4 examples: construct and send packets.
    for i in range(10):
        print "envoi du message %d: "% (i+1,)
        send_pkt(struct.pack('9B', 0x1, 0x80, 0x80, 0xff, 0xff, 0x10, 0x0, 0x20, 0x0))
        time.sleep(1)
    # Wait for the transmission to finish.
    tb.wait()
# Script entry point: run the transmitter, exiting quietly on Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C during the transmit loop is a normal way to stop; exit
        # without a traceback.
        pass
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.webmvc.mvcservlet.mvclocaleresolverinterceptor
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.Configuration
import org.springframework.web.servlet.LocaleResolver
import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping
import org.springframework.web.servlet.i18n.CookieLocaleResolver
import org.springframework.web.servlet.i18n.LocaleChangeInterceptor
// tag::snippet[]
@Configuration
class WebConfiguration {

	/** Resolve the current locale from (and persist it to) a browser cookie. */
	@Bean
	fun localeResolver(): LocaleResolver {
		return CookieLocaleResolver()
	}

	/**
	 * Map `*.view` URLs to `someController` and attach an interceptor that
	 * switches the resolved locale whenever a `siteLanguage` request
	 * parameter is present (e.g. `?siteLanguage=nl`).
	 */
	@Bean
	fun urlMapping() = SimpleUrlHandlerMapping().apply {
		setInterceptors(LocaleChangeInterceptor().apply {
			paramName = "siteLanguage"
		})
		urlMap = mapOf("/**/*.view" to "someController")
	}
}
// end::snippet[]
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import os, re
from os import makedirs, sys, remove, rename
from sys import path
from optparse import OptionParser
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf , printf
from libs.python_modules.utils.sysutil import getstatusoutput, pathDelim
from libs.python_modules.parsers.fastareader import FastaReader
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed 'source MetaPathwaysrc'""")
print(""" """)
sys.exit(3)
# Platform-specific path delimiter, resolved once via sysutil.pathDelim().
PATHDELIM = pathDelim()
# Usage banner printed by main() when required options are missing.
usage= sys.argv[0] + """ -s <sample_name> -f <output_folder> -i <input_folder> """
# Module-level OptionParser; created by createParser() before main() runs.
parser = None
def createParser():
    """Populate the module-level ``parser`` with this script's options."""
    global parser
    epilog = """
    This script generates run stats of a MetaPathways processed sample."""
    # Collapse runs of horizontal whitespace so the epilog wraps cleanly
    # in --help output.
    epilog = re.sub(r'[ \t\f\v]+',' ', epilog)
    parser = OptionParser(usage=usage, epilog=epilog)
    parser.add_option("-s", "--sample_name", dest="sample_name",
                      help='the sample name [REQUIRED]')
    parser.add_option("-f", "--output_folder", dest="output_folder",
                      help='the output folder where the sample is [REQUIRED]')
    parser.add_option("-i", "--input_folder", dest="input_folder",
                      help='the input folder where the input is [REQUIRED]')
def valid_arguments(opts, args):
    """Validate parsed options; return True only when all required are set.

    A missing input folder is reported as a warning but does not make the
    arguments invalid.
    """
    ok = True
    if opts.sample_name is None:
        print('ERROR: Missing sample name')
        ok = False
    if opts.output_folder is None:
        print('ERROR: Missing output folder for sample')
        ok = False
    if opts.input_folder is None:
        print('WARNING: Missing input folder for sample')
    return ok
def isAminoAcidSequence(sequence):
    """Heuristically decide whether ``sequence`` looks like amino acids.

    Counts the number of *distinct* protein-alphabet letters present in
    the sequence; more than 10 distinct residue codes is taken to mean
    protein (pure nucleotide data can only ever hit A/T/C/G, i.e. 4).

    An empty or falsy sequence is reported as amino acid (``True``),
    matching the original behavior, so callers leave it unmodified.

    :param sequence: sequence string, any case
    :return: True if the sequence looks like amino acids
    """
    if not sequence:
        return True
    # Protein alphabet used by the original lookup table (includes the
    # ambiguity codes B, J and Z). Rewritten as a set so the builtin
    # ``list`` is no longer shadowed by the presence-flag dict.
    residues = frozenset('ARNDCQEGHILKMFPSTWYVBJZ')
    distinct = {ch for ch in sequence.upper() if ch in residues}
    return len(distinct) > 10
def filter_sequence(sequence):
    """Return the longest run of unambiguous nucleotides in ``sequence``.

    Amino-acid sequences are returned untouched. For nucleotide input,
    every character outside [atcgATCG] is treated as a break point and
    the first longest fragment between breaks is returned.
    """
    if isAminoAcidSequence(sequence):
        return sequence
    masked = re.sub(r'[^atcgATCG]','-', sequence.strip())
    # max() returns the first maximal fragment, matching the original
    # strict-greater-than scan.
    return max(masked.split('-'), key=len)
class FastaRecord(object):
    """A single FASTA record: header line (with '>') plus its sequence."""

    def __init__(self, name, sequence):
        self.name = name          # header line, '>' prefix retained
        self.sequence = sequence  # concatenated sequence, newlines removed

    def __repr__(self):
        return "FastaRecord(%r, %r)" % (self.name, self.sequence)


def read_fasta_records(input_file):
    """Read all FASTA records from an open text file object.

    Header lines keep their leading '>'. A record is emitted only when
    both a header and a non-empty sequence have been seen, so a trailing
    header with no sequence lines is silently dropped — same behavior as
    the original readline() loop, but written as a single for-loop
    without the unreachable duplicate EOF handling.

    :param input_file: file-like object opened for text reading
    :return: list of FastaRecord
    """
    records = []
    name = ""
    sequence = ""
    for line in input_file:
        line = line.rstrip()
        if not line:
            continue
        if line.startswith(">"):
            # Flush the previous record (if complete) and start a new one.
            if name and sequence:
                records.append(FastaRecord(name, sequence))
            name = line
            sequence = ""
        else:
            sequence += line
    if name and sequence:
        records.append(FastaRecord(name, sequence))
    return records
# the main function
# NOTE(review): SIZE does not appear to be referenced in this script --
# confirm before removing.
SIZE = 1000
def main(argv, errorlogger=None, runstatslogger=None):
    """Print run statistics for one sample's FASTA/ORF files.

    :param argv: command-line arguments (without the program name), parsed
        with the module-level ``parser`` built by createParser().
    :param errorlogger: accepted for pipeline compatibility; unused here.
    :param runstatslogger: accepted for pipeline compatibility; unused here.
    """
    global parser
    (opts, args) = parser.parse_args(argv)
    if not valid_arguments(opts, args):
        # print() works on both Python 2 and 3; the original used the
        # Python-2-only statement form ("print usage"), inconsistent with
        # the function-style print calls used elsewhere in this script.
        print(usage)
        sys.exit(0)

    sample_name = opts.sample_name
    output_folder = opts.output_folder
    input_folder = opts.input_folder
    # (unused locals _MAX / seq_count / allNames / outputStr removed)

    if opts.input_folder is not None:
        # Each stat line carries a numeric priority prefix (apparently used
        # downstream to order the report); the helper prints at
        # priority+5 .. priority+9.
        formats = "%s\tNumber of sequences in input file BEFORE QC (nucleotide)\t%s\n"
        compute_fasta_stats(formats, input_folder + PATHDELIM + sample_name + ".fasta", 'nucleotide', 995)

        formats = "%s\tNumber of sequences AFTER QC (nucleotide)\t%s\n"
        compute_fasta_stats(formats, output_folder + PATHDELIM + sample_name + PATHDELIM + 'preprocessed' + PATHDELIM + sample_name + ".fasta", 'nucleotide', 1000)

        formats = "%s\tNumber of translated ORFs BEFORE QC (amino)\t%s\n"
        compute_fasta_stats(formats, output_folder + PATHDELIM + sample_name + PATHDELIM + 'orf_prediction' + PATHDELIM + sample_name + ".faa", 'amino', 1995)

        formats = "%s\tNumber of translated ORFs AFTER QC (amino)\t%s\n"
        compute_fasta_stats(formats, output_folder + PATHDELIM + sample_name + PATHDELIM + 'orf_prediction' + PATHDELIM + sample_name + ".qced.faa", 'amino', 2000)
def compute_fasta_stats(formats, input_file, seqtype, priority):
    """Print count/min/avg/max/total-length stats for one FASTA file.

    ``formats`` is the %-style template for the headline (priority and
    sequence count); the headline is printed at ``priority + 5`` and the
    remaining lines at ``priority + 6`` .. ``priority + 9``.

    NOTE(review): ``seqtype`` is not used in this body (the label is baked
    into ``formats``); ``TOTAL_LENGTH`` is initialized but never updated --
    the total printed below is reconstructed as avg * count.
    """
    MIN_LENGTH='MIN_LENGTH'
    MAX_LENGTH='MAX_LENGTH'
    NUMSEQ='NUMSEQ'
    TOTAL_LENGTH='TOTAL_LENGTH'
    AVG_LENGTH='AVG_LENGTH'
    stats = {
        MIN_LENGTH: 0,
        MAX_LENGTH: 0,
        NUMSEQ : 0,
        TOTAL_LENGTH: 0,
        AVG_LENGTH : 0
    }
    """ min length """
    # Sentinels so the first record always updates both extremes.
    _MAX = 1000000000000
    stats[MAX_LENGTH] = -(_MAX)
    stats[MIN_LENGTH]= _MAX
    fastareader= FastaReader(input_file)
    """ process one fasta sequence at a time """
    lengths_str=""  # NOTE(review): never used after this point
    for record in fastareader:
        seqname = record.name  # unused; kept from original
        seq = record.sequence
        length = len(seq)
        stats[NUMSEQ] += 1
        # AVG_LENGTH accumulates the running total; divided by the count
        # after the loop.
        stats[AVG_LENGTH] = stats[AVG_LENGTH] + length
        if stats[MIN_LENGTH] > length:
            stats[MIN_LENGTH] = length
        if stats[MAX_LENGTH] < length:
            stats[MAX_LENGTH] = length
    if stats[NUMSEQ] > 0 :
        # True division: ``from __future__ import division`` at module top.
        stats[AVG_LENGTH] = stats[AVG_LENGTH]/stats[NUMSEQ]
    else:
        stats[AVG_LENGTH] = 0
    printf(formats %(str(priority + 5), str(stats[NUMSEQ])))
    printf("%s\t-min length\t%s\n" %(str(priority + 6), str(stats[MIN_LENGTH])) )
    printf("%s\t-avg length\t%s\n" %( str(priority + 7), str(int(stats[AVG_LENGTH]))))
    printf("%s\t-max length\t%s\n" %( str(priority + 8), str(stats[MAX_LENGTH])) )
    printf("%s\t-total base pairs (bp)\t%s\n" %( str(priority + 9), str(int(stats[AVG_LENGTH]* stats[NUMSEQ])) ))
def MetaPathways_filter_input(argv, errorlogger = None, runstatslogger = None):
    """Pipeline entry point: build the option parser and run main().

    Returns ``(0, '')`` -- presumably the (exit-code, message) pair expected
    by the MetaPathways pipeline runner; TODO confirm against the caller.
    """
    createParser()
    main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger)
    return (0,'')
# Stand-alone invocation: build the option parser, then run with CLI args.
if __name__ == "__main__":
    createParser()
    main(sys.argv[1:])
import numpy
import six
import chainer
from chainer import configuration
from chainer import functions
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
class BatchNormalization(link.Link):
"""Batch normalization layer on outputs of linear or convolution functions.
This link wraps the :func:`~chainer.functions.batch_normalization` and
:func:`~chainer.functions.fixed_batch_normalization` functions.
It runs in three modes: training mode, fine-tuning mode, and testing mode.
In training mode, it normalizes the input by *batch statistics*. It also
maintains approximated population statistics by moving averages, which can
be used for instant evaluation in testing mode. Training mode is enabled
when ``chainer.config.train`` is set to ``True`` and :meth:`__call__`
is invoked with ``finetune=False`` (the default is False).
In fine-tuning mode, it accumulates the input to compute *population
statistics*. In order to correctly compute the population statistics, a
user must use this mode to feed mini-batches running through whole training
dataset. Finetuning mode is enabled when ``chainer.config.train`` is set to
``True`` and :meth:`__call__` is invoked with ``finetune=True``.
In testing mode, it uses pre-computed population statistics to normalize
the input variable. The population statistics is approximated if it is
computed by training mode, or accurate if it is correctly computed by
fine-tuning mode. Testing mode is enabled when ``chainer.config.train``
is set to ``False``.
Args:
size (int, tuple of ints, or None): Size (or shape) of channel
dimensions. If ``None``, the size will be determined from
dimension(s) of the input batch during the first forward pass.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
use_gamma (bool): If ``True``, use scaling parameter. Otherwise, use
unit(1) which makes no effect.
use_beta (bool): If ``True``, use shifting parameter. Otherwise, use
unit(0) which makes no effect.
axis (int or tuple of int): Axis over which normalization is
performed. When axis is ``None``, it is determined from input
dimensions. For example, if ``x.ndim`` is 4, axis becomes (0, 2, 3)
and normalization is performed over 0th, 2nd and 3rd axis of input.
If it is 2, axis becomes (0) and normalization is performed
over 0th axis of input. When a tuple of int is given to this
option, numbers in the tuple must be being sorted in ascending
order. For example, (0, 2) is OK, but (2, 0) is not.
initial_gamma: Initializer of the scaling parameter. The default value
is ``1``.
initial_beta: Initializer of the shifting parameter. The default value
is ``0``.
initial_avg_mean: Initializer of the moving average of population mean.
The default value is ``0``.
initial_avg_var: Initializer of the moving average of population
variance. The default value is ``1``.
.. note::
From v5.0.0, the initial value of the population variance is changed to
1. It does not change the behavior of training, but the resulting model
may have a slightly different behavior on inference. To emulate the
old behavior, pass ``initial_avg_var=0`` for training.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso::
:func:`~chainer.functions.batch_normalization`,
:func:`~chainer.functions.fixed_batch_normalization`
Attributes:
gamma (~chainer.Variable): Scaling parameter. In mixed16 mode, it is
initialized as float32 variable.
beta (~chainer.Variable): Shifting parameter. In mixed16 mode, it is
initialized as float32 variable.
avg_mean (:ref:`ndarray`): Population mean. In mixed16 mode, it is
initialized as float32 array.
avg_var (:ref:`ndarray`): Population variance. In mixed16 mode, it is
initialized as float32 array.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability. This value is added
to the batch variances.
.. admonition:: Example
>>> x = np.arange(12).reshape(4, 3).astype(np.float32) ** 2
>>> x
array([[ 0., 1., 4.],
[ 9., 16., 25.],
[ 36., 49., 64.],
[ 81., 100., 121.]], dtype=float32)
>>> bn = chainer.links.BatchNormalization(3)
>>> bn(x)
variable([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.23583598],
[ 1.5714287 , 1.5404074 , 1.5160885 ]])
>>> (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 2e-5)
array([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.235836 ],
[ 1.5714285 , 1.5404074 , 1.5160886 ]], dtype=float32)
There are several ways to make a BatchNormalization link.
Consider an input of batched 10 images of 32x32 with 3 channels.
>>> x = np.random.randn(10, 3, 32, 32).astype(np.float32)
1. Give the parameter size:
To normalize for each channel, give the number of channels
to ``size``.
>>> bn = chainer.links.BatchNormalization(3)
>>> bn.avg_mean.shape
(3,)
>>> bn.beta += 2.0
>>> bn.gamma *= 5.0
>>> list(sorted(bn.namedparams())) # doctest: +ELLIPSIS
[('/beta', variable([2., ...])), ('/gamma', variable([5., ...]))]
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=(0, 2, 3)), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=(0, 2, 3)),
... bn.gamma.array, atol=1e-3)
To normalize for each channel for each pixel, ``size`` should
be the tuple of the dimensions.
>>> bn = chainer.links.BatchNormalization((3, 32, 32))
>>> bn.avg_mean.shape
(3, 32, 32)
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=0), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=0),
... bn.gamma.array, atol=1e-3)
By default, channel axis is (or starts from) the 1st axis of the
input shape.
2. Give the aggregate axes:
from Chainer v5
With ``axis`` option, similarly to NumPy, you may specify the
aggregate axes, which are treated as the "batch" axes for the
batch statistics.
You can omit ``size`` if ``axis`` is given. In this case, creation
of persistent values ``avg_mean``, ``avg_var`` and parameters
``beta``, ``gamma`` is deferred until first forward propagation.
The examples in 1. corresponds to the following, respectively.
>>> bn = chainer.links.BatchNormalization(axis=(0, 2, 3))
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3,)
>>> bn = chainer.links.BatchNormalization(axis=0)
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3, 32, 32)
"""
gamma = None
beta = None
avg_mean = None
avg_var = None
    def __init__(self, size=None, decay=0.9, eps=2e-5, dtype=None,
                 use_gamma=True, use_beta=True,
                 initial_gamma=None, initial_beta=None, axis=None,
                 initial_avg_mean=None, initial_avg_var=None):
        """Initialize the link; see the class docstring for arguments."""
        super(BatchNormalization, self).__init__()
        if size is None and axis is None:
            raise RuntimeError('size or axis is required')
        # Kept until _initialize_params() consumes them (deferred-init path).
        self._initial_avg_mean = initial_avg_mean
        self._initial_avg_var = initial_avg_var
        self.N = 0  # number of batches accumulated in fine-tuning mode
        self.register_persistent('N')
        self.decay = decay
        self.eps = eps
        if isinstance(axis, six.integer_types):
            axis = (axis,)  # normalize a single int to a tuple
        self.axis = axis
        # float32 is used for parameters/statistics even in mixed16 mode.
        self._highprec_dtype = chainer.get_dtype(
            dtype, map_mixed16=numpy.float32)
        with self.init_scope():
            if use_gamma:
                if initial_gamma is None:
                    initial_gamma = 1
                gamma_initializer = \
                    initializers._get_initializer(initial_gamma)
                gamma_initializer.dtype = self._highprec_dtype
                self.gamma = variable.Parameter(gamma_initializer)
            if use_beta:
                if initial_beta is None:
                    initial_beta = 0
                beta_initializer = initializers._get_initializer(initial_beta)
                beta_initializer.dtype = self._highprec_dtype
                self.beta = variable.Parameter(beta_initializer)
        if size is not None:
            # Shape known up front: allocate parameters/statistics eagerly.
            self._initialize_params(size)
    def _initialize_params(self, shape):
        """Create the running statistics and initialize gamma/beta for ``shape``."""
        self.avg_mean = self._init_array(self._initial_avg_mean, 0, shape)
        self._initial_avg_mean = None  # drop the initializer once consumed
        self.register_persistent('avg_mean')
        self.avg_var = self._init_array(self._initial_avg_var, 1, shape)
        self._initial_avg_var = None
        self.register_persistent('avg_var')
        # gamma/beta exist only when use_gamma/use_beta were True in __init__.
        if self.gamma is not None:
            self.gamma.initialize(shape)
        if self.beta is not None:
            self.beta.initialize(shape)
    def _init_array(self, initializer, default_value, size):
        """Materialize an array of ``size`` on this link's device.

        ``initializer`` may be None (falls back to ``default_value``), a
        scalar, or an initializer object. The high-precision dtype is used
        so mixed16 links keep float32 statistics.
        """
        if initializer is None:
            initializer = default_value
        initializer = initializers._get_initializer(initializer)
        return initializers.generate_array(
            initializer, size, self.xp, dtype=self._highprec_dtype,
            device=self.device)
@property
def printable_specs(self):
specs = [
('size', self.avg_mean.shape[0]),
('decay', self.decay),
('eps', self.eps),
('dtype', self.avg_mean.dtype),
('use_gamma', hasattr(self, 'gamma')),
('use_beta', hasattr(self, 'beta')),
]
for spec in specs:
yield spec
    def forward(self, x, **kwargs):
        """forward(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        Args:
            x (~chainer.Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        # Reject the removed ``test=...`` keyword with a helpful message.
        finetune, = argument.parse_kwargs(
            kwargs, ('finetune', False),
            test='test argument is not supported anymore. '
            'Use chainer.using_config')

        if self.avg_mean is None:
            # Deferred initialization: infer the parameter shape from the
            # first input by dropping the aggregate axes.
            param_shape = tuple([
                d
                for i, d in enumerate(x.shape)
                if i not in self.axis])
            self._initialize_params(param_shape)

        # When scaling/shifting is disabled (use_gamma/use_beta False),
        # substitute constant 1/0 arrays so the functional API always
        # receives gamma and beta.
        gamma = self.gamma
        if gamma is None:
            with chainer.using_device(self.device):
                gamma = self.xp.ones(
                    self.avg_mean.shape, dtype=self._highprec_dtype)

        beta = self.beta
        if beta is None:
            with chainer.using_device(self.device):
                beta = self.xp.zeros(
                    self.avg_mean.shape, dtype=self._highprec_dtype)

        if configuration.config.train:
            if finetune:
                # decay = 1 - 1/N yields an equally weighted average over
                # the N fine-tuning batches, assuming the standard
                # running-average update in functions.batch_normalization.
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            avg_mean = self.avg_mean
            avg_var = self.avg_var

            if chainer.config.in_recomputing:
                # Do not update statistics when extra forward computation is
                # called (e.g. recompute passes); passing None below skips
                # the running-statistics update.
                if finetune:
                    self.N -= 1  # Revert the count
                avg_mean = None
                avg_var = None

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=avg_mean,
                running_var=avg_var, decay=decay, axis=self.axis)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = self.avg_mean
            var = self.avg_var
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps, axis=self.axis)
        return ret
    def start_finetuning(self):
        """Resets the population count for collecting population statistics.

        This method can be skipped if it is the first time to use the
        fine-tuning mode. Otherwise, this method should be called before
        starting the fine-tuning mode again.
        """
        # N counts fine-tuning batches; forward() derives the statistics
        # weighting from it (decay = 1 - 1/N).
        self.N = 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.