code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
#!/usr/bin/env python ############################################################################ # Joshua R. Boverhof # See LBNLCopyright for copyright notice! ########################################################################### import sys, unittest from ServiceTest import main, ServiceTestCase, ServiceTestSuite """ Unittest for contacting the threatService Web service. WSDL: http://www.boyzoid.com/threat.cfc?wsdl """ # General targets def dispatch(): """Run all dispatch tests""" suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(HomelandTestCase, 'test_dispatch')) return suite def local(): """Run all local tests""" suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(HomelandTestCase, 'test_local')) return suite def net(): """Run all network tests""" suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(HomelandTestCase, 'test_net')) return suite def all(): """Run all tests""" suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(HomelandTestCase, 'test_')) return suite class HomelandTestCase(ServiceTestCase): """Test case for ZipCodeResolver Web service """ name = "test_ThreatService" client_file_name = "Current_Homeland_Security_Threat_Level_client.py" types_file_name = "Current_Homeland_Security_Threat_Level_types.py" server_file_name = None def __init__(self, methodName): ServiceTestCase.__init__(self, methodName) self.wsdl2py_args.append('-b') def test_net_threatLevel(self): loc = self.client_module.Current_Homeland_Security_Threat_LevelLocator() port = loc.getthreat_cfc(**self.getPortKWArgs()) msg = self.client_module.threatLevelRequest() rsp = port.threatLevel(msg) for item in rsp.ThreatLevelReturn.Item: item.Key item.Value if __name__ == "__main__" : main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies 'AR' in make_global_settings. """ import os import sys import TestGyp def resolve_path(test, path): if path is None: return None elif test.format == 'make': return '$(abspath %s)' % path elif test.format in ['ninja', 'xcode-ninja']: return os.path.join('..', '..', path) else: test.fail_test() def verify_ar_target(test, ar=None, rel_path=False): if rel_path: ar_expected = resolve_path(test, ar) else: ar_expected = ar # Resolve default values if ar_expected is None: if test.format == 'make': # Make generator hasn't set the default value for AR. # You can remove the following assertion as long as it doesn't # break existing projects. test.must_not_contain('Makefile', 'AR ?= ') return elif test.format in ['ninja', 'xcode-ninja']: if sys.platform == 'win32': ar_expected = 'lib.exe' else: ar_expected = 'ar' if test.format == 'make': test.must_contain('Makefile', 'AR ?= %s' % ar_expected) elif test.format in ['ninja', 'xcode-ninja']: test.must_contain('out/Default/build.ninja', 'ar = %s' % ar_expected) else: test.fail_test() def verify_ar_host(test, ar=None, rel_path=False): if rel_path: ar_expected = resolve_path(test, ar) else: ar_expected = ar # Resolve default values if ar_expected is None: ar_expected = 'ar' if test.format == 'make': test.must_contain('Makefile', 'AR.host ?= %s' % ar_expected) elif test.format in ['ninja', 'xcode-ninja']: test.must_contain('out/Default/build.ninja', 'ar_host = %s' % ar_expected) else: test.fail_test() test_format = ['ninja'] if sys.platform in ('linux2', 'darwin'): test_format += ['make'] test = TestGyp.TestGyp(formats=test_format) # Check default values test.run_gyp('make_global_settings_ar.gyp') verify_ar_target(test) # Check default values with GYP_CROSSCOMPILE enabled. 
with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}): test.run_gyp('make_global_settings_ar.gyp') verify_ar_target(test) verify_ar_host(test) # Test 'AR' in 'make_global_settings'. with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}): test.run_gyp('make_global_settings_ar.gyp', '-Dcustom_ar_target=my_ar') verify_ar_target(test, ar='my_ar', rel_path=True) # Test 'AR'/'AR.host' in 'make_global_settings'. with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}): test.run_gyp('make_global_settings_ar.gyp', '-Dcustom_ar_target=my_ar_target1', '-Dcustom_ar_host=my_ar_host1') verify_ar_target(test, ar='my_ar_target1', rel_path=True) verify_ar_host(test, ar='my_ar_host1', rel_path=True) # Test $AR and $AR_host environment variables. with TestGyp.LocalEnv({'AR': 'my_ar_target2', 'AR_host': 'my_ar_host2'}): test.run_gyp('make_global_settings_ar.gyp') # Ninja generator resolves $AR in gyp phase. Make generator doesn't. if test.format == 'ninja': if sys.platform == 'win32': # TODO(yukawa): Make sure if this is an expected result or not. verify_ar_target(test, ar='lib.exe', rel_path=False) else: verify_ar_target(test, ar='my_ar_target2', rel_path=False) verify_ar_host(test, ar='my_ar_host2', rel_path=False) # Test 'AR' in 'make_global_settings' with $AR_host environment variable. with TestGyp.LocalEnv({'AR_host': 'my_ar_host3'}): test.run_gyp('make_global_settings_ar.gyp', '-Dcustom_ar_target=my_ar_target3') verify_ar_target(test, ar='my_ar_target3', rel_path=True) verify_ar_host(test, ar='my_ar_host3', rel_path=False) test.pass_test()
unknown
codeparrot/codeparrot-clean
name: "CodeQL" on: pull_request: schedule: # run weekly new vulnerability was added to the database - cron: '0 0 * * 0' jobs: analyze: name: Analyze runs-on: ubuntu-latest if: github.event_name != 'schedule' || github.repository == 'redis/redis' strategy: fail-fast: false matrix: language: [ 'cpp' ] steps: - name: Checkout repository uses: actions/checkout@v4 - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} - name: Autobuild uses: github/codeql-action/autobuild@v3 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3
unknown
github
https://github.com/redis/redis
.github/workflows/codeql-analysis.yml
# -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. import json import pytest from django import forms from django.http import Http404 from pytest_pootle.factories import LanguageDBFactory, UserFactory from pytest_pootle.utils import create_api_request from accounts.models import User from pootle.core.delegate import panels from pootle.core.plugin import provider from pootle.core.views import APIView from pootle.core.views.browse import PootleBrowseView from pootle.core.views.display import StatsDisplay from pootle.core.views.panels import Panel from pootle.core.views.widgets import TableSelectMultiple from pootle_config.utils import ObjectConfig def _test_stats_display(obj): stats = StatsDisplay(obj) assert stats.context == obj stat_data = obj.data_tool.get_stats() assert stats.stat_data == stat_data.copy() stats.add_children_info(stat_data) if stat_data.get("last_submission"): stat_data["last_submission"]["msg"] = ( stats.get_action_message( stat_data["last_submission"])) StatsDisplay(obj).localize_stats(stat_data) assert stat_data == stats.stats class UserAPIView(APIView): model = User restrict_to_methods = ('get', 'delete',) page_size = 10 fields = ('username', 'full_name',) class WriteableUserAPIView(APIView): model = User fields = ('username', 'email',) class UserSettingsForm(forms.ModelForm): password = forms.CharField(required=False) class Meta(object): model = User fields = ('username', 'password', 'full_name') widgets = { 'password': forms.PasswordInput(), } def clean_password(self): return self.cleaned_data['password'].upper() class WriteableUserSettingsAPIView(APIView): model = User edit_form_class = UserSettingsForm class UserM2MAPIView(APIView): model = User restrict_to_methods = ('get', 'delete',) page_size = 10 fields = 
('username', 'alt_src_langs',) m2m = ('alt_src_langs', ) class UserConfigAPIView(APIView): model = User restrict_to_methods = ('get', 'delete',) page_size = 10 fields = ('username', ) config = ( ("foo0", "foo0.bar"), ("foo1", "foo1.bar")) def test_apiview_invalid_method(rf): """Tests for invalid methods.""" view = UserAPIView.as_view() # Forbidden method request = create_api_request(rf, 'post') response = view(request) # "Method not allowed" if the method is not within the restricted list assert response.status_code == 405 # Non-existent method request = create_api_request(rf, 'patch') response = view(request) assert response.status_code == 405 @pytest.mark.django_db def test_apiview_get_single(rf): """Tests retrieving a single object using the API.""" view = UserAPIView.as_view() user = UserFactory.create(username='foo') request = create_api_request(rf) response = view(request, id=user.id) # This should have been a valid request... assert response.status_code == 200 # ...and JSON-encoded, so should properly parse it response_data = json.loads(response.content) assert isinstance(response_data, dict) assert response_data['username'] == 'foo' assert 'email' not in response_data # Non-existent IDs should return 404 with pytest.raises(Http404): view(request, id='777') @pytest.mark.django_db def test_apiview_get_multiple(rf, no_extra_users): """Tests retrieving multiple objects using the API.""" view = UserAPIView.as_view() UserFactory.create(username='foo') request = create_api_request(rf) response = view(request) response_data = json.loads(response.content) # Response should contain a 1-item list assert response.status_code == 200 assert isinstance(response_data, dict) assert 'count' in response_data assert 'models' in response_data assert len(response_data['models']) == User.objects.count() # Let's add more users UserFactory.create_batch(5) response = view(request) response_data = json.loads(response.content) assert response.status_code == 200 assert 
isinstance(response_data, dict) assert 'count' in response_data assert 'models' in response_data assert len(response_data['models']) == User.objects.count() # Let's add even more users to test pagination UserFactory.create_batch(5) response = view(request) response_data = json.loads(response.content) # First page is full assert response.status_code == 200 assert isinstance(response_data, dict) assert 'count' in response_data assert 'models' in response_data assert len(response_data['models']) == 10 request = create_api_request(rf, url='/?p=2') response = view(request) response_data = json.loads(response.content) # Second page constains a single user assert response.status_code == 200 assert isinstance(response_data, dict) assert 'count' in response_data assert 'models' in response_data assert len(response_data['models']) == User.objects.count() - 10 @pytest.mark.django_db def test_apiview_post(rf): """Tests creating a new object using the API.""" view = WriteableUserAPIView.as_view() # Malformed request, only JSON-encoded data is understood request = create_api_request(rf, 'post') response = view(request) response_data = json.loads(response.content) assert response.status_code == 400 assert 'msg' in response_data assert response_data['msg'] == 'Invalid JSON data' # Not sending all required data fails validation missing_data = { 'not_a_field': 'not a value', } request = create_api_request(rf, 'post', data=missing_data) response = view(request) response_data = json.loads(response.content) assert response.status_code == 400 assert 'errors' in response_data # Sending all required data should create a new user data = { 'username': 'foo', 'email': 'foo@bar.tld', } request = create_api_request(rf, 'post', data=data) response = view(request) response_data = json.loads(response.content) assert response.status_code == 200 assert response_data['username'] == 'foo' user = User.objects.latest('id') assert user.username == 'foo' # Trying to add the same user again should fail 
validation response = view(request) response_data = json.loads(response.content) assert response.status_code == 400 assert 'errors' in response_data @pytest.mark.django_db def test_apiview_put(rf): """Tests updating an object using the API.""" view = WriteableUserAPIView.as_view() user = UserFactory.create(username='foo') # Malformed request, only JSON-encoded data is understood request = create_api_request(rf, 'put') response = view(request, id=user.id) response_data = json.loads(response.content) assert response.status_code == 400 assert response_data['msg'] == 'Invalid JSON data' # Update a field's data new_username = 'foo_new' update_data = { 'username': new_username, } request = create_api_request(rf, 'put', data=update_data) # Requesting unknown resources is a 404 with pytest.raises(Http404): view(request, id='11') # All fields must be submitted response = view(request, id=user.id) response_data = json.loads(response.content) assert response.status_code == 400 assert 'errors' in response_data # Specify missing fields update_data.update({ 'email': user.email, }) request = create_api_request(rf, 'put', data=update_data) response = view(request, id=user.id) response_data = json.loads(response.content) # Now all is ok assert response.status_code == 200 assert response_data['username'] == new_username # Email shouldn't have changed assert response_data['email'] == user.email # View with a custom form update_data.update({ 'password': 'd34db33f', }) view = WriteableUserSettingsAPIView.as_view() request = create_api_request(rf, 'put', data=update_data) response = view(request, id=user.id) response_data = json.loads(response.content) assert response.status_code == 200 assert 'password' not in response_data @pytest.mark.django_db def test_apiview_delete(rf): """Tests deleting an object using the API.""" view = UserAPIView.as_view() user = UserFactory.create(username='foo') # Delete is not supported for collections request = create_api_request(rf, 'delete') response = 
view(request) assert response.status_code == 405 assert User.objects.filter(id=user.id).count() == 1 # But it is supported for single items (specified by id): response = view(request, id=user.id) assert response.status_code == 200 assert User.objects.filter(id=user.id).count() == 0 # Should raise 404 if we try to access a deleted resource again: with pytest.raises(Http404): view(request, id=user.id) @pytest.mark.django_db def test_apiview_search(rf): """Tests filtering through a search query.""" # Note that `UserAPIView` is configured to search in all defined fields, # which are `username` and `full_name` view = UserAPIView.as_view() # Let's create some users to search for UserFactory.create(username='foo', full_name='Foo Bar') UserFactory.create(username='foobar', full_name='Foo Bar') UserFactory.create(username='foobarbaz', full_name='Foo Bar') # `q=bar` should match 3 users (full names match) request = create_api_request(rf, url='/?q=bar') response = view(request) response_data = json.loads(response.content) assert response.status_code == 200 assert len(response_data['models']) == 3 # `q=baz` should match 1 user request = create_api_request(rf, url='/?q=baz') response = view(request) response_data = json.loads(response.content) assert response.status_code == 200 assert len(response_data['models']) == 1 # Searches are case insensitive; `q=BaZ` should match 1 user request = create_api_request(rf, url='/?q=BaZ') response = view(request) response_data = json.loads(response.content) assert response.status_code == 200 assert len(response_data['models']) == 1 @pytest.mark.django_db def test_view_gathered_context_data(rf, member, no_context_data): from pootle.core.views.base import PootleDetailView from pootle_project.models import Project from pootle.core.delegate import context_data class DummyView(PootleDetailView): model = Project def get_object(self): return Project.objects.get(code="project0") def get_context_data(self, *args, **kwargs): return dict(foo="bar") 
@property def permission_context(self): return self.get_object().directory request = rf.get("foo") request.user = member view = DummyView.as_view() response = view(request) assert response.context_data == dict(foo="bar") @provider(context_data, sender=DummyView) def provide_context_data(sender, **kwargs): return dict( foo2="bar2", sender=sender, context=kwargs["context"], view=kwargs["view"]) view = DummyView.as_view() response = view(request) assert response.context_data.pop("sender") == DummyView assert response.context_data.pop("context") is response.context_data assert isinstance(response.context_data.pop("view"), DummyView) assert sorted(response.context_data.items()) == [ ("foo", "bar"), ("foo2", "bar2")] @pytest.mark.django_db def test_apiview_get_single_m2m(rf): """Tests retrieving a single object with an m2m field using the API.""" view = UserM2MAPIView.as_view() user = UserFactory.create(username='foo') request = create_api_request(rf) response = view(request, id=user.id) response_data = json.loads(response.content) assert response_data["alt_src_langs"] == [] user.alt_src_langs.add(LanguageDBFactory(code="alt1")) user.alt_src_langs.add(LanguageDBFactory(code="alt2")) request = create_api_request(rf) response = view(request, id=user.id) response_data = json.loads(response.content) assert response_data["alt_src_langs"] assert ( response_data["alt_src_langs"] == list(str(l) for l in user.alt_src_langs.values_list("pk", flat=True))) @pytest.mark.django_db def test_apiview_get_multi_m2m(rf): """Tests several objects with m2m fields using the API.""" view = UserM2MAPIView.as_view() user0 = UserFactory.create(username='foo0') user1 = UserFactory.create(username='foo1') request = create_api_request(rf) response = view(request) response_data = json.loads(response.content) for model in [x for x in response_data["models"] if x['username'] in ['foo0', 'foo1']]: assert model['alt_src_langs'] == [] user0.alt_src_langs.add(LanguageDBFactory(code="alt1")) 
user0.alt_src_langs.add(LanguageDBFactory(code="alt2")) user1.alt_src_langs.add(LanguageDBFactory(code="alt3")) user1.alt_src_langs.add(LanguageDBFactory(code="alt4")) request = create_api_request(rf) response = view(request) response_data = json.loads(response.content) for model in response_data["models"]: user = User.objects.get(username=model["username"]) if user in [user0, user1]: assert model["alt_src_langs"] assert ( model["alt_src_langs"] == list( str(l) for l in user.alt_src_langs.values_list("pk", flat=True))) @pytest.mark.django_db def test_apiview_get_single_config(rf): """Tests retrieving a single object with an m2m field using the API.""" view = UserConfigAPIView.as_view() user0 = UserFactory.create(username='user0') user1 = UserFactory.create(username='user1') request = create_api_request(rf) response = view(request, id=user0.id) response_data = json.loads(response.content) assert response_data["foo0"] is None assert response_data["foo1"] is None # string config user_config = ObjectConfig(user1) user_config["foo0.bar"] = "foo0.baz" user_config["foo1.bar"] = "foo1.baz" request = create_api_request(rf) response = view(request, id=user1.id) response_data = json.loads(response.content) assert response_data["foo0"] == "foo0.baz" assert response_data["foo1"] == "foo1.baz" # list config user_config["foo0.bar"] = ["foo0.baz"] user_config["foo1.bar"] = ["foo1.baz"] request = create_api_request(rf) response = view(request, id=user1.id) response_data = json.loads(response.content) assert response_data["foo0"] == ["foo0.baz"] assert response_data["foo1"] == ["foo1.baz"] @pytest.mark.django_db def test_apiview_get_multi_config(rf): """Tests retrieving a single object with an m2m field using the API.""" view = UserConfigAPIView.as_view() user0 = UserFactory.create(username='user0') user1 = UserFactory.create(username='user1') request = create_api_request(rf) response = view(request) response_data = json.loads(response.content) for model in response_data["models"]: 
assert model["foo0"] is None assert model["foo1"] is None user_config = ObjectConfig(user0) user_config["foo0.bar"] = "user0.foo0.baz" user_config["foo1.bar"] = "user0.foo1.baz" user_config = ObjectConfig(user1) user_config["foo0.bar"] = "user1.foo0.baz" user_config["foo1.bar"] = "user1.foo1.baz" request = create_api_request(rf) response = view(request) response_data = json.loads(response.content) for model in response_data["models"]: if model["username"] in ["user0", "user1"]: model["foo0"] == "%s.foo0.baz" % model["username"] model["foo1"] == "%s.foo1.baz" % model["username"] @pytest.mark.django_db def test_widget_table_select_multiple_dict(): choices = ( ("foo", dict(id="foo", title="Foo")), ("bar", dict(id="bar", title="Bar")), ("baz", dict(id="baz", title="Baz"))) widget = TableSelectMultiple(item_attrs=["id"], choices=choices) rendered = widget.render("a-field", None) for i, (name, choice) in enumerate(choices): assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s" /></td>' % name) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id"], choices=choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id", "title"], choices=choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td class="field-title">%s</td>' % choice["title"]) in rendered @pytest.mark.django_db def test_widget_table_select_multiple_objects(): choices = ( ("foo", 
dict(id="foo", title="Foo")), ("bar", dict(id="bar", title="Bar")), ("baz", dict(id="baz", title="Baz"))) class Dummy(object): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) object_choices = tuple( (name, Dummy(**choice)) for name, choice in choices) widget = TableSelectMultiple(item_attrs=["id"], choices=object_choices) rendered = widget.render("a-field", None) for i, (name, choice) in enumerate(choices): # this test is way too brittle assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s" /></td>' % name) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id"], choices=object_choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' # this test is way too brittle assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id", "title"], choices=object_choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' # this test is way too brittle assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td class="field-title">%s</td>' % choice["title"]) in rendered @pytest.mark.django_db def test_widget_table_select_multiple_callable(): choices = ( ("foo", dict(id="foo", title="Foo")), ("bar", dict(id="bar", title="Bar")), ("baz", dict(id="baz", title="Baz"))) def _get_id(attr): return "xx%s" % attr["id"] def _get_title(attr): return "xx%s" % attr["title"] widget = TableSelectMultiple(item_attrs=[_get_id], choices=choices) rendered = widget.render("a-field", None) for i, (name, choice) in enumerate(choices): assert ( ('<td 
class="row-select"><input type="checkbox" ' 'name="a-field" value="%s" /></td>' % name) in rendered) assert ('<td class="field-get-id">xx%s</td>' % choice["id"]) in rendered assert ( ('<td class="field-get-title">xx%s</td>' % choice["title"]) not in rendered) widget = TableSelectMultiple(item_attrs=[_get_id], choices=choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td class="field-get-id">xx%s</td>' % choice["id"]) in rendered assert ( ('<td class="field-get-title">xx%s</td>' % choice["title"]) not in rendered) widget = TableSelectMultiple(item_attrs=[_get_id, _get_title], choices=choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td class="field-get-id">xx%s</td>' % choice["id"]) in rendered assert ( ('<td class="field-get-title">xx%s</td>' % choice["title"]) in rendered) @pytest.mark.django_db def test_widget_table_select_multiple_object_methods(): choices = ( ("foo", dict(id="foo", title="Foo")), ("bar", dict(id="bar", title="Bar")), ("baz", dict(id="baz", title="Baz"))) class Dummy(object): def get_id(self): return self.kwargs["id"] def get_title(self): return self.kwargs["title"] def __init__(self, **kwargs): self.kwargs = kwargs for k in kwargs.keys(): setattr(self, k, getattr(self, "get_%s" % k)) object_choices = tuple( (name, Dummy(**choice)) for name, choice in choices) widget = TableSelectMultiple(item_attrs=["id"], choices=object_choices) rendered = widget.render("a-field", None) for i, (name, choice) in enumerate(choices): assert ( ('<td class="row-select"><input type="checkbox" ' 
'name="a-field" value="%s" /></td>' % name) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id"], choices=object_choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td>%s</td>' % choice["title"]) not in rendered widget = TableSelectMultiple(item_attrs=["id", "title"], choices=object_choices) rendered = widget.render("a-field", choices[0]) for i, (name, choice) in enumerate(choices): checked = "" if i == 0: checked = ' checked' assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s"%s /></td>' % (name, checked)) in rendered) assert ('<td class="field-title">%s</td>' % choice["title"]) in rendered @pytest.mark.django_db def test_widget_table_select_id_attr(): choices = ( ("foo", dict(id="foo", title="Foo")), ("bar", dict(id="bar", title="Bar")), ("baz", dict(id="baz", title="Baz"))) widget = TableSelectMultiple(item_attrs=["id"], choices=choices) rendered = widget.render("a-field", None, attrs=dict(id="special-id")) for i, (name, choice) in enumerate(choices): assert ( ('<td class="row-select"><input type="checkbox" ' 'name="a-field" value="%s" id="special-id_%s" /></td>' % (name, i)) in rendered) @pytest.mark.django_db def test_display_stats(tp0, subdir0, language0, store0): _test_stats_display(tp0) _test_stats_display(subdir0) _test_stats_display(language0) _test_stats_display(store0) @pytest.mark.django_db def test_display_stats_action_message(tp0): action = dict( profile_url="/profile/url", unit_source="Some unit source", unit_url="/unit/url", displayname="Some user", check_name="some-check", checks_url="/checks/url", check_display_name="Some check") stats = StatsDisplay(tp0) for i in [2, 3, 4, 6, 7, 8, 9]: _action = action.copy() 
_action["type"] = i message = stats.get_action_message(_action) assert ( ("<a href='%s' class='user-name'>%s</a>" % (action["profile_url"], action["displayname"])) in message) if i != 4: assert ( ("<a href='%s'>%s</a>" % (action["unit_url"], action["unit_source"])) in message) if i in [6, 7]: assert ( ("<a href='%s'>%s</a>" % (action["checks_url"], action["check_display_name"])) in message) for i in [1, 5]: for _i in [0, 1, 2, 3, 4, 5]: _action = action.copy() _action["type"] = i _action["translation_action_type"] = _i message = stats.get_action_message(_action) assert ( ("<a href='%s' class='user-name'>%s</a>" % (action["profile_url"], action["displayname"])) in message) assert ( ("<a href='%s'>%s</a>" % (action["unit_url"], action["unit_source"])) in message) @pytest.mark.django_db def test_browse_view_panels(): class FooBrowseView(PootleBrowseView): panel_names = ["foo_panel"] class FooPanel(Panel): @property def content(self): return "__FOO__" @provider(panels, sender=FooBrowseView) def foo_panel_provider(**kwargs_): return dict(foo_panel=FooPanel) view = FooBrowseView() assert list(view.panels) == ["__FOO__"] class BarBrowseView(PootleBrowseView): panel_names = ["foo_panel", "bar_panel"] class BarPanel(Panel): @property def content(self): return "__BAR__" @provider(panels, sender=PootleBrowseView) def bar_panel_provider(**kwargs_): return dict(bar_panel=BarPanel) # foo_panel is only registered for FooBrowseView # bar_panel is registered for PootleBrowseView # only bar_panel is included view = BarBrowseView() assert list(view.panels) == ["__BAR__"]
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package dag import ( "strings" "testing" ) func TestGraphDot_empty(t *testing.T) { var g Graph g.Add(1) g.Add(2) g.Add(3) actual := strings.TrimSpace(string(g.Dot(nil))) expected := strings.TrimSpace(testGraphDotEmptyStr) if actual != expected { t.Fatalf("bad: %s", actual) } } func TestGraphDot_basic(t *testing.T) { var g Graph g.Add(1) g.Add(2) g.Add(3) g.Connect(BasicEdge(1, 3)) actual := strings.TrimSpace(string(g.Dot(nil))) expected := strings.TrimSpace(testGraphDotBasicStr) if actual != expected { t.Fatalf("bad: %s", actual) } } func TestGraphDot_quoted(t *testing.T) { var g Graph quoted := `name["with-quotes"]` other := `other` g.Add(quoted) g.Add(other) g.Connect(BasicEdge(quoted, other)) actual := strings.TrimSpace(string(g.Dot(nil))) expected := strings.TrimSpace(testGraphDotQuotedStr) if actual != expected { t.Fatalf("\ngot: %q\nwanted %q\n", actual, expected) } } func TestGraphDot_attrs(t *testing.T) { var g Graph g.Add(&testGraphNodeDotter{ Result: &DotNode{ Name: "foo", Attrs: map[string]string{"foo": "bar"}, }, }) actual := strings.TrimSpace(string(g.Dot(nil))) expected := strings.TrimSpace(testGraphDotAttrsStr) if actual != expected { t.Fatalf("bad: %s", actual) } } type testGraphNodeDotter struct{ Result *DotNode } func (n *testGraphNodeDotter) Name() string { return n.Result.Name } func (n *testGraphNodeDotter) DotNode(string, *DotOpts) *DotNode { return n.Result } const testGraphDotQuotedStr = `digraph { compound = "true" newrank = "true" subgraph "root" { "[root] name[\"with-quotes\"]" -> "[root] other" } }` const testGraphDotBasicStr = `digraph { compound = "true" newrank = "true" subgraph "root" { "[root] 1" -> "[root] 3" } } ` const testGraphDotEmptyStr = `digraph { compound = "true" newrank = "true" subgraph "root" { } }` const testGraphDotAttrsStr = `digraph { compound = "true" newrank = "true" subgraph "root" { "[root] foo" [foo = "bar"] } }`
go
github
https://github.com/hashicorp/terraform
internal/dag/marshal_test.go
##########################################################################
#
# Processor specific code

# CPU = "8051"
# Description = "Intel 8051 8-bit microprocessor."
# DataWidth = 8  # 8-bit data
# AddressWidth = 16  # 16-bit addresses

# Maximum length of an instruction (for formatting purposes)
maxLength = 3

# Leadin bytes for multibyte instructions
leadInBytes = []

# Addressing mode table
# List of addressing modes and corresponding format strings for operands.
# {0}/{1} are the operand bytes in the order they follow the opcode.
# FIX(review): the "...,immed,offset" entries previously mixed lowercase
# "${0:02x}" with uppercase "${1:02X}" inside the same format string;
# normalized to uppercase to match the rest of the table.
# NOTE(review): "addr16" prints operand byte 1 before byte 0; the 8051
# stores 16-bit addresses high byte first, so this assumes the dispatcher
# supplies operands low-byte-first -- confirm against the framework.
# NOTE(review): "addr11" formats only one byte; the top 3 bits of an
# 11-bit AJMP/ACALL target live in the opcode byte itself, so presumably
# the framework handles that specially -- confirm.
addressModeTable = {
    ""                 : "",
    "@a+dptr"          : "@a+dptr",
    "@dptr,a"          : "@dptr,a",
    "@r0"              : "@r0",
    "@r0,a"            : "@r0,a",
    "@r0,direct"       : "@r0,${0:02X}",
    "@r0,immed"        : "@r0,#${0:02X}",
    "@r0,immed,offset" : "@r0,#${0:02X},${1:02X}",
    "@r1"              : "@r1",
    "@r1,a"            : "@r1,a",
    "@r1,direct"       : "@r1,${0:02X}",
    "@r1,immed"        : "@r1,#${0:02X}",
    "@r1,immed,offset" : "@r1,#${0:02X},${1:02X}",
    "a"                : "a",
    "a,@a+dptr"        : "a,@a+dptr",
    "a,@a+pc"          : "a,@a+pc",
    "a,@dptr"          : "a,@dptr",
    "a,@r0"            : "a,@r0",
    "a,@r1"            : "a,@r1",
    "a,direct"         : "a,${0:02X}",
    "a,direct,offset"  : "a,${0:02X},${1:02X}",
    "a,immed"          : "a,#${0:02X}",
    "a,immed,offset"   : "a,#${0:02X},${1:02X}",
    "a,r0" : "a,r0",  "a,r1" : "a,r1",  "a,r2" : "a,r2",  "a,r3" : "a,r3",
    "a,r4" : "a,r4",  "a,r5" : "a,r5",  "a,r6" : "a,r6",  "a,r7" : "a,r7",
    "ab"               : "ab",
    "addr11"           : "${0:02X}",
    "addr16"           : "${1:02X}{0:02X}",
    "bit"              : "${0:02X}",
    "bit,c"            : "${0:02X},c",
    "bit,offset"       : "${0:02X},${1:02X}",
    "c"                : "c",
    "c,bit"            : "c,${0:02X}",
    "direct"           : "${0:02X}",
    "direct,@r0"       : "${0:02X},@r0",
    "direct,@r1"       : "${0:02X},@r1",
    "direct,a"         : "${0:02X},a",
    "direct,direct"    : "${0:02X},${1:02X}",
    "direct,immed"     : "${0:02X},#${1:02X}",
    "direct,offset"    : "${0:02X},${1:02X}",
    "direct,r0" : "${0:02X},r0",  "direct,r1" : "${0:02X},r1",
    "direct,r2" : "${0:02X},r2",  "direct,r3" : "${0:02X},r3",
    "direct,r4" : "${0:02X},r4",  "direct,r5" : "${0:02X},r5",
    "direct,r6" : "${0:02X},r6",  "direct,r7" : "${0:02X},r7",
    "dptr"             : "dptr",
    "dptr,immed"       : "dptr,#${0:02X}",
    "offset"           : "${0:04X}",
    "r0" : "r0",  "r0,a" : "r0,a",  "r0,direct" : "r0,${0:02X}",
    "r0,immed" : "r0,#${0:02X}",  "r0,immed,offset" : "r0,#${0:02X},${1:02X}",
    "r0,offset" : "r0,${0:02X}",
    "r1" : "r1",  "r1,a" : "r1,a",  "r1,direct" : "r1,${0:02X}",
    "r1,immed" : "r1,#${0:02X}",  "r1,immed,offset" : "r1,#${0:02X},${1:02X}",
    "r1,offset" : "r1,${0:02X}",
    "r2" : "r2",  "r2,a" : "r2,a",  "r2,direct" : "r2,${0:02X}",
    "r2,immed" : "r2,#${0:02X}",  "r2,immed,offset" : "r2,#${0:02X},${1:02X}",
    "r2,offset" : "r2,${0:02X}",
    "r3" : "r3",  "r3,a" : "r3,a",  "r3,direct" : "r3,${0:02X}",
    "r3,immed" : "r3,#${0:02X}",  "r3,immed,offset" : "r3,#${0:02X},${1:02X}",
    "r3,offset" : "r3,${0:02X}",
    "r4" : "r4",  "r4,a" : "r4,a",  "r4,direct" : "r4,${0:02X}",
    "r4,immed" : "r4,#${0:02X}",  "r4,immed,offset" : "r4,#${0:02X},${1:02X}",
    "r4,offset" : "r4,${0:02X}",
    "r5" : "r5",  "r5,a" : "r5,a",  "r5,direct" : "r5,${0:02X}",
    "r5,immed" : "r5,#${0:02X}",  "r5,immed,offset" : "r5,#${0:02X},${1:02X}",
    "r5,offset" : "r5,${0:02X}",
    "r6" : "r6",  "r6,a" : "r6,a",  "r6,direct" : "r6,${0:02X}",
    "r6,immed" : "r6,#${0:02X}",  "r6,immed,offset" : "r6,#${0:02X},${1:02X}",
    "r6,offset" : "r6,${0:02X}",
    "r7" : "r7",  "r7,a" : "r7,a",  "r7,direct" : "r7,${0:02X}",
    "r7,immed" : "r7,#${0:02X}",  "r7,immed,offset" : "r7,#${0:02X},${1:02X}",
    "r7,offset" : "r7,${0:02X}",
}

# Op Code Table
# Key is numeric opcode (possibly multiple bytes)
# Value is a list:
#   # bytes
#   mnemonic
#   addressing mode
#   flags (e.g. pcr)
# NOTE(review): "pcr" (PC-relative flag) is supplied by the disassembler
# framework that loads this table; it is not defined in this module.
# NOTE(review): only jc/jnc/jz/jnz/sjmp carry the pcr flag even though the
# jb/jbc/jnb, cjne and djnz offsets are also PC-relative on the 8051;
# left as the original had it -- confirm how the framework resolves those.
# NOTE(review): 0xa0/0xb0 are ORL/ANL "C,/bit" (complemented bit) on real
# silicon; this table renders them without the slash, as the original did.
# 0xa5 is deliberately absent: it is a reserved/undefined opcode.
opcodeTable = {
    # 0x00-0x0f
    0x00 : [ 1, "nop",   ""           ],
    0x01 : [ 2, "ajmp",  "addr11"     ],
    0x02 : [ 3, "ljmp",  "addr16"     ],
    0x03 : [ 1, "rr",    "a"          ],
    0x04 : [ 1, "inc",   "a"          ],
    0x05 : [ 2, "inc",   "direct"     ],
    0x06 : [ 1, "inc",   "@r0"        ],
    0x07 : [ 1, "inc",   "@r1"        ],
    0x08 : [ 1, "inc", "r0" ],  0x09 : [ 1, "inc", "r1" ],
    0x0a : [ 1, "inc", "r2" ],  0x0b : [ 1, "inc", "r3" ],
    0x0c : [ 1, "inc", "r4" ],  0x0d : [ 1, "inc", "r5" ],
    0x0e : [ 1, "inc", "r6" ],  0x0f : [ 1, "inc", "r7" ],
    # 0x10-0x1f
    0x10 : [ 3, "jbc",   "bit,offset" ],
    0x11 : [ 2, "acall", "addr11"     ],
    0x12 : [ 3, "lcall", "addr16"     ],
    0x13 : [ 1, "rrc",   "a"          ],
    0x14 : [ 1, "dec",   "a"          ],
    0x15 : [ 2, "dec",   "direct"     ],
    0x16 : [ 1, "dec",   "@r0"        ],  # FIX: was "r0"; 0x16 is DEC @R0 (register-indirect)
    0x17 : [ 1, "dec",   "@r1"        ],
    0x18 : [ 1, "dec", "r0" ],  0x19 : [ 1, "dec", "r1" ],
    0x1a : [ 1, "dec", "r2" ],  0x1b : [ 1, "dec", "r3" ],
    0x1c : [ 1, "dec", "r4" ],  0x1d : [ 1, "dec", "r5" ],
    0x1e : [ 1, "dec", "r6" ],  0x1f : [ 1, "dec", "r7" ],
    # 0x20-0x2f
    0x20 : [ 3, "jb",    "bit,offset" ],
    0x21 : [ 2, "ajmp",  "addr11"     ],
    0x22 : [ 1, "ret",   ""           ],
    0x23 : [ 1, "rl",    "a"          ],
    0x24 : [ 2, "add",   "a,immed"    ],
    0x25 : [ 2, "add",   "a,direct"   ],
    0x26 : [ 1, "add",   "a,@r0"      ],  # FIX: was "a,r0"; 0x26 is ADD A,@R0 (register-indirect)
    0x27 : [ 1, "add",   "a,@r1"      ],
    0x28 : [ 1, "add", "a,r0" ],  0x29 : [ 1, "add", "a,r1" ],
    0x2a : [ 1, "add", "a,r2" ],  0x2b : [ 1, "add", "a,r3" ],
    0x2c : [ 1, "add", "a,r4" ],  0x2d : [ 1, "add", "a,r5" ],
    0x2e : [ 1, "add", "a,r6" ],  0x2f : [ 1, "add", "a,r7" ],
    # 0x30-0x3f
    0x30 : [ 3, "jnb",   "bit,offset" ],
    0x31 : [ 2, "acall", "addr11"     ],
    0x32 : [ 1, "reti",  ""           ],
    0x33 : [ 1, "rlc",   "a"          ],
    0x34 : [ 2, "addc",  "a,immed"    ],
    0x35 : [ 2, "addc",  "a,direct"   ],
    0x36 : [ 1, "addc",  "a,@r0"      ],
    0x37 : [ 1, "addc",  "a,@r1"      ],
    0x38 : [ 1, "addc", "a,r0" ],  0x39 : [ 1, "addc", "a,r1" ],
    0x3a : [ 1, "addc", "a,r2" ],  0x3b : [ 1, "addc", "a,r3" ],
    0x3c : [ 1, "addc", "a,r4" ],  0x3d : [ 1, "addc", "a,r5" ],
    0x3e : [ 1, "addc", "a,r6" ],  0x3f : [ 1, "addc", "a,r7" ],
    # 0x40-0x4f
    0x40 : [ 2, "jc",    "offset", pcr ],
    0x41 : [ 2, "ajmp",  "addr11"      ],
    0x42 : [ 2, "orl",   "direct,a"    ],
    0x43 : [ 3, "orl",   "direct,immed" ],
    0x44 : [ 2, "orl",   "a,immed"     ],
    0x45 : [ 2, "orl",   "a,direct"    ],
    0x46 : [ 1, "orl",   "a,@r0"       ],
    0x47 : [ 1, "orl",   "a,@r1"       ],
    0x48 : [ 1, "orl", "a,r0" ],  0x49 : [ 1, "orl", "a,r1" ],
    0x4a : [ 1, "orl", "a,r2" ],  0x4b : [ 1, "orl", "a,r3" ],
    0x4c : [ 1, "orl", "a,r4" ],  0x4d : [ 1, "orl", "a,r5" ],
    0x4e : [ 1, "orl", "a,r6" ],  0x4f : [ 1, "orl", "a,r7" ],
    # 0x50-0x5f
    0x50 : [ 2, "jnc",   "offset", pcr ],
    0x51 : [ 2, "acall", "addr11"      ],
    0x52 : [ 2, "anl",   "direct,a"    ],
    0x53 : [ 3, "anl",   "direct,immed" ],
    0x54 : [ 2, "anl",   "a,immed"     ],
    0x55 : [ 2, "anl",   "a,direct"    ],
    0x56 : [ 1, "anl",   "a,@r0"       ],
    0x57 : [ 1, "anl",   "a,@r1"       ],
    0x58 : [ 1, "anl", "a,r0" ],  0x59 : [ 1, "anl", "a,r1" ],
    0x5a : [ 1, "anl", "a,r2" ],  0x5b : [ 1, "anl", "a,r3" ],
    0x5c : [ 1, "anl", "a,r4" ],  0x5d : [ 1, "anl", "a,r5" ],
    0x5e : [ 1, "anl", "a,r6" ],  0x5f : [ 1, "anl", "a,r7" ],
    # 0x60-0x6f
    0x60 : [ 2, "jz",    "offset", pcr ],
    0x61 : [ 2, "ajmp",  "addr11"      ],
    0x62 : [ 2, "xrl",   "direct,a"    ],
    0x63 : [ 3, "xrl",   "direct,immed" ],
    0x64 : [ 2, "xrl",   "a,immed"     ],
    0x65 : [ 2, "xrl",   "a,direct"    ],
    0x66 : [ 1, "xrl",   "a,@r0"       ],
    0x67 : [ 1, "xrl",   "a,@r1"       ],
    0x68 : [ 1, "xrl", "a,r0" ],  0x69 : [ 1, "xrl", "a,r1" ],
    0x6a : [ 1, "xrl", "a,r2" ],  0x6b : [ 1, "xrl", "a,r3" ],
    0x6c : [ 1, "xrl", "a,r4" ],  0x6d : [ 1, "xrl", "a,r5" ],
    0x6e : [ 1, "xrl", "a,r6" ],  0x6f : [ 1, "xrl", "a,r7" ],
    # 0x70-0x7f
    0x70 : [ 2, "jnz",   "offset", pcr ],
    0x71 : [ 2, "acall", "addr11"      ],
    0x72 : [ 2, "orl",   "c,bit"       ],
    0x73 : [ 1, "jmp",   "@a+dptr"     ],
    0x74 : [ 2, "mov",   "a,immed"     ],
    0x75 : [ 3, "mov",   "direct,immed" ],
    0x76 : [ 2, "mov",   "@r0,immed"   ],
    0x77 : [ 2, "mov",   "@r1,immed"   ],
    0x78 : [ 2, "mov", "r0,immed" ],  0x79 : [ 2, "mov", "r1,immed" ],
    0x7a : [ 2, "mov", "r2,immed" ],  0x7b : [ 2, "mov", "r3,immed" ],
    0x7c : [ 2, "mov", "r4,immed" ],  0x7d : [ 2, "mov", "r5,immed" ],
    0x7e : [ 2, "mov", "r6,immed" ],  0x7f : [ 2, "mov", "r7,immed" ],
    # 0x80-0x8f
    0x80 : [ 2, "sjmp",  "offset", pcr ],
    0x81 : [ 2, "ajmp",  "addr11"      ],
    0x82 : [ 2, "anl",   "c,bit"       ],
    0x83 : [ 1, "movc",  "a,@a+pc"     ],
    0x84 : [ 1, "div",   "ab"          ],
    0x85 : [ 3, "mov",   "direct,direct" ],
    0x86 : [ 2, "mov",   "direct,@r0"  ],
    0x87 : [ 2, "mov",   "direct,@r1"  ],
    0x88 : [ 2, "mov", "direct,r0" ],  0x89 : [ 2, "mov", "direct,r1" ],
    0x8a : [ 2, "mov", "direct,r2" ],  0x8b : [ 2, "mov", "direct,r3" ],
    0x8c : [ 2, "mov", "direct,r4" ],  0x8d : [ 2, "mov", "direct,r5" ],
    0x8e : [ 2, "mov", "direct,r6" ],  0x8f : [ 2, "mov", "direct,r7" ],
    # 0x90-0x9f
    0x90 : [ 3, "mov",   "dptr,immed"  ],
    0x91 : [ 2, "acall", "addr11"      ],
    0x92 : [ 2, "mov",   "bit,c"       ],
    0x93 : [ 1, "movc",  "a,@a+dptr"   ],
    0x94 : [ 2, "subb",  "a,immed"     ],
    0x95 : [ 2, "subb",  "a,direct"    ],
    0x96 : [ 1, "subb",  "a,@r0"       ],
    0x97 : [ 1, "subb",  "a,@r1"       ],
    0x98 : [ 1, "subb", "a,r0" ],  0x99 : [ 1, "subb", "a,r1" ],
    0x9a : [ 1, "subb", "a,r2" ],  0x9b : [ 1, "subb", "a,r3" ],
    0x9c : [ 1, "subb", "a,r4" ],  0x9d : [ 1, "subb", "a,r5" ],
    0x9e : [ 1, "subb", "a,r6" ],  0x9f : [ 1, "subb", "a,r7" ],
    # 0xa0-0xaf (0xa5 reserved)
    0xa0 : [ 2, "orl",   "c,bit"       ],
    0xa1 : [ 2, "ajmp",  "addr11"      ],
    0xa2 : [ 2, "mov",   "c,bit"       ],
    0xa3 : [ 1, "inc",   "dptr"        ],
    0xa4 : [ 1, "mul",   "ab"          ],
    0xa6 : [ 2, "mov",   "@r0,direct"  ],
    0xa7 : [ 2, "mov",   "@r1,direct"  ],
    0xa8 : [ 2, "mov", "r0,direct" ],  0xa9 : [ 2, "mov", "r1,direct" ],
    0xaa : [ 2, "mov", "r2,direct" ],  0xab : [ 2, "mov", "r3,direct" ],
    0xac : [ 2, "mov", "r4,direct" ],  0xad : [ 2, "mov", "r5,direct" ],
    0xae : [ 2, "mov", "r6,direct" ],  0xaf : [ 2, "mov", "r7,direct" ],
    # 0xb0-0xbf
    0xb0 : [ 2, "anl",   "c,bit"       ],
    0xb1 : [ 2, "acall", "addr11"      ],
    0xb2 : [ 2, "cpl",   "bit"         ],
    0xb3 : [ 1, "cpl",   "c"           ],
    0xb4 : [ 3, "cjne",  "a,immed,offset"   ],
    0xb5 : [ 3, "cjne",  "a,direct,offset"  ],
    0xb6 : [ 3, "cjne",  "@r0,immed,offset" ],
    0xb7 : [ 3, "cjne",  "@r1,immed,offset" ],
    0xb8 : [ 3, "cjne", "r0,immed,offset" ],  0xb9 : [ 3, "cjne", "r1,immed,offset" ],
    0xba : [ 3, "cjne", "r2,immed,offset" ],  0xbb : [ 3, "cjne", "r3,immed,offset" ],
    0xbc : [ 3, "cjne", "r4,immed,offset" ],  0xbd : [ 3, "cjne", "r5,immed,offset" ],
    0xbe : [ 3, "cjne", "r6,immed,offset" ],  0xbf : [ 3, "cjne", "r7,immed,offset" ],
    # 0xc0-0xcf
    0xc0 : [ 2, "push",  "direct"      ],
    0xc1 : [ 2, "ajmp",  "addr11"      ],
    0xc2 : [ 2, "clr",   "bit"         ],
    0xc3 : [ 1, "clr",   "c"           ],
    0xc4 : [ 1, "swap",  "a"           ],
    0xc5 : [ 2, "xch",   "a,direct"    ],
    0xc6 : [ 1, "xch",   "a,@r0"       ],
    0xc7 : [ 1, "xch",   "a,@r1"       ],
    0xc8 : [ 1, "xch", "a,r0" ],  0xc9 : [ 1, "xch", "a,r1" ],
    0xca : [ 1, "xch", "a,r2" ],  0xcb : [ 1, "xch", "a,r3" ],
    0xcc : [ 1, "xch", "a,r4" ],  0xcd : [ 1, "xch", "a,r5" ],
    0xce : [ 1, "xch", "a,r6" ],  0xcf : [ 1, "xch", "a,r7" ],
    # 0xd0-0xdf
    0xd0 : [ 2, "pop",   "direct"      ],
    0xd1 : [ 2, "acall", "addr11"      ],
    0xd2 : [ 2, "setb",  "bit"         ],
    0xd3 : [ 1, "setb",  "c"           ],
    0xd4 : [ 1, "da",    "a"           ],
    0xd5 : [ 3, "djnz",  "direct,offset" ],
    0xd6 : [ 1, "xchd",  "a,@r0"       ],
    0xd7 : [ 1, "xchd",  "a,@r1"       ],
    0xd8 : [ 2, "djnz", "r0,offset" ],  0xd9 : [ 2, "djnz", "r1,offset" ],
    0xda : [ 2, "djnz", "r2,offset" ],  0xdb : [ 2, "djnz", "r3,offset" ],
    0xdc : [ 2, "djnz", "r4,offset" ],  0xdd : [ 2, "djnz", "r5,offset" ],
    0xde : [ 2, "djnz", "r6,offset" ],  0xdf : [ 2, "djnz", "r7,offset" ],
    # 0xe0-0xef
    0xe0 : [ 1, "movx",  "a,@dptr"     ],
    0xe1 : [ 2, "ajmp",  "addr11"      ],
    0xe2 : [ 1, "movx",  "a,@r0"       ],
    0xe3 : [ 1, "movx",  "a,@r1"       ],
    0xe4 : [ 1, "clr",   "a"           ],
    0xe5 : [ 2, "mov",   "a,direct"    ],
    0xe6 : [ 1, "mov",   "a,@r0"       ],
    0xe7 : [ 1, "mov",   "a,@r1"       ],
    0xe8 : [ 1, "mov", "a,r0" ],  0xe9 : [ 1, "mov", "a,r1" ],
    0xea : [ 1, "mov", "a,r2" ],  0xeb : [ 1, "mov", "a,r3" ],
    0xec : [ 1, "mov", "a,r4" ],  0xed : [ 1, "mov", "a,r5" ],
    0xee : [ 1, "mov", "a,r6" ],  0xef : [ 1, "mov", "a,r7" ],
    # 0xf0-0xff
    0xf0 : [ 1, "movx",  "@dptr,a"     ],
    0xf1 : [ 2, "acall", "addr11"      ],
    0xf2 : [ 1, "movx",  "@r0,a"       ],
    0xf3 : [ 1, "movx",  "@r1,a"       ],
    0xf4 : [ 1, "cpl",   "a"           ],
    0xf5 : [ 2, "mov",   "direct,a"    ],
    0xf6 : [ 1, "mov",   "@r0,a"       ],
    0xf7 : [ 1, "mov",   "@r1,a"       ],
    0xf8 : [ 1, "mov", "r0,a" ],  0xf9 : [ 1, "mov", "r1,a" ],
    0xfa : [ 1, "mov", "r2,a" ],  0xfb : [ 1, "mov", "r3,a" ],
    0xfc : [ 1, "mov", "r4,a" ],  0xfd : [ 1, "mov", "r5,a" ],
    0xfe : [ 1, "mov", "r6,a" ],  0xff : [ 1, "mov", "r7,a" ],
}

# End of processor specific code
#
##########################################################################
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A mechanism for library configuration. Whenever App Engine library code has the need for a user-configurable value, it should use the following protocol: 1. Pick a prefix unique to the library module, e.g. 'mylib'. 2. Call lib_config.register(prefix, mapping) with that prefix as the first argument and a dict mapping suffixes to default functions as the second. 3. The register() function returns a config handle unique to this prefix. The config handle object has attributes corresponding to each of the suffixes given in the mapping. Call these functions (they're not really methods even though they look like methods) to access the user's configuration value. If the user didn't configure a function, the default function from the mapping is called instead. 4. Document the function name and its signature and semantics. Users wanting to provide configuration values should create a module named appengine_config.py in the top-level directory of their application, and define functions as documented by various App Engine library components in that module. To change the configuration, edit the file and re-deploy the application. (When using the SDK, no redeployment is required: the development server will pick up the changes the next time it handles a request.) Third party libraries can also use this mechanism. 
For casual use, just calling the register() method with a unique prefix is okay. For carefull libraries, however, it is recommended to instantiate a new LibConfigRegistry instance using a different module name. Example appengine_config.py file: from somewhere import MyMiddleWareClass def mylib_add_middleware(app): app = MyMiddleWareClass(app) return app Example library use: from google.appengine.api import lib_config config_handle = lib_config.register( 'mylib', {'add_middleware': lambda app: app}) def add_middleware(app): return config_handle.add_middleware(app) """ __all__ = ['DEFAULT_MODNAME', 'LibConfigRegistry', 'ConfigHandle', 'register', 'main', ] import logging import os import sys import threading DEFAULT_MODNAME = 'appengine_config' class LibConfigRegistry(object): """A registry for library configuration values.""" def __init__(self, modname): """Constructor. Args: modname: The module name to be imported. Note: the actual import of this module is deferred until the first time a configuration value is requested through attribute access on a ConfigHandle instance. """ self._modname = modname self._registrations = {} self._module = None self._lock = threading.RLock() def register(self, prefix, mapping): """Register a set of configuration names. Args: prefix: A shared prefix for the configuration names being registered. If the prefix doesn't end in '_', that character is appended. mapping: A dict mapping suffix strings to default values. Returns: A ConfigHandle instance. It's okay to re-register the same prefix: the mappings are merged, and for duplicate suffixes the most recent registration wins. 
""" if not prefix.endswith('_'): prefix += '_' self._lock.acquire() try: handle = self._registrations.get(prefix) if handle is None: handle = ConfigHandle(prefix, self) self._registrations[prefix] = handle finally: self._lock.release() handle._update_defaults(mapping) return handle def initialize(self, import_func=__import__): """Attempt to import the config module, if not already imported. This function always sets self._module to a value unequal to None: either the imported module (if imported successfully), or a dummy object() instance (if an ImportError was raised). Other exceptions are *not* caught. When a dummy instance is used, it is also put in sys.modules. This allows us to detect when sys.modules was changed (as dev_appserver.py does when it notices source code changes) and re-try the __import__ in that case, while skipping it (for speed) if nothing has changed. Args: import_func: Used for dependency injection. """ self._lock.acquire() try: if (self._module is not None and self._module is sys.modules.get(self._modname)): return try: import_func(self._modname) except ImportError, err: if str(err) != 'No module named %s' % self._modname: raise self._module = object() sys.modules[self._modname] = self._module else: self._module = sys.modules[self._modname] finally: self._lock.release() def reset(self): """Drops the imported config module. If the config module has not been imported then this is a no-op. """ self._lock.acquire() try: if self._module is None: return self._module = None handles = self._registrations.values() finally: self._lock.release() for handle in handles: handle._clear_cache() def _pairs(self, prefix): """Generate (key, value) pairs from the config module matching prefix. Args: prefix: A prefix string ending in '_', e.g. 'mylib_'. Yields: (key, value) pairs where key is the configuration name with prefix removed, and value is the corresponding value. 
""" self._lock.acquire() try: mapping = getattr(self._module, '__dict__', None) if not mapping: return items = mapping.items() finally: self._lock.release() nskip = len(prefix) for key, value in items: if key.startswith(prefix): yield key[nskip:], value def _dump(self): """Print info about all registrations to stdout.""" self.initialize() handles = [] self._lock.acquire() try: if not hasattr(self._module, '__dict__'): print 'Module %s.py does not exist.' % self._modname elif not self._registrations: print 'No registrations for %s.py.' % self._modname else: print 'Registrations in %s.py:' % self._modname print '-'*40 handles = self._registrations.items() finally: self._lock.release() for _, handle in sorted(handles): handle._dump() class ConfigHandle(object): """A set of configuration for a single library module or package. Public attributes of instances of this class are configuration values. Attributes are dynamically computed (in __getattr__()) and cached as regular instance attributes. """ _initialized = False def __init__(self, prefix, registry): """Constructor. Args: prefix: A shared prefix for the configuration names being registered. It *must* end in '_'. (This is enforced by LibConfigRegistry.) registry: A LibConfigRegistry instance. """ assert prefix.endswith('_') self._prefix = prefix self._defaults = {} self._overrides = {} self._registry = registry self._lock = threading.RLock() def _update_defaults(self, mapping): """Update the default mappings. Args: mapping: A dict mapping suffix strings to default values. """ self._lock.acquire() try: for key, value in mapping.iteritems(): if key.startswith('__') and key.endswith('__'): continue self._defaults[key] = value if self._initialized: self._update_configs() finally: self._lock.release() def _update_configs(self): """Update the configuration values. This clears the cached values, initializes the registry, and loads the configuration values from the config module. 
""" self._lock.acquire() try: if self._initialized: self._clear_cache() self._registry.initialize() for key, value in self._registry._pairs(self._prefix): if key not in self._defaults: logging.warn('Configuration "%s" not recognized', self._prefix + key) else: self._overrides[key] = value self._initialized = True finally: self._lock.release() def _clear_cache(self): """Clear the cached values.""" self._lock.acquire() try: self._initialized = False for key in self._defaults: self._overrides.pop(key, None) try: delattr(self, key) except AttributeError: pass finally: self._lock.release() def _dump(self): """Print info about this set of registrations to stdout.""" self._lock.acquire() try: print 'Prefix %s:' % self._prefix if self._overrides: print ' Overrides:' for key in sorted(self._overrides): print ' %s = %r' % (key, self._overrides[key]) else: print ' No overrides' if self._defaults: print ' Defaults:' for key in sorted(self._defaults): print ' %s = %r' % (key, self._defaults[key]) else: print ' No defaults' print '-'*40 finally: self._lock.release() def __getattr__(self, suffix): """Dynamic attribute access. Args: suffix: The attribute name. Returns: A configuration values. Raises: AttributeError if the suffix is not a registered suffix. The first time an attribute is referenced, this method is invoked. The value returned taken either from the config module or from the registered default. """ self._lock.acquire() try: if not self._initialized: self._update_configs() if suffix in self._overrides: value = self._overrides[suffix] elif suffix in self._defaults: value = self._defaults[suffix] else: raise AttributeError(suffix) setattr(self, suffix, value) return value finally: self._lock.release() _default_registry = LibConfigRegistry(DEFAULT_MODNAME) def register(prefix, mapping): """Register a set of configurations with the default config module. Args: prefix: A shared prefix for the configuration names being registered. 
If the prefix doesn't end in '_', that character is appended. mapping: A dict mapping suffix strings to default values. Returns: A ConfigHandle instance. """ return _default_registry.register(prefix, mapping) def main(): """CGI-style request handler to dump the configuration. Put this in your app.yaml to enable (you can pick any URL): - url: /lib_config script: $PYTHON_LIB/google/appengine/api/lib_config.py Note: unless you are using the SDK, you must be admin. """ if not os.getenv('SERVER_SOFTWARE', '').startswith('Dev'): from google.appengine.api import users if not users.is_current_user_admin(): if users.get_current_user() is None: print 'Status: 302' print 'Location:', users.create_login_url(os.getenv('PATH_INFO', '')) else: print 'Status: 403' print print 'Forbidden' return print 'Content-type: text/plain' print _default_registry._dump() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Add tables for backfill. Revision ID: 522625f6d606 Revises: 1cdc775ca98f Create Date: 2024-08-23 14:26:08.250493 """ from __future__ import annotations import sqlalchemy as sa from alembic import op import airflow # revision identifiers, used by Alembic. 
# Alembic ordering: this migration applies on top of 1cdc775ca98f.
revision = "522625f6d606"
down_revision = "1cdc775ca98f"
branch_labels = None
depends_on = None
# First Airflow release that ships this schema change.
airflow_version = "3.0.0"


def upgrade():
    """Apply Add tables for backfill."""
    # Parent table: one row per requested backfill of a DAG over a date range.
    op.create_table(
        "backfill",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("dag_id", sa.String(length=250), nullable=False),
        # Date window to backfill; stored as UTC timestamps.
        sa.Column("from_date", airflow.utils.sqlalchemy.UtcDateTime(timezone=True), nullable=False),
        sa.Column("to_date", airflow.utils.sqlalchemy.UtcDateTime(timezone=True), nullable=False),
        # Conf dict applied to every dag run created by this backfill.
        sa.Column("dag_run_conf", sa.JSON(), nullable=False),
        sa.Column("is_paused", sa.Boolean(), nullable=True),
        sa.Column("max_active_runs", sa.Integer(), nullable=False),
        sa.Column("created_at", airflow.utils.sqlalchemy.UtcDateTime(timezone=True), nullable=False),
        # NULL until the backfill finishes.
        sa.Column("completed_at", airflow.utils.sqlalchemy.UtcDateTime(timezone=True), nullable=True),
        sa.Column("updated_at", airflow.utils.sqlalchemy.UtcDateTime(timezone=True), nullable=False),
        sa.PrimaryKeyConstraint("id", name=op.f("backfill_pkey")),
    )
    # Association table linking a backfill to the dag runs it created,
    # ordered by sort_ordinal.
    op.create_table(
        "backfill_dag_run",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("backfill_id", sa.Integer(), nullable=False),
        # Nullable + "set null" FK: deleting a dag run keeps the backfill record.
        sa.Column("dag_run_id", sa.Integer(), nullable=True),
        sa.Column("sort_ordinal", sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint("id", name=op.f("backfill_dag_run_pkey")),
        sa.UniqueConstraint("backfill_id", "dag_run_id", name="ix_bdr_backfill_id_dag_run_id"),
        sa.ForeignKeyConstraint(
            ["backfill_id"], ["backfill.id"], name="bdr_backfill_fkey", ondelete="cascade"
        ),
        sa.ForeignKeyConstraint(["dag_run_id"], ["dag_run.id"], name="bdr_dag_run_fkey", ondelete="set null"),
    )


def downgrade():
    """Unapply Add tables for backfill."""
    # Drop the child table first so its FK to backfill.id does not block the drop.
    op.drop_table("backfill_dag_run")
    op.drop_table("backfill")
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/migrations/versions/0033_3_0_0_add_tables_for_backfill.py
# -*- encoding: utf-8 -*-
"""
Django settings for dp project.

For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key -- and the Facebook/Mandrill secrets and DB
# password below -- is committed to source control; rotate all of them and
# load them from environment variables instead.
SECRET_KEY = '@+d%g-+1%))$q!un*qx6pv&vivpcz7yzmd7#3v)56#q&-5n*&@'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): TEMPLATE_DEBUG only takes effect when DEBUG is True in
# Django 1.6 -- presumably a leftover from development; confirm intent.
TEMPLATE_DEBUG = True

# NOTE(review): '*' disables Host-header validation; list the real domains
# in production.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = (
    'grappelli',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'social.apps.django_app.default',
    'django_extensions',
    'rest_framework',
    'rest_framework.authtoken',
    'djrill',
    'tournaments',
    'games',
    'notifications',
    'contact',
    'homepage',
)

# CorsMiddleware is first so CORS headers are added before other middleware
# can short-circuit the response.
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'dp.urls'

WSGI_APPLICATION = 'dp.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

import dj_database_url
# NOTE(review): the credential part contains a bare '%', which is invalid
# percent-encoding in a URL (a literal '%' must be written '%25') --
# confirm dj_database_url parses this as intended.
DATABASES = {
    'default': dj_database_url.config(default='postgres://dp:DPfutbol1983%@localhost:5432/dp')
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'es-ar'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# NOTE(review): Django's default order lists FileSystemFinder first; here
# app assets shadow same-named project-level assets -- confirm deliberate.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.FileSystemFinder',
)

MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
)

# python-social-auth backends tried in order, falling back to the standard
# model backend for username/password logins.
AUTHENTICATION_BACKENDS = (
    'social.backends.facebook.FacebookAppOAuth2',
    'social.backends.facebook.FacebookOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)

# API clients authenticate with DRF tokens only.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        #'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    )
}

CORS_ORIGIN_WHITELIST = (
    'localhost:9090',
    'localhost:3000',
    '127.0.0.1:9000',
    'dpfutbol.com',
    'www.dpfutbol.com',
)

CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken',
    'WWW-Authenticate',
)

# Facebook OAuth2 app credentials (see NOTE(review) on SECRET_KEY above).
SOCIAL_AUTH_FACEBOOK_KEY = '1480234775555747'
SOCIAL_AUTH_FACEBOOK_SECRET = 'ab0980264107f9856823e3650a1871da'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']

# Custom user model.
AUTH_USER_MODEL = 'games.Player'

REDIS_HOST = 'localhost'
REDIS_PORT = 6379

# MANDRILL (transactional email via Djrill)
MANDRILL_API_KEY = '4rbqFI0BJL8ryoHT7CRGLw'
EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"

# CELERY SETTINGS
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'

GRAPPELLI_ADMIN_TITLE = u"DP Fútbol"
unknown
codeparrot/codeparrot-clean
"""Implement the rules of each Scala build utility type."""

import os

import java_common as jc
import shared_utils as su


class Error(Exception):
  """Error class for this module."""


def get_scala_compile_command(rule_details, compile_libs, dir_path, file_list,
                              warn_as_error):
  """Get scala compile command.

  Args:
    rule_details: build rule dict; SCALA_VERSION_KEY and COMPILE_PARAMS_KEY
        are read if present.
    compile_libs: classpath entries (non-string items are skipped).
    dir_path: output directory passed to scalac via -d.
    file_list: source file paths to compile.
    warn_as_error: if true compile with -Xfatal-warnings, else -nowarn.

  Returns:
    The scalac invocation as an argv list.
  """
  # NOTE(review): the `or []` on COMPILE_PARAMS_KEY below is redundant with
  # this .get() default unless callers store a falsy non-list value.
  compile_params = rule_details.get(su.COMPILE_PARAMS_KEY, [])
  version = rule_details.get(su.SCALA_VERSION_KEY, su.SCALA_DEFAULT_VERSION)
  compile_command = [su.get_scala_compiler(version)]
  if warn_as_error:
    compile_command.append('-Xfatal-warnings')
  else:
    compile_command.append('-nowarn')
  class_path = []
  if compile_libs:
    class_path.extend([c for c in compile_libs if isinstance(c, str)])
  # Only emit -cp when there is at least one classpath entry.
  class_path = ':'.join(class_path)
  if class_path:
    compile_command.extend(['-cp', class_path])
  compile_command.extend(['-d', dir_path])
  compile_command.extend(compile_params or [])
  compile_command.extend(file_list)
  return compile_command


class ScalaCommon(jc.JavaCommon):
  """Common Scala handler functions."""

  @classmethod
  def _get_all_pc_deps(cls, rule_details):
    """Get precompiled deps of current rule (deduplicated, env-expanded)."""
    pc_deps = rule_details.get(su.PC_DEPS_KEY, [])
    pc_deps = [su.expand_env_vars(f) for f in list(set(pc_deps))]
    return pc_deps

  @classmethod
  def _is_test_rule(cls, rule_details):
    """Just check if the given rule is a test rule."""
    return rule_details[su.TYPE_KEY] == su.SCALA_TEST_TYPE

  @classmethod
  def _set_compile_command(cls, rule_details):
    """Set the Scala compile command on the rule."""
    rule_details[su.COMPILE_COMMAND_KEY] = []
    # Nothing to compile for rules without sources.
    if not rule_details[su.SRCS_KEY]:
      return
    compile_libs = []
    if rule_details[su.COMPILE_LIBS_KEY]:
      # All compile-time deps are staged in the clsdeps dir; use a wildcard.
      compile_libs = [os.path.join(rule_details[su.WDIR_CLSDEPS_KEY], '*')]
    compile_command = get_scala_compile_command(
        rule_details, compile_libs, rule_details[su.WDIR_TARGET_KEY],
        [su.get_relative_path(rule_details[su.POSSIBLE_PREFIXES_KEY], f)
         for f in rule_details[su.SRCS_KEY]],
        not rule_details[su.COMPILE_IGNORE_WARNINGS_KEY])
    rule_details[su.COMPILE_COMMAND_KEY].append(compile_command)

  @classmethod
  def _set_test_commands(cls, rule_details, details_map):
    """Set the test invocation command (ScalaTest runner) for the rule."""
    # Hard coded for the time being to support ScalaTest.
    classpath = ["{}/*".format(rule_details[su.WDIR_CLSDEPS_KEY])]
    classpath.append(rule_details[su.OUT_KEY])
    version = rule_details.get(su.SCALA_VERSION_KEY, su.SCALA_DEFAULT_VERSION)
    test_command = [su.get_scala_runtime(version), '-cp', ':'.join(classpath),
                    'org.scalatest.run', rule_details[su.TEST_CLASS_KEY]]
    rule_details[su.TEST_COMMANDS_KEY] = [test_command]

  @classmethod
  def include_deps_recursively(cls, rule_details):
    """Dependency graph pruning optimization.

    Returns False only for Scala library rules whose jar already links in
    its dependencies, so those deps can be dropped from all_deps.
    """
    cls._normalize_fields(rule_details)
    if rule_details[su.TYPE_KEY] != su.SCALA_LIB_TYPE:
      return True
    if rule_details[su.LINK_INCLUDE_DEPS_KEY]:
      # If the jar built by a java library includes all its dependencies,
      # there is no point in including these dependencies in the all_deps key.
      return False
    return True


class ScalaLibrary(ScalaCommon):
  """Handler class for Scala lib build rules."""


class ScalaBinary(ScalaCommon):
  """Handler class for Scala binary build rules."""


class ScalaTest(ScalaCommon):
  """Handler class for Scala test build rules."""
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
"""Tests for NodeLicense models, serializers and Node.set_node_license."""
# Python 3.x incompatible, use import builtins instead
import __builtin__ as builtins
import json
import unittest
import mock
import pytest
from django.core.exceptions import ValidationError
from nose.tools import *  # noqa: F403 (PEP8 asserts)

from framework.auth import Auth
from osf_tests.factories import (AuthUserFactory, NodeLicenseRecordFactory,
                                 ProjectFactory)
from tests.base import OsfTestCase
from osf.utils.migrations import ensure_licenses
from tests.utils import assert_logs, assert_not_logs
from website import settings

from osf.models.licenses import NodeLicense, serialize_node_license_record, serialize_node_license
from osf.models import NodeLog
from osf.exceptions import NodeStateError

# Replacement values used to fake the on-disk licenses file in
# test_ensure_licenses_updates_existing (patched into builtins.open).
CHANGED_NAME = 'FOO BAR'
CHANGED_TEXT = 'Some good new text'
CHANGED_PROPERTIES = ['foo', 'bar']
LICENSE_TEXT = json.dumps({
    'MIT': {
        'name': CHANGED_NAME,
        'text': CHANGED_TEXT,
        'properties': CHANGED_PROPERTIES
    }
})


class TestNodeLicenses(OsfTestCase):

    def setUp(self):
        # Fixture: a project owned by self.user carrying an MIT license record.
        super(TestNodeLicenses, self).setUp()

        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.LICENSE_NAME = 'MIT License'
        self.node_license = NodeLicense.objects.get(name=self.LICENSE_NAME)
        self.YEAR = '2105'
        self.COPYRIGHT_HOLDERS = ['Foo', 'Bar']
        self.node.node_license = NodeLicenseRecordFactory(
            node_license=self.node_license,
            year=self.YEAR,
            copyright_holders=self.COPYRIGHT_HOLDERS
        )
        self.node.save()

    def test_serialize_node_license(self):
        # Serializing the license itself exposes name, id and text.
        serialized = serialize_node_license(self.node_license)
        assert_equal(serialized['name'], self.LICENSE_NAME)
        assert_equal(serialized['id'], self.node_license.license_id)
        assert_equal(serialized['text'], self.node_license.text)

    def test_serialize_node_license_record(self):
        # A license *record* additionally carries the year and holders.
        serialized = serialize_node_license_record(self.node.node_license)
        assert_equal(serialized['name'], self.LICENSE_NAME)
        assert_equal(serialized['id'], self.node_license.license_id)
        assert_equal(serialized['text'], self.node_license.text)
        assert_equal(serialized['year'], self.YEAR)
        assert_equal(serialized['copyright_holders'], self.COPYRIGHT_HOLDERS)

    def test_serialize_node_license_record_None(self):
        # A node without a license serializes to an empty dict.
        self.node.node_license = None
        serialized = serialize_node_license_record(self.node.node_license)
        assert_equal(serialized, {})

    def test_copy_node_license_record(self):
        # copy() yields a new persisted record (fresh _id) with equal fields.
        record = self.node.node_license
        copied = record.copy()
        assert_is_not_none(copied._id)
        assert_not_equal(record._id, copied._id)
        for prop in ('license_id', 'name', 'node_license'):
            assert_equal(getattr(record, prop), getattr(copied, prop))

    @pytest.mark.enable_implicit_clean
    def test_license_uniqueness_on_id_is_enforced_in_the_database(self):
        # Saving a second license with a duplicate license_id must fail.
        NodeLicense(license_id='foo', name='bar', text='baz').save()
        assert_raises(ValidationError, NodeLicense(license_id='foo', name='buz', text='boo').save)

    def test_ensure_licenses_updates_existing_licenses(self):
        # NOTE(review): (0, 18) is presumably (created, updated) for the 18
        # bundled licenses -- confirm against ensure_licenses' return value.
        assert_equal(ensure_licenses(), (0, 18))

    def test_ensure_licenses_no_licenses(self):
        # ensure_licenses recreates the full set after a wipe.
        before_count = NodeLicense.objects.all().count()
        NodeLicense.objects.all().delete()
        assert_false(NodeLicense.objects.all().count())

        ensure_licenses()
        assert_equal(before_count, NodeLicense.objects.all().count())

    def test_ensure_licenses_some_missing(self):
        # A single deleted license is restored.
        NodeLicense.objects.get(license_id='LGPL3').delete()
        with assert_raises(NodeLicense.DoesNotExist):
            NodeLicense.objects.get(license_id='LGPL3')
        ensure_licenses()
        found = NodeLicense.objects.get(license_id='LGPL3')
        assert_is_not_none(found)

    def test_ensure_licenses_updates_existing(self):
        # Patch builtins.open so ensure_licenses reads LICENSE_TEXT instead
        # of the real licenses file, then verify MIT was rewritten from it.
        with mock.patch.object(builtins, 'open', mock.mock_open(read_data=LICENSE_TEXT)):
            ensure_licenses()
        MIT = NodeLicense.objects.get(license_id='MIT')
        assert_equal(MIT.name, CHANGED_NAME)
        assert_equal(MIT.text, CHANGED_TEXT)
        assert_equal(MIT.properties, CHANGED_PROPERTIES)

    @assert_logs(NodeLog.CHANGED_LICENSE, 'node')
    def test_Node_set_node_license(self):
        # Switching the license records the new id/name/holders and (via the
        # decorator) writes a CHANGED_LICENSE log entry.
        GPL3 = NodeLicense.objects.get(license_id='GPL3')
        NEW_YEAR = '2014'
        COPYLEFT_HOLDERS = ['Richard Stallman']
        self.node.set_node_license(
            {
                'id': GPL3.license_id,
                'year': NEW_YEAR,
                'copyrightHolders': COPYLEFT_HOLDERS
            },
            auth=Auth(self.user),
            save=True
        )

        assert_equal(self.node.node_license.license_id, GPL3.license_id)
        assert_equal(self.node.node_license.name, GPL3.name)
        assert_equal(self.node.node_license.copyright_holders, COPYLEFT_HOLDERS)

    @assert_not_logs(NodeLog.CHANGED_LICENSE, 'node')
    def test_Node_set_node_license_invalid(self):
        # An unknown license id raises and must not write a log entry.
        with assert_raises(NodeStateError):
            self.node.set_node_license(
                {
                    'id': 'SOME ID',
                    'year': 'foo',
                    'copyrightHolders': []
                },
                auth=Auth(self.user)
            )
unknown
codeparrot/codeparrot-clean
# (C) Copyright 2017, 2019-2021 by Rocky Bernstein # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ Common routines for entering and classifiying opcodes. Inspired by, limited by, and somewhat compatible with the corresponding Python opcode.py structures """ from copy import deepcopy from xdis.cross_dis import ( findlinestarts, findlabels, get_jump_target_maps, get_jump_targets, ) from xdis import wordcode from xdis import IS_PYPY, PYTHON_VERSION cmp_op = ( "<", "<=", "==", "!=", ">", ">=", "in", "not-in", "is", "is-not", "exception-match", "BAD", ) # Opcodes greater than 90 take an instruction operand or "argument" # as opcode.py likes to call it. HAVE_ARGUMENT = 90 fields2copy = """ hascompare hascondition hasconst hasfree hasjabs hasjrel haslocal hasname hasnargs hasstore hasvargs oppop oppush nofollow """.split() def init_opdata(l, from_mod, version=None, is_pypy=False): """Sets up a number of the structures found in Python's opcode.py. Python opcode.py routines assign attributes to modules. In order to do this in a modular way here, the local dictionary for the module is passed. 
""" if version: l["python_version"] = version l["is_pypy"] = is_pypy l["cmp_op"] = cmp_op l["HAVE_ARGUMENT"] = HAVE_ARGUMENT l["findlinestarts"] = findlinestarts if version <= 3.5: l["findlabels"] = findlabels l["get_jump_targets"] = get_jump_targets l["get_jump_target_maps"] = get_jump_target_maps else: l["findlabels"] = wordcode.findlabels l["get_jump_targets"] = wordcode.get_jump_targets l["get_jump_target_maps"] = wordcode.get_jump_target_maps l["opmap"] = deepcopy(from_mod.opmap) l["opname"] = deepcopy(from_mod.opname) for field in fields2copy: l[field] = list(getattr(from_mod, field)) def compare_op(l, name, op, pop=2, push=1): def_op(l, name, op, pop, push) l["hascompare"].append(op) def conditional_op(l, name, op): l["hascompare"].append(op) def const_op(l, name, op, pop=0, push=1): def_op(l, name, op, pop, push) l["hasconst"].append(op) def def_op(l, op_name, opcode, pop=-2, push=-2, fallthrough=True): l["opname"][opcode] = op_name l["opmap"][op_name] = opcode l["oppush"][opcode] = push l["oppop"][opcode] = pop if not fallthrough: l["nofollow"].append(opcode) def free_op(l, name, op, pop=0, push=1): def_op(l, name, op, pop, push) l["hasfree"].append(op) def jabs_op(l, name, op, pop=0, push=0, conditional=False, fallthrough=True): def_op(l, name, op, pop, push, fallthrough=fallthrough) l["hasjabs"].append(op) if conditional: l["hascondition"].append(op) def jrel_op(l, name, op, pop=0, push=0, conditional=False, fallthrough=True): def_op(l, name, op, pop, push) l["hasjrel"].append(op) if conditional: l["hascondition"].append(op) def local_op(l, name, op, pop=0, push=1): def_op(l, name, op, pop, push) l["haslocal"].append(op) def name_op(l, op_name, op_code, pop=-2, push=-2): def_op(l, op_name, op_code, pop, push) l["hasname"].append(op_code) def nargs_op(l, name, op, pop=-2, push=-2, fallthrough=True): def_op(l, name, op, pop, push, fallthrough=fallthrough) l["hasnargs"].append(op) def rm_op(l, name, op): """Remove an opcode. 
This is used when basing a new Python release off of another one, and there is an opcode that is in the old release that was removed in the new release. We are pretty aggressive about removing traces of the op. """ # opname is an array, so we need to keep the position in there. l["opname"][op] = "<%s>" % op if op in l["hasconst"]: l["hasconst"].remove(op) if op in l["hascompare"]: l["hascompare"].remove(op) if op in l["hascondition"]: l["hascondition"].remove(op) if op in l["hasfree"]: l["hasfree"].remove(op) if op in l["hasjabs"]: l["hasjabs"].remove(op) if op in l["hasname"]: l["hasname"].remove(op) if op in l["hasjrel"]: l["hasjrel"].remove(op) if op in l["haslocal"]: l["haslocal"].remove(op) if op in l["hasname"]: l["hasname"].remove(op) if op in l["hasnargs"]: l["hasnargs"].remove(op) if op in l["hasvargs"]: l["hasvargs"].remove(op) if op in l["nofollow"]: l["nofollow"].remove(op) assert l["opmap"][name] == op del l["opmap"][name] def store_op(l, name, op, pop=0, push=1, is_type="def"): if is_type == "name": name_op(l, name, op, pop, push) elif is_type == "local": local_op(l, name, op, pop, push) elif is_type == "free": free_op(l, name, op, pop, push) else: assert is_type == "def" def_op(l, name, op, pop, push) l["hasstore"].append(op) # This is not in Python. The operand indicates how # items on the pop from the stack. BUILD_TUPLE_UNPACK # is line this. def varargs_op(l, op_name, op_code, pop=-1, push=1): def_op(l, op_name, op_code, pop, push) l["hasvargs"].append(op_code) # Some of the convoluted code below reflects some of the # many Python idiocies over the years. 
def finalize_opcodes(l): # Not sure why, but opcode.py address has opcode.EXTENDED_ARG # as well as opmap['EXTENDED_ARG'] l["EXTENDED_ARG"] = l["opmap"]["EXTENDED_ARG"] # In Python 3.6+ this is 8, but we expect # those opcodes to set that if "EXTENDED_ARG_SHIFT" not in l: l["EXTENDED_ARG_SHIFT"] = 16 l["ARG_MAX_VALUE"] = (1 << l["EXTENDED_ARG_SHIFT"]) - 1 l["EXTENDED_ARG"] = l["opmap"]["EXTENDED_ARG"] l["opmap"] = fix_opcode_names(l["opmap"]) # Now add in the attributes into the module for op in l["opmap"]: l[op] = l["opmap"][op] l["JUMP_OPs"] = frozenset(l["hasjrel"] + l["hasjabs"]) l["NOFOLLOW"] = frozenset(l["nofollow"]) opcode_check(l) return def fix_opcode_names(opmap): """ Python stupidly named some OPCODES with a + which prevents using opcode name directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we can then use opcode_23.SLICE_3. Later Python's fix this. """ return dict([(k.replace("+", "_"), v) for (k, v) in opmap.items()]) def update_pj3(g, l): g.update({"PJIF": l["opmap"]["POP_JUMP_IF_FALSE"]}) g.update({"PJIT": l["opmap"]["POP_JUMP_IF_TRUE"]}) update_sets(l) def update_pj2(g, l): g.update({"PJIF": l["opmap"]["JUMP_IF_FALSE"]}) g.update({"PJIT": l["opmap"]["JUMP_IF_TRUE"]}) update_sets(l) def update_sets(l): l["COMPARE_OPS"] = frozenset(l["hascompare"]) l["CONDITION_OPS"] = frozenset(l["hascondition"]) l["CONST_OPS"] = frozenset(l["hasconst"]) l["FREE_OPS"] = frozenset(l["hasfree"]) l["JREL_OPS"] = frozenset(l["hasjrel"]) l["JABS_OPS"] = frozenset(l["hasjabs"]) l["JUMP_UNCONDITONAL"] = frozenset( [l["opmap"]["JUMP_ABSOLUTE"], l["opmap"]["JUMP_FORWARD"]] ) if l["python_version"] < 3.8: l["LOOP_OPS"] = frozenset([l["opmap"]["SETUP_LOOP"]]) else: l["LOOP_OPS"] = frozenset() l["LOCAL_OPS"] = frozenset(l["haslocal"]) l["JUMP_OPS"] = ( l["JABS_OPS"] | l["JREL_OPS"] | l["LOOP_OPS"] | l["JUMP_UNCONDITONAL"] ) l["NAME_OPS"] = frozenset(l["hasname"]) l["NARGS_OPS"] = frozenset(l["hasnargs"]) l["VARGS_OPS"] = frozenset(l["hasvargs"]) 
l["STORE_OPS"] = frozenset(l["hasstore"]) def extended_format_CALL_FUNCTION(opc, instructions): """call_function_inst should be a "CALL_FUNCTION_KW" instruction. Look in `instructions` to see if we can find a method name. If not we'll return None. """ # From opcode description: argc indicates the total number of positional and keyword arguments. # Sometimes the function name is in the stack arg positions back. call_function_inst = instructions[0] assert call_function_inst.opname == "CALL_FUNCTION" argc = call_function_inst.arg ( name_default, pos_args, ) = divmod(argc, 256) function_pos = pos_args + name_default * 2 + 1 assert len(instructions) >= function_pos + 1 for i, inst in enumerate(instructions[1:]): if i + 1 == function_pos: i += 1 break if inst.is_jump_target: i += 1 break # Make sure we are in the same basic block # and ... ? opcode = inst.opcode if inst.optype in ("nargs", "vargs"): break if inst.opname == "LOAD_ATTR" or inst.optype != "name": function_pos += (opc.oppop[opcode] - opc.oppush[opcode]) + 1 if inst.opname in ("CALL_FUNCTION", "CALL_FUNCTION_KW"): break pass s = "" if i == function_pos: if instructions[function_pos].opname in ( "LOAD_CONST", "LOAD_GLOBAL", "LOAD_ATTR", "LOAD_NAME", ): s = resolved_attrs(instructions[function_pos:]) s += ": " pass pass s += format_CALL_FUNCTION_pos_name_encoded(call_function_inst.arg) return s def resolved_attrs(instructions): resolved = [] for inst in instructions: name = inst.argrepr if name: if name[0] == "'" and name[-1] == "'": name = name[1:-1] else: name = "" resolved.append(name) if inst.opname != "LOAD_ATTR": break return ".".join(reversed(resolved)) def extended_format_ATTR(opc, instructions): if instructions[1].opname in ( "LOAD_CONST", "LOAD_GLOBAL", "LOAD_ATTR", "LOAD_NAME", ): return "%s.%s" % (instructions[1].argrepr, instructions[0].argrepr) def extended_format_MAKE_FUNCTION_older(opc, instructions): """make_function_inst should be a "MAKE_FUNCTION" or "MAKE_CLOSURE" instruction. 
TOS should have the function or closure name. """ # From opcode description: argc indicates the total number of positional and keyword arguments. # Sometimes the function name is in the stack arg positions back. assert len(instructions) >= 2 inst = instructions[0] assert inst.opname in ("MAKE_FUNCTION", "MAKE_CLOSURE") s = "" code_inst = instructions[1] if code_inst.opname == "LOAD_CONST" and hasattr(code_inst.argval, "co_name"): s += "%s: " % code_inst.argval.co_name pass s += format_MAKE_FUNCTION_default_argc(inst.arg) return s def extended_format_RAISE_VARARGS_older(opc, instructions): raise_inst = instructions[0] assert raise_inst.opname == "RAISE_VARARGS" assert len(instructions) >= 1 if instructions[1].opname in ( "LOAD_CONST", "LOAD_GLOBAL", "LOAD_ATTR", "LOAD_NAME", ): return resolved_attrs(instructions[1:]) return format_RAISE_VARARGS_older(raise_inst.argval) def extended_format_RETURN_VALUE(opc, instructions): return_inst = instructions[0] assert return_inst.opname == "RETURN_VALUE" assert len(instructions) >= 1 if instructions[1].opname in ( "LOAD_CONST", "LOAD_GLOBAL", "LOAD_ATTR", "LOAD_NAME", ): return resolved_attrs(instructions[1:]) return None def format_extended_arg(arg): return str(arg * (1 << 16)) def format_CALL_FUNCTION_pos_name_encoded(argc): """Encoded positional and named args. Used to up to about 3.6 where wordcodes are used and a different encoding occurs. 
Pypy36 though sticks to this encoded version though.""" name_default, pos_args = divmod(argc, 256) return "%d positional, %d named" % (pos_args, name_default) # After Python 3.2 def format_MAKE_FUNCTION_arg(argc): name_and_annotate, pos_args = divmod(argc, 256) annotate_args, name_default = divmod(name_and_annotate, 256) return "%d positional, %d name and default, %d annotations" % ( pos_args, name_default, annotate_args, ) # Up to and including Python 3.2 def format_MAKE_FUNCTION_default_argc(argc): return "%d default parameters" % argc # Up until 3.7 def format_RAISE_VARARGS_older(argc): assert 0 <= argc <= 3 if argc == 0: return "reraise" elif argc == 1: return "exception" elif argc == 2: return "exception, parameter" elif argc == 3: return "exception, parameter, traceback" def opcode_check(l): """When the version of Python we are running happens to have the same opcode set as the opcode we are importing, we perform checks to make sure our opcode set matches exactly. """ # Python 2.6 reports 2.6000000000000001 if abs(PYTHON_VERSION - l["python_version"]) <= 0.01 and IS_PYPY == l["is_pypy"]: try: import dis opmap = fix_opcode_names(dis.opmap) # print(set(opmap.items()) - set(l['opmap'].items())) # print(set(l['opmap'].items()) - set(opmap.items())) assert all(item in opmap.items() for item in l["opmap"].items()) assert all(item in l["opmap"].items() for item in opmap.items()) except: pass def dump_opcodes(opmap): """Utility for dumping opcodes""" op2name = {} for k in opmap.keys(): op2name[opmap[k]] = k for i in sorted(op2name.keys()): print("%-3s %s" % (str(i), op2name[i]))
unknown
codeparrot/codeparrot-clean
#include "perf_precomp.hpp" namespace opencv_test { using namespace perf; CV_ENUM(pnpAlgo, SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, SOLVEPNP_UPNP) typedef tuple<int, pnpAlgo> PointsNum_Algo_t; typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo; typedef perf::TestBaseWithParam<int> PointsNum; PERF_TEST_P(PointsNum_Algo, solvePnP, testing::Combine( //When non planar, DLT needs at least 6 points for SOLVEPNP_ITERATIVE flag testing::Values(6, 3*9, 7*13), //TODO: find why results on 4 points are too unstable testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_UPNP, (int)SOLVEPNP_DLS) ) ) { int pointsNum = get<0>(GetParam()); pnpAlgo algo = get<1>(GetParam()); vector<Point2f> points2d(pointsNum); vector<Point3f> points3d(pointsNum); Mat rvec = Mat::zeros(3, 1, CV_32FC1); Mat tvec = Mat::zeros(3, 1, CV_32FC1); Mat distortion = Mat::zeros(5, 1, CV_32FC1); Mat intrinsics = Mat::eye(3, 3, CV_32FC1); intrinsics.at<float> (0, 0) = 400.0; intrinsics.at<float> (1, 1) = 400.0; intrinsics.at<float> (0, 2) = 640 / 2; intrinsics.at<float> (1, 2) = 480 / 2; warmup(points3d, WARMUP_RNG); warmup(rvec, WARMUP_RNG); warmup(tvec, WARMUP_RNG); projectPoints(points3d, rvec, tvec, intrinsics, distortion, points2d); //add noise Mat noise(1, (int)points2d.size(), CV_32FC2); randu(noise, 0, 0.01); cv::add(points2d, noise, points2d); declare.in(points3d, points2d); declare.time(100); TEST_CYCLE_N(1000) { cv::solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, algo); } SANITY_CHECK(rvec, 1e-4); SANITY_CHECK(tvec, 1e-4); } PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints, testing::Combine( testing::Values(5), testing::Values((int)SOLVEPNP_P3P, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_DLS, (int)SOLVEPNP_UPNP) ) ) { int pointsNum = get<0>(GetParam()); pnpAlgo algo = get<1>(GetParam()); if( algo == SOLVEPNP_P3P ) pointsNum = 4; vector<Point2f> points2d(pointsNum); vector<Point3f> points3d(pointsNum); Mat rvec = Mat::zeros(3, 1, 
CV_32FC1); Mat tvec = Mat::zeros(3, 1, CV_32FC1); Mat distortion = Mat::zeros(5, 1, CV_32FC1); Mat intrinsics = Mat::eye(3, 3, CV_32FC1); intrinsics.at<float> (0, 0) = 400.0f; intrinsics.at<float> (1, 1) = 400.0f; intrinsics.at<float> (0, 2) = 640 / 2; intrinsics.at<float> (1, 2) = 480 / 2; warmup(points3d, WARMUP_RNG); warmup(rvec, WARMUP_RNG); warmup(tvec, WARMUP_RNG); // normalize Rodrigues vector Mat rvec_tmp = Mat::eye(3, 3, CV_32F); cv::Rodrigues(rvec, rvec_tmp); cv::Rodrigues(rvec_tmp, rvec); cv::projectPoints(points3d, rvec, tvec, intrinsics, distortion, points2d); //add noise Mat noise(1, (int)points2d.size(), CV_32FC2); randu(noise, -0.001, 0.001); cv::add(points2d, noise, points2d); declare.in(points3d, points2d); declare.time(100); TEST_CYCLE_N(1000) { cv::solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, algo); } SANITY_CHECK(rvec, 1e-1); SANITY_CHECK(tvec, 1e-2); } PERF_TEST_P(PointsNum, DISABLED_SolvePnPRansac, testing::Values(5, 3*9, 7*13)) { int count = GetParam(); Mat object(1, count, CV_32FC3); randu(object, -100, 100); Mat camera_mat(3, 3, CV_32FC1); randu(camera_mat, 0.5, 1); camera_mat.at<float>(0, 1) = 0.f; camera_mat.at<float>(1, 0) = 0.f; camera_mat.at<float>(2, 0) = 0.f; camera_mat.at<float>(2, 1) = 0.f; Mat dist_coef(1, 8, CV_32F, cv::Scalar::all(0)); vector<cv::Point2f> image_vec; Mat rvec_gold(1, 3, CV_32FC1); randu(rvec_gold, 0, 1); Mat tvec_gold(1, 3, CV_32FC1); randu(tvec_gold, 0, 1); projectPoints(object, rvec_gold, tvec_gold, camera_mat, dist_coef, image_vec); Mat image(1, count, CV_32FC2, &image_vec[0]); Mat rvec; Mat tvec; TEST_CYCLE() { cv::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec); } SANITY_CHECK(rvec, 1e-6); SANITY_CHECK(tvec, 1e-6); } } // namespace
cpp
github
https://github.com/opencv/opencv
modules/calib3d/perf/perf_pnp.cpp
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package io_test import ( "bytes" "fmt" . "io" "slices" "strings" "testing" "time" ) func checkWrite(t *testing.T, w Writer, data []byte, c chan int) { n, err := w.Write(data) if err != nil { t.Errorf("write: %v", err) } if n != len(data) { t.Errorf("short write: %d != %d", n, len(data)) } c <- 0 } // Test a single read/write pair. func TestPipe1(t *testing.T) { c := make(chan int) r, w := Pipe() var buf = make([]byte, 64) go checkWrite(t, w, []byte("hello, world"), c) n, err := r.Read(buf) if err != nil { t.Errorf("read: %v", err) } else if n != 12 || string(buf[0:12]) != "hello, world" { t.Errorf("bad read: got %q", buf[0:n]) } <-c r.Close() w.Close() } func reader(t *testing.T, r Reader, c chan int) { var buf = make([]byte, 64) for { n, err := r.Read(buf) if err == EOF { c <- 0 break } if err != nil { t.Errorf("read: %v", err) } c <- n } } // Test a sequence of read/write pairs. func TestPipe2(t *testing.T) { c := make(chan int) r, w := Pipe() go reader(t, r, c) var buf = make([]byte, 64) for i := 0; i < 5; i++ { p := buf[0 : 5+i*10] n, err := w.Write(p) if n != len(p) { t.Errorf("wrote %d, got %d", len(p), n) } if err != nil { t.Errorf("write: %v", err) } nn := <-c if nn != n { t.Errorf("wrote %d, read got %d", n, nn) } } w.Close() nn := <-c if nn != 0 { t.Errorf("final read got %d", nn) } } type pipeReturn struct { n int err error } // Test a large write that requires multiple reads to satisfy. 
func writer(w WriteCloser, buf []byte, c chan pipeReturn) { n, err := w.Write(buf) w.Close() c <- pipeReturn{n, err} } func TestPipe3(t *testing.T) { c := make(chan pipeReturn) r, w := Pipe() var wdat = make([]byte, 128) for i := 0; i < len(wdat); i++ { wdat[i] = byte(i) } go writer(w, wdat, c) var rdat = make([]byte, 1024) tot := 0 for n := 1; n <= 256; n *= 2 { nn, err := r.Read(rdat[tot : tot+n]) if err != nil && err != EOF { t.Fatalf("read: %v", err) } // only final two reads should be short - 1 byte, then 0 expect := n if n == 128 { expect = 1 } else if n == 256 { expect = 0 if err != EOF { t.Fatalf("read at end: %v", err) } } if nn != expect { t.Fatalf("read %d, expected %d, got %d", n, expect, nn) } tot += nn } pr := <-c if pr.n != 128 || pr.err != nil { t.Fatalf("write 128: %d, %v", pr.n, pr.err) } if tot != 128 { t.Fatalf("total read %d != 128", tot) } for i := 0; i < 128; i++ { if rdat[i] != byte(i) { t.Fatalf("rdat[%d] = %d", i, rdat[i]) } } } // Test read after/before writer close. 
type closer interface { CloseWithError(error) error Close() error } type pipeTest struct { async bool err error closeWithError bool } func (p pipeTest) String() string { return fmt.Sprintf("async=%v err=%v closeWithError=%v", p.async, p.err, p.closeWithError) } var pipeTests = []pipeTest{ {true, nil, false}, {true, nil, true}, {true, ErrShortWrite, true}, {false, nil, false}, {false, nil, true}, {false, ErrShortWrite, true}, } func delayClose(t *testing.T, cl closer, ch chan int, tt pipeTest) { time.Sleep(1 * time.Millisecond) var err error if tt.closeWithError { err = cl.CloseWithError(tt.err) } else { err = cl.Close() } if err != nil { t.Errorf("delayClose: %v", err) } ch <- 0 } func TestPipeReadClose(t *testing.T) { for _, tt := range pipeTests { c := make(chan int, 1) r, w := Pipe() if tt.async { go delayClose(t, w, c, tt) } else { delayClose(t, w, c, tt) } var buf = make([]byte, 64) n, err := r.Read(buf) <-c want := tt.err if want == nil { want = EOF } if err != want { t.Errorf("read from closed pipe: %v want %v", err, want) } if n != 0 { t.Errorf("read on closed pipe returned %d", n) } if err = r.Close(); err != nil { t.Errorf("r.Close: %v", err) } } } // Test close on Read side during Read. func TestPipeReadClose2(t *testing.T) { c := make(chan int, 1) r, _ := Pipe() go delayClose(t, r, c, pipeTest{}) n, err := r.Read(make([]byte, 64)) <-c if n != 0 || err != ErrClosedPipe { t.Errorf("read from closed pipe: %v, %v want %v, %v", n, err, 0, ErrClosedPipe) } } // Test write after/before reader close. 
func TestPipeWriteClose(t *testing.T) { for _, tt := range pipeTests { c := make(chan int, 1) r, w := Pipe() if tt.async { go delayClose(t, r, c, tt) } else { delayClose(t, r, c, tt) } n, err := WriteString(w, "hello, world") <-c expect := tt.err if expect == nil { expect = ErrClosedPipe } if err != expect { t.Errorf("write on closed pipe: %v want %v", err, expect) } if n != 0 { t.Errorf("write on closed pipe returned %d", n) } if err = w.Close(); err != nil { t.Errorf("w.Close: %v", err) } } } // Test close on Write side during Write. func TestPipeWriteClose2(t *testing.T) { c := make(chan int, 1) _, w := Pipe() go delayClose(t, w, c, pipeTest{}) n, err := w.Write(make([]byte, 64)) <-c if n != 0 || err != ErrClosedPipe { t.Errorf("write to closed pipe: %v, %v want %v, %v", n, err, 0, ErrClosedPipe) } } func TestWriteEmpty(t *testing.T) { r, w := Pipe() go func() { w.Write([]byte{}) w.Close() }() var b [2]byte ReadFull(r, b[0:2]) r.Close() } func TestWriteNil(t *testing.T) { r, w := Pipe() go func() { w.Write(nil) w.Close() }() var b [2]byte ReadFull(r, b[0:2]) r.Close() } func TestWriteAfterWriterClose(t *testing.T) { r, w := Pipe() defer r.Close() done := make(chan bool) var writeErr error go func() { _, err := w.Write([]byte("hello")) if err != nil { t.Errorf("got error: %q; expected none", err) } w.Close() _, writeErr = w.Write([]byte("world")) done <- true }() buf := make([]byte, 100) var result string n, err := ReadFull(r, buf) if err != nil && err != ErrUnexpectedEOF { t.Fatalf("got: %q; want: %q", err, ErrUnexpectedEOF) } result = string(buf[0:n]) <-done if result != "hello" { t.Errorf("got: %q; want: %q", result, "hello") } if writeErr != ErrClosedPipe { t.Errorf("got: %q; want: %q", writeErr, ErrClosedPipe) } } func TestPipeCloseError(t *testing.T) { type testError1 struct{ error } type testError2 struct{ error } r, w := Pipe() r.CloseWithError(testError1{}) if _, err := w.Write(nil); err != (testError1{}) { t.Errorf("Write error: got %T, want 
testError1", err) } r.CloseWithError(testError2{}) if _, err := w.Write(nil); err != (testError1{}) { t.Errorf("Write error: got %T, want testError1", err) } r, w = Pipe() w.CloseWithError(testError1{}) if _, err := r.Read(nil); err != (testError1{}) { t.Errorf("Read error: got %T, want testError1", err) } w.CloseWithError(testError2{}) if _, err := r.Read(nil); err != (testError1{}) { t.Errorf("Read error: got %T, want testError1", err) } } func TestPipeConcurrent(t *testing.T) { const ( input = "0123456789abcdef" count = 8 readSize = 2 ) t.Run("Write", func(t *testing.T) { r, w := Pipe() for i := 0; i < count; i++ { go func() { time.Sleep(time.Millisecond) // Increase probability of race if n, err := w.Write([]byte(input)); n != len(input) || err != nil { t.Errorf("Write() = (%d, %v); want (%d, nil)", n, err, len(input)) } }() } buf := make([]byte, count*len(input)) for i := 0; i < len(buf); i += readSize { if n, err := r.Read(buf[i : i+readSize]); n != readSize || err != nil { t.Errorf("Read() = (%d, %v); want (%d, nil)", n, err, readSize) } } // Since each Write is fully gated, if multiple Read calls were needed, // the contents of Write should still appear together in the output. got := string(buf) want := strings.Repeat(input, count) if got != want { t.Errorf("got: %q; want: %q", got, want) } }) t.Run("Read", func(t *testing.T) { r, w := Pipe() c := make(chan []byte, count*len(input)/readSize) for i := 0; i < cap(c); i++ { go func() { time.Sleep(time.Millisecond) // Increase probability of race buf := make([]byte, readSize) if n, err := r.Read(buf); n != readSize || err != nil { t.Errorf("Read() = (%d, %v); want (%d, nil)", n, err, readSize) } c <- buf }() } for i := 0; i < count; i++ { if n, err := w.Write([]byte(input)); n != len(input) || err != nil { t.Errorf("Write() = (%d, %v); want (%d, nil)", n, err, len(input)) } } // Since each read is independent, the only guarantee about the output // is that it is a permutation of the input in readSized groups. 
got := make([]byte, 0, count*len(input)) for i := 0; i < cap(c); i++ { got = append(got, (<-c)...) } got = sortBytesInGroups(got, readSize) want := bytes.Repeat([]byte(input), count) want = sortBytesInGroups(want, readSize) if string(got) != string(want) { t.Errorf("got: %q; want: %q", got, want) } }) } func sortBytesInGroups(b []byte, n int) []byte { var groups [][]byte for len(b) > 0 { groups = append(groups, b[:n]) b = b[n:] } slices.SortFunc(groups, bytes.Compare) return bytes.Join(groups, nil) } var ( rSink *PipeReader wSink *PipeWriter ) func TestPipeAllocations(t *testing.T) { numAllocs := testing.AllocsPerRun(10, func() { rSink, wSink = Pipe() }) // go.dev/cl/473535 claimed Pipe() should only do 2 allocations, // plus the 2 escaping to heap for simulating real world usages. expectedAllocs := 4 if int(numAllocs) > expectedAllocs { t.Fatalf("too many allocations for io.Pipe() call: %f", numAllocs) } }
go
github
https://github.com/golang/go
src/io/pipe_test.go
# Copyright (c) 2022 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) DOCUMENTATION: module: in_function short_description: Short description for in_function module description: - Description for in_function module options: test: description: Description for test type: str author: - Ansible Core Team EXAMPLES: | - name: example for sidecar ns.col.in_function: RETURN: test: description: The test return value returned: always type: str sample: abc
unknown
github
https://github.com/ansible/ansible
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
# BER encoder from pyasn1.type import base, tag, univ, char, useful from pyasn1.codec.ber import eoo from pyasn1.compat.octets import int2oct, oct2int, ints2octs, null, str2octs from pyasn1 import debug, error class Error(Exception): pass class AbstractItemEncoder: supportIndefLenMode = 1 def encodeTag(self, t, isConstructed): tagClass, tagFormat, tagId = t.asTuple() # this is a hotspot v = tagClass | tagFormat if isConstructed: v = v|tag.tagFormatConstructed if tagId < 31: return int2oct(v|tagId) else: s = int2oct(tagId&0x7f) tagId = tagId >> 7 while tagId: s = int2oct(0x80|(tagId&0x7f)) + s tagId = tagId >> 7 return int2oct(v|0x1F) + s def encodeLength(self, length, defMode): if not defMode and self.supportIndefLenMode: return int2oct(0x80) if length < 0x80: return int2oct(length) else: substrate = null while length: substrate = int2oct(length&0xff) + substrate length = length >> 8 substrateLen = len(substrate) if substrateLen > 126: raise Error('Length octets overflow (%d)' % substrateLen) return int2oct(0x80 | substrateLen) + substrate def encodeValue(self, encodeFun, value, defMode, maxChunkSize): raise Error('Not implemented') def _encodeEndOfOctets(self, encodeFun, defMode): if defMode or not self.supportIndefLenMode: return null else: return encodeFun(eoo.endOfOctets, defMode) def encode(self, encodeFun, value, defMode, maxChunkSize): substrate, isConstructed = self.encodeValue( encodeFun, value, defMode, maxChunkSize ) tagSet = value.getTagSet() if tagSet: if not isConstructed: # primitive form implies definite mode defMode = 1 return self.encodeTag( tagSet[-1], isConstructed ) + self.encodeLength( len(substrate), defMode ) + substrate + self._encodeEndOfOctets(encodeFun, defMode) else: return substrate # untagged value class EndOfOctetsEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): return null, 0 class ExplicitlyTaggedItemEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, 
maxChunkSize): if isinstance(value, base.AbstractConstructedAsn1Item): value = value.clone(tagSet=value.getTagSet()[:-1], cloneValueFlag=1) else: value = value.clone(tagSet=value.getTagSet()[:-1]) return encodeFun(value, defMode, maxChunkSize), 1 explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder() class BooleanEncoder(AbstractItemEncoder): supportIndefLenMode = 0 _true = ints2octs((1,)) _false = ints2octs((0,)) def encodeValue(self, encodeFun, value, defMode, maxChunkSize): return value and self._true or self._false, 0 class IntegerEncoder(AbstractItemEncoder): supportIndefLenMode = 0 supportCompactZero = False def encodeValue(self, encodeFun, value, defMode, maxChunkSize): if value == 0: # shortcut for zero value if self.supportCompactZero: # this seems to be a correct way for encoding zeros return null, 0 else: # this seems to be a widespread way for encoding zeros return ints2octs((0,)), 0 octets = [] value = int(value) # to save on ops on asn1 type while 1: octets.insert(0, value & 0xff) if value == 0 or value == -1: break value = value >> 8 if value == 0 and octets[0] & 0x80: octets.insert(0, 0) while len(octets) > 1 and \ (octets[0] == 0 and octets[1] & 0x80 == 0 or \ octets[0] == 0xff and octets[1] & 0x80 != 0): del octets[0] return ints2octs(octets), 0 class BitStringEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): if not maxChunkSize or len(value) <= maxChunkSize*8: r = {}; l = len(value); p = 0; j = 7 while p < l: i, j = divmod(p, 8) r[i] = r.get(i,0) | value[p]<<(7-j) p = p + 1 keys = list(r); keys.sort() return int2oct(7-j) + ints2octs([r[k] for k in keys]), 0 else: pos = 0; substrate = null while 1: # count in octets v = value.clone(value[pos*8:pos*8+maxChunkSize*8]) if not v: break substrate = substrate + encodeFun(v, defMode, maxChunkSize) pos = pos + maxChunkSize return substrate, 1 class OctetStringEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): if 
not maxChunkSize or len(value) <= maxChunkSize: return value.asOctets(), 0 else: pos = 0; substrate = null while 1: v = value.clone(value[pos:pos+maxChunkSize]) if not v: break substrate = substrate + encodeFun(v, defMode, maxChunkSize) pos = pos + maxChunkSize return substrate, 1 class NullEncoder(AbstractItemEncoder): supportIndefLenMode = 0 def encodeValue(self, encodeFun, value, defMode, maxChunkSize): return null, 0 class ObjectIdentifierEncoder(AbstractItemEncoder): supportIndefLenMode = 0 precomputedValues = { (1, 3, 6, 1, 2): (43, 6, 1, 2), (1, 3, 6, 1, 4): (43, 6, 1, 4) } def encodeValue(self, encodeFun, value, defMode, maxChunkSize): oid = value.asTuple() if oid[:5] in self.precomputedValues: octets = self.precomputedValues[oid[:5]] index = 5 else: if len(oid) < 2: raise error.PyAsn1Error('Short OID %s' % (value,)) # Build the first twos if oid[0] > 6 or oid[1] > 39 or oid[0] == 6 and oid[1] > 15: raise error.PyAsn1Error( 'Initial sub-ID overflow %s in OID %s' % (oid[:2], value) ) octets = (oid[0] * 40 + oid[1],) index = 2 # Cycle through subids for subid in oid[index:]: if subid > -1 and subid < 128: # Optimize for the common case octets = octets + (subid & 0x7f,) elif subid < 0 or subid > 0xFFFFFFFF: raise error.PyAsn1Error( 'SubId overflow %s in %s' % (subid, value) ) else: # Pack large Sub-Object IDs res = (subid & 0x7f,) subid = subid >> 7 while subid > 0: res = (0x80 | (subid & 0x7f),) + res subid = subid >> 7 # Add packed Sub-Object ID to resulted Object ID octets += res return ints2octs(octets), 0 class RealEncoder(AbstractItemEncoder): supportIndefLenMode = 0 def encodeValue(self, encodeFun, value, defMode, maxChunkSize): if value.isPlusInfinity(): return int2oct(0x40), 0 if value.isMinusInfinity(): return int2oct(0x41), 0 m, b, e = value if not m: return null, 0 if b == 10: return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), 0 elif b == 2: fo = 0x80 # binary enoding if m < 0: fo = fo | 0x40 # sign bit m = -m while int(m) != m: # drop 
floating point m *= 2 e -= 1 while m & 0x1 == 0: # mantissa normalization m >>= 1 e += 1 eo = null while e not in (0, -1): eo = int2oct(e&0xff) + eo e >>= 8 if e == 0 and eo and oct2int(eo[0]) & 0x80: eo = int2oct(0) + eo n = len(eo) if n > 0xff: raise error.PyAsn1Error('Real exponent overflow') if n == 1: pass elif n == 2: fo |= 1 elif n == 3: fo |= 2 else: fo |= 3 eo = int2oct(n//0xff+1) + eo po = null while m: po = int2oct(m&0xff) + po m >>= 8 substrate = int2oct(fo) + eo + po return substrate, 0 else: raise error.PyAsn1Error('Prohibited Real base %s' % b) class SequenceEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): value.setDefaultComponents() value.verifySizeSpec() substrate = null; idx = len(value) while idx > 0: idx = idx - 1 if value[idx] is None: # Optional component continue component = value.getDefaultComponentByPosition(idx) if component is not None and component == value[idx]: continue substrate = encodeFun( value[idx], defMode, maxChunkSize ) + substrate return substrate, 1 class SequenceOfEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): value.verifySizeSpec() substrate = null; idx = len(value) while idx > 0: idx = idx - 1 substrate = encodeFun( value[idx], defMode, maxChunkSize ) + substrate return substrate, 1 class ChoiceEncoder(AbstractItemEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): return encodeFun(value.getComponent(), defMode, maxChunkSize), 1 class AnyEncoder(OctetStringEncoder): def encodeValue(self, encodeFun, value, defMode, maxChunkSize): return value.asOctets(), defMode == 0 tagMap = { eoo.endOfOctets.tagSet: EndOfOctetsEncoder(), univ.Boolean.tagSet: BooleanEncoder(), univ.Integer.tagSet: IntegerEncoder(), univ.BitString.tagSet: BitStringEncoder(), univ.OctetString.tagSet: OctetStringEncoder(), univ.Null.tagSet: NullEncoder(), univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(), univ.Enumerated.tagSet: 
IntegerEncoder(), univ.Real.tagSet: RealEncoder(), # Sequence & Set have same tags as SequenceOf & SetOf univ.SequenceOf.tagSet: SequenceOfEncoder(), univ.SetOf.tagSet: SequenceOfEncoder(), univ.Choice.tagSet: ChoiceEncoder(), # character string types char.UTF8String.tagSet: OctetStringEncoder(), char.NumericString.tagSet: OctetStringEncoder(), char.PrintableString.tagSet: OctetStringEncoder(), char.TeletexString.tagSet: OctetStringEncoder(), char.VideotexString.tagSet: OctetStringEncoder(), char.IA5String.tagSet: OctetStringEncoder(), char.GraphicString.tagSet: OctetStringEncoder(), char.VisibleString.tagSet: OctetStringEncoder(), char.GeneralString.tagSet: OctetStringEncoder(), char.UniversalString.tagSet: OctetStringEncoder(), char.BMPString.tagSet: OctetStringEncoder(), # useful types useful.GeneralizedTime.tagSet: OctetStringEncoder(), useful.UTCTime.tagSet: OctetStringEncoder() } # Type-to-codec map for ambiguous ASN.1 types typeMap = { univ.Set.typeId: SequenceEncoder(), univ.SetOf.typeId: SequenceOfEncoder(), univ.Sequence.typeId: SequenceEncoder(), univ.SequenceOf.typeId: SequenceOfEncoder(), univ.Choice.typeId: ChoiceEncoder(), univ.Any.typeId: AnyEncoder() } class Encoder: def __init__(self, tagMap, typeMap={}): self.__tagMap = tagMap self.__typeMap = typeMap def __call__(self, value, defMode=1, maxChunkSize=0): debug.logger & debug.flagEncoder and debug.logger('encoder called in %sdef mode, chunk size %s for type %s, value:\n%s' % (not defMode and 'in' or '', maxChunkSize, value.__class__.__name__, value.prettyPrint())) tagSet = value.getTagSet() if len(tagSet) > 1: concreteEncoder = explicitlyTaggedItemEncoder else: if value.typeId is not None and value.typeId in self.__typeMap: concreteEncoder = self.__typeMap[value.typeId] elif tagSet in self.__tagMap: concreteEncoder = self.__tagMap[tagSet] else: tagSet = value.baseTagSet if tagSet in self.__tagMap: concreteEncoder = self.__tagMap[tagSet] else: raise Error('No encoder for %s' % (value,)) 
debug.logger & debug.flagEncoder and debug.logger('using value codec %s chosen by %r' % (concreteEncoder.__class__.__name__, tagSet)) substrate = concreteEncoder.encode( self, value, defMode, maxChunkSize ) debug.logger & debug.flagEncoder and debug.logger('built %s octets of substrate: %s\nencoder completed' % (len(substrate), debug.hexdump(substrate))) return substrate encode = Encoder(tagMap, typeMap)
unknown
codeparrot/codeparrot-clean
from typing import TYPE_CHECKING, Any from langchain_classic._api import create_importer if TYPE_CHECKING: from langchain_community.chat_models.mlflow_ai_gateway import ( ChatMLflowAIGateway, ChatParams, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "ChatMLflowAIGateway": "langchain_community.chat_models.mlflow_ai_gateway", "ChatParams": "langchain_community.chat_models.mlflow_ai_gateway", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "ChatMLflowAIGateway", "ChatParams", ]
python
github
https://github.com/langchain-ai/langchain
libs/langchain/langchain_classic/chat_models/mlflow_ai_gateway.py
# -*- coding: utf-8 -*- # (c) 2020 Matt Martz <matt@sivel.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type from units.compat.mock import MagicMock import pytest from ansible.module_utils.six import PY3 from ansible.utils.display import Display, get_text_width, initialize_locale def test_get_text_width(): initialize_locale() assert get_text_width(u'コンニチハ') == 10 assert get_text_width(u'abコcd') == 6 assert get_text_width(u'café') == 4 assert get_text_width(u'four') == 4 assert get_text_width(u'\u001B') == 0 assert get_text_width(u'ab\u0000') == 2 assert get_text_width(u'abコ\u0000') == 4 assert get_text_width(u'🚀🐮') == 4 assert get_text_width(u'\x08') == 0 assert get_text_width(u'\x08\x08') == 0 assert get_text_width(u'ab\x08cd') == 3 assert get_text_width(u'ab\x1bcd') == 3 assert get_text_width(u'ab\x7fcd') == 3 assert get_text_width(u'ab\x94cd') == 3 pytest.raises(TypeError, get_text_width, 1) pytest.raises(TypeError, get_text_width, b'four') @pytest.mark.skipif(PY3, reason='Fallback only happens reliably on py2') def test_get_text_width_no_locale(): pytest.raises(EnvironmentError, get_text_width, u'🚀🐮') def test_Display_banner_get_text_width(monkeypatch): initialize_locale() display = Display() display_mock = MagicMock() monkeypatch.setattr(display, 'display', display_mock) display.banner(u'🚀🐮', color=False, cows=False) args, kwargs = display_mock.call_args msg = args[0] stars = u' %s' % (75 * u'*') assert msg.endswith(stars) @pytest.mark.skipif(PY3, reason='Fallback only happens reliably on py2') def test_Display_banner_get_text_width_fallback(monkeypatch): display = Display() display_mock = MagicMock() monkeypatch.setattr(display, 'display', display_mock) display.banner(u'🚀🐮', color=False, cows=False) args, kwargs = display_mock.call_args msg = args[0] stars = u' %s' % (77 * u'*') assert msg.endswith(stars)
unknown
codeparrot/codeparrot-clean
/* * Copyright (c) 2017 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockito.internal.creation; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.lang.reflect.Method; import org.junit.Before; import org.junit.Test; import org.mockitoutil.TestBase; public class DelegatingMethodTest extends TestBase { private Method someMethod, otherMethod; private DelegatingMethod delegatingMethod; @Before public void setup() throws Exception { someMethod = Something.class.getMethod("someMethod", Object.class); otherMethod = Something.class.getMethod("otherMethod", Object.class); delegatingMethod = new DelegatingMethod(someMethod); } @Test public void equals_should_return_false_when_not_equal() throws Exception { DelegatingMethod notEqual = new DelegatingMethod(otherMethod); assertFalse(delegatingMethod.equals(notEqual)); } @Test public void equals_should_return_true_when_equal() throws Exception { DelegatingMethod equal = new DelegatingMethod(someMethod); assertTrue(delegatingMethod.equals(equal)); } @Test public void equals_should_return_true_when_self() throws Exception { assertTrue(delegatingMethod.equals(delegatingMethod)); } @Test public void equals_should_return_false_when_not_equal_to_method() throws Exception { assertFalse(delegatingMethod.equals(otherMethod)); } @Test public void equals_should_return_true_when_equal_to_method() throws Exception { assertTrue(delegatingMethod.equals(someMethod)); } private interface Something { Object someMethod(Object param); Object otherMethod(Object param); } }
java
github
https://github.com/mockito/mockito
mockito-core/src/test/java/org/mockito/internal/creation/DelegatingMethodTest.java
#!/bin/env python """ A simple example for the usage of Person.send() - random movement - output to visual player, which is executed as child process - you may try the other commented monitor examples - you can choose a single or multiple monitors - print a message send to another person """ import sys sys.path.append("..") import time import random from mosp.core import Simulation, Person from mosp.geo import osm from mosp.impl import movement from mosp.monitors import * __author__ = "P. Tute, B. Henne" __maintainer__ = "B. Henne" __contact__ = "henne@dcsec.uni-hannover.de" __copyright__ = "(c) 2010-2011, DCSec, Leibniz Universitaet Hannover, Germany" __license__ = "GPLv3" class MsgRandomWiggler(Person): """Implements a simple person doing only random movement on the map, sending and receiving some messages. @author: P. Tute""" next_target = movement.person_next_target_random def receive(self, m, sender): """On receiving a message, the message is printed to stdout.""" # this method is called, when a message is available. Do whatever you want with the message here. It will be called for each available message. print 't=%s, sender=%s, receiver=%s' % (self.sim.now(), sender.p_id, self.p_id) print '\t message=%s' % m return True # When True is returned, the message is removed from the message queue. Return False, if you want to keep it for the next time this Person wakes up. def think(self): """Person with id 23 sends hello messages to all people in his vicinity of 50 meters.""" super(MsgRandomWiggler, self).think() if self.p_id == 23: # send a message to a receiver or (here) a group of receivers self.send(self.get_near(50, self_included=False), "Hello Person in my vicinity at t=%s" % self.sim.now()) # the message will be queued by the receiver and received, when he wakes up # you may also specify earliest_arrival, which is the earliest tick to deliver this message # note that this is not a guarantee for receiving at that exact time. 
It is only guaranteed that the message is not delivered earlier. # finally you may also interrupt the receiver(s) by passing interrupt=True when calling send() def main(): """Defines the simulation, map, monitors, persons.""" t = time.time() s = Simulation(geo=osm.OSMModel('../data/hannover2.osm'), rel_speed=40) print time.time() - t #m = s.add_monitor(EmptyMonitor, 2) #m = s.add_monitor(PipePlayerMonitor, 2) #m = s.add_monitor(RecordFilePlayerMonitor, 2) #m = s.add_monitor(RecordFilePlayerMonitor, 2, filename='exampleoutput_RecordFilePlayerMonitor') #m = s.add_monitor(ChildprocessPlayerChamplainMonitor, 2) m = s.add_monitor(SocketPlayerMonitor, 2) s.add_persons(MsgRandomWiggler, 100, monitor=m) s.run(until=1000, real_time=True, monitor=True) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function INCLUDES = """ #include <openssl/crypto.h> """ TYPES = """ typedef ... CRYPTO_THREADID; static const int SSLEAY_VERSION; static const int SSLEAY_CFLAGS; static const int SSLEAY_PLATFORM; static const int SSLEAY_DIR; static const int SSLEAY_BUILT_ON; static const int CRYPTO_MEM_CHECK_ON; static const int CRYPTO_MEM_CHECK_OFF; static const int CRYPTO_MEM_CHECK_ENABLE; static const int CRYPTO_MEM_CHECK_DISABLE; static const int CRYPTO_LOCK; static const int CRYPTO_UNLOCK; static const int CRYPTO_READ; static const int CRYPTO_WRITE; static const int CRYPTO_LOCK_SSL; """ FUNCTIONS = """ unsigned long SSLeay(void); const char *SSLeay_version(int); void CRYPTO_free(void *); int CRYPTO_mem_ctrl(int); int CRYPTO_is_mem_check_on(void); void CRYPTO_mem_leaks(struct bio_st *); void CRYPTO_cleanup_all_ex_data(void); int CRYPTO_num_locks(void); void CRYPTO_set_locking_callback(void(*)(int, int, const char *, int)); void CRYPTO_set_id_callback(unsigned long (*)(void)); unsigned long (*CRYPTO_get_id_callback(void))(void); void (*CRYPTO_get_locking_callback(void))(int, int, const char *, int); void CRYPTO_lock(int, int, const char *, int); void OPENSSL_free(void *); """ MACROS = """ void CRYPTO_add(int *, int, int); void CRYPTO_malloc_init(void); void CRYPTO_malloc_debug_init(void); """ CUSTOMIZATIONS = """ """ CONDITIONAL_NAMES = {}
unknown
codeparrot/codeparrot-clean
/*! * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import { Box, Separator, Heading, HStack, Stack } from "@chakra-ui/react"; import type { TaskInstanceState, TaskInstanceStateCount } from "openapi-gen/requests/types.gen"; import { useTranslation } from "react-i18next"; import { MdOutlineTask } from "react-icons/md"; import { Link as RouterLink } from "react-router-dom"; import { StateBadge } from "src/components/StateBadge"; import { SearchParamsKeys } from "src/constants/searchParams"; import { MetricSection } from "./MetricSection"; type TaskInstanceMetricsProps = { readonly endDate?: string; readonly startDate: string; readonly taskInstanceStates: TaskInstanceStateCount; readonly total: number; }; const TASK_STATES: Array<keyof TaskInstanceStateCount> = [ "queued", "running", "success", "failed", "skipped", "removed", "scheduled", "restarting", "up_for_retry", "up_for_reschedule", "upstream_failed", "deferred", "no_status", ]; export const TaskInstanceMetrics = ({ endDate, startDate, taskInstanceStates, total, }: TaskInstanceMetricsProps) => { const { t: translate } = useTranslation(); return ( <Box borderRadius={5} borderWidth={1} mt={2} p={4}> <HStack> <RouterLink to={`/task_instances?${SearchParamsKeys.START_DATE}=${startDate}${endDate 
=== undefined ? "" : `&${SearchParamsKeys.END_DATE}=${endDate}`}`} > <StateBadge colorPalette="brand" fontSize="md" variant="solid"> <MdOutlineTask /> {total} </StateBadge> </RouterLink> <Heading size="md">{translate("taskInstance", { count: total })}</Heading> </HStack> <Separator my={3} /> <Stack gap={4}> {TASK_STATES.sort((stateA, stateB) => taskInstanceStates[stateA] > taskInstanceStates[stateB] ? -1 : 1, ).map((state) => taskInstanceStates[state] > 0 ? ( <MetricSection endDate={endDate} key={state} kind="task_instances" runs={taskInstanceStates[state]} startDate={startDate} state={state as TaskInstanceState} total={total} /> ) : undefined, )} </Stack> </Box> ); };
typescript
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/src/pages/Dashboard/HistoricalMetrics/TaskInstanceMetrics.tsx
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #ifndef CALIB_COMMON_HPP #define CALIB_COMMON_HPP #include <opencv2/core.hpp> #include <memory> #include <vector> #include <string> namespace calib { #define OVERLAY_DELAY 1000 #define IMAGE_MAX_WIDTH 1280 #define IMAGE_MAX_HEIGHT 960 bool showOverlayMessage(const std::string& message); enum InputType { Video, Pictures }; enum InputVideoSource { Camera, File }; enum TemplateType { AcirclesGrid, Chessboard, ChArUco, DoubleAcirclesGrid, CirclesGrid }; static const std::string mainWindowName = "Calibration"; static const std::string gridWindowName = "Board locations"; static const std::string consoleHelp = "Hot keys:\nesc - exit application\n" "s - save current data to .xml file\n" "r - delete last frame\n" "u - enable/disable applying undistortion\n" "d - delete all frames\n" "v - switch visualization"; static const double sigmaMult = 1.96; struct calibrationData { cv::Mat cameraMatrix; cv::Mat distCoeffs; cv::Mat stdDeviations; cv::Mat perViewErrors; std::vector<cv::Mat> rvecs; std::vector<cv::Mat> tvecs; double totalAvgErr; cv::Size imageSize; std::vector<cv::Mat> allFrames; std::vector<std::vector<cv::Point2f> > imagePoints; std::vector< std::vector<cv::Point3f> > objectPoints; std::vector<cv::Mat> allCharucoCorners; std::vector<cv::Mat> allCharucoIds; cv::Mat undistMap1, undistMap2; calibrationData() { imageSize = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT); } }; struct cameraParameters { cv::Mat cameraMatrix; cv::Mat distCoeffs; cv::Mat stdDeviations; double avgError; cameraParameters(){} cameraParameters(cv::Mat& _cameraMatrix, cv::Mat& _distCoeffs, cv::Mat& _stdDeviations, double _avgError = 0) : cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), stdDeviations(_stdDeviations), avgError(_avgError) {} }; struct captureParameters { InputType captureMethod; 
InputVideoSource source; TemplateType board; cv::Size inputBoardSize; cv::Size boardSizeInnerCorners; // board size in inner corners for chessboard cv::Size boardSizeUnits; // board size in squares, circles, etc. int charucoDictName; std::string charucoDictFile; int calibrationStep; float charucoSquareLength, charucoMarkerSize; float captureDelay; float squareSize; float templDst; std::string videoFileName; bool flipVertical; int camID; int camBackend; int fps; cv::Size cameraResolution; int maxFramesNum; int minFramesNum; bool saveFrames; float zoom; bool forceReopen; captureParameters() { calibrationStep = 1; captureDelay = 500.f; maxFramesNum = 30; minFramesNum = 10; fps = 30; cameraResolution = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT); saveFrames = false; } }; struct internalParameters { double solverEps; int solverMaxIters; bool fastSolving; bool rationalModel; bool thinPrismModel; bool tiltedModel; double filterAlpha; internalParameters() { solverEps = 1e-7; solverMaxIters = 30; fastSolving = false; rationalModel = false; thinPrismModel = false; tiltedModel = false; filterAlpha = 0.1; } }; } #endif
unknown
github
https://github.com/opencv/opencv
apps/interactive-calibration/calibCommon.hpp
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import codecs from setuptools import setup, find_packages version = '' with open('pigar/version.py', 'r') as f: version = re.search( r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.M ).group(1) if not version: raise RuntimeError('Cannot find version information') long_description = """ [![](https://img.shields.io/github/workflow/status/damnever/pigar/PyCI?style=flat-square)](https://github.com/damnever/pigar/actions) - Generating requirements.txt for Python project. - Handling the difference between different Python versions. - Jupyter notebook (`*.ipynb`) support. - Including the import statements from `exec`/`eval`, doctest of docstring, etc. - Searching packages by import name. - Checking the latest versions for Python project. You can find more information on [GitHub](https://github.com/damnever/pigar). """ # noqa with codecs.open('CHANGELOG.md', encoding='utf-8') as f: change_logs = f.read() install_requires = [ 'colorama>=0.3.9', 'requests>=2.20.0', 'nbformat>=4.4.0', 'futures;python_version<"3.2"' ] setup( name='pigar', version=version, description=( 'A fantastic tool to generate requirements for your' ' Python project, and more than that.' 
), long_description=long_description + '\n\n' + change_logs, long_description_content_type="text/markdown", url='https://github.com/damnever/pigar', author='damnever', author_email='dxc.wolf@gmail.com', license='The BSD 3-Clause License', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Utilities', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3', ], keywords='requirements.txt,automation,tool,module-search', packages=find_packages(), install_requires=install_requires, include_package_data=True, entry_points={'console_scripts': [ 'pigar=pigar.__main__:main', ]}, )
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ 2. Adding __str__() or __unicode__() to models Although it's not a strict requirement, each model should have a ``_str__()`` or ``__unicode__()`` method to return a "human-readable" representation of the object. Do this not only for your own sanity when dealing with the interactive prompt, but also because objects' representations are used throughout Django's automatically-generated admin. Normally, you should write ``__unicode__()`` method, since this will work for all field types (and Django will automatically provide an appropriate ``__str__()`` method). However, you can write a ``__str__()`` method directly, if you prefer. You must be careful to encode the results correctly, though. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateTimeField() def __str__(self): # Caution: this is only safe if you are certain that headline will be # in ASCII. return self.headline @python_2_unicode_compatible class InternationalArticle(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateTimeField() def __str__(self): return self.headline
unknown
codeparrot/codeparrot-clean
#! /usr/bin/env python """ This script will test highgui's window functionality """ # name of this test and it's requirements TESTNAME = "cvShowImage" REQUIRED = ["cvLoadImagejpg", "cvNamedWindow"] # needed for sys.exit(int) and .works file handling import os import sys import works # path to imagefiles we need PREFIX=os.path.join(os.environ["srcdir"],"../../opencv_extra/testdata/python/images/") # check requirements and delete old flag file, if it exists if not works.check_files(REQUIRED,TESTNAME): sys.exit(77) # import the necessary things for OpenCV from highgui import * from cv import * # defined window name win_name = "testing..." # we expect a window to be createable, thanks to 'cvNamedWindow.works' cvNamedWindow(win_name, CV_WINDOW_AUTOSIZE) # we expect the image to be loadable, thanks to 'cvLoadImage.works' image = cvLoadImage(PREFIX+"cvShowImage.jpg") if image is None: print "(ERROR) Couldn't load image "+PREFIX+"cvShowImage.jpg" sys.exit(1) # try to show image in window res = cvShowImage( win_name, image ) cvWaitKey(0) if res == 0: cvReleaseImage(image) cvDestroyWindow(win_name) sys.exit(1) # destroy window cvDestroyWindow(win_name) # create flag file for following tests works.set_file(TESTNAME) # return 0 ('PASS') sys.exit(0)
unknown
codeparrot/codeparrot-clean
name: Label Flaky Test Issues on: issues: types: [labeled] permissions: contents: read jobs: label: if: github.event.label.name == 'flaky-test' runs-on: ubuntu-slim permissions: issues: write steps: - name: Extract labels id: extract-labels env: BODY: ${{ github.event.issue.body }} run: | BODY="${BODY//$'\n'/'\n'}" declare -A platform2label platform2label["AIX"]="aix"; platform2label["FreeBSD"]="freebsd"; platform2label["Linux ARM64"]="linux"; platform2label["Linux PPC64LE"]="ppc"; platform2label["Linux s390x"]="s390"; platform2label["Linux x64"]="linux"; platform2label["macOS ARM64"]="macos"; platform2label["macOS x64"]="macos"; platform2label["SmartOS"]="smartos"; platform2label["Windows"]="windows"; # sed is cleaning up the edges PLATFORMS=$(echo $BODY | sed 's/^.*Platform\\n\\n//' | sed 's/\(, Other\)\?\\n\\n.*$//') 2> /dev/null readarray -d , -t list <<< "$PLATFORMS" labels= for row in "${list[@]}"; do \ platform=$(echo $row | xargs); \ labels="${labels}${platform2label[$platform]},"; \ done; echo "LABELS=${labels::-1}" >> $GITHUB_OUTPUT - name: Add labels env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NUMBER: ${{ github.event.issue.number }} run: gh issue edit "$NUMBER" --repo ${{ github.repository }} --add-label "${{ steps.extract-labels.outputs.LABELS }}"
unknown
github
https://github.com/nodejs/node
.github/workflows/label-flaky-test-issue.yml
# Copyright 2016, Openstack Foundation (http://www.openstack.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import json import libvirt import socket return_data = dict() conn = libvirt.openReadOnly() try: domains = conn.listDomainsID() return_data['kvm_vms'] = len(domains) return_data['kvm_total_vcpus'] = conn.getCPUMap()[0] return_data['kvm_scheduled_vcpus'] = 0 for domain in domains: return_data['kvm_scheduled_vcpus'] += conn.lookupByID( domain ).maxVcpus() return_data['kvm_host_id'] = abs(hash(socket.getfqdn())) except Exception: raise SystemExit('Plugin failure') else: print(json.dumps(return_data)) finally: conn.close()
unknown
codeparrot/codeparrot-clean
from django.http import FileResponse, HttpResponse from django.urls import path def helloworld(request): return HttpResponse("Hello World!") def cookie(request): response = HttpResponse("Hello World!") response.set_cookie("key", "value") return response urlpatterns = [ path("", helloworld), path("cookie/", cookie), path("file/", lambda x: FileResponse(open(__file__, "rb"))), ]
python
github
https://github.com/django/django
tests/wsgi/urls.py
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from airflow.ti_deps.deps.base_ti_dep import BaseTIDep from airflow.utils.db import provide_session from airflow.utils.state import State class NotRunningDep(BaseTIDep): NAME = "Task Instance Not Already Running" # Task instances must not already be running, as running two copies of the same # task instance at the same time (AKA double-trigger) should be avoided at all # costs, even if the context specifies that all dependencies should be ignored. IGNOREABLE = False @provide_session def _get_dep_statuses(self, ti, session, dep_context): if ti.state == State.RUNNING: yield self._failing_status( reason="Task is already running, it started on {0}.".format( ti.start_date))
unknown
codeparrot/codeparrot-clean
# # test_codecmaps_jp.py # Codec mapping tests for Japanese encodings # from test import support from test import multibytecodec_support import unittest class TestCP932Map(multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'cp932' mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \ 'WINDOWS/CP932.TXT' supmaps = [ (b'\x80', '\u0080'), (b'\xa0', '\uf8f0'), (b'\xfd', '\uf8f1'), (b'\xfe', '\uf8f2'), (b'\xff', '\uf8f3'), ] for i in range(0xa1, 0xe0): supmaps.append((bytes([i]), chr(i+0xfec0))) class TestEUCJPCOMPATMap(multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'euc_jp' mapfilename = 'EUC-JP.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT' class TestSJISCOMPATMap(multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'shift_jis' mapfilename = 'SHIFTJIS.TXT' mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \ '/EASTASIA/JIS/SHIFTJIS.TXT' pass_enctest = [ (b'\x81_', '\\'), ] pass_dectest = [ (b'\\', '\xa5'), (b'~', '\u203e'), (b'\x81_', '\\'), ] class TestEUCJISX0213Map(multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'euc_jisx0213' mapfilename = 'EUC-JISX0213.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT' class TestSJISX0213Map(multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'shift_jisx0213' mapfilename = 'SHIFT_JISX0213.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT' if __name__ == "__main__": unittest.main()
unknown
codeparrot/codeparrot-clean
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. **Example** ```esql ROW str1 = "5.20128E11", str2 = "foo" | EVAL dbl = TO_DOUBLE("520128000000"), dbl1 = TO_DOUBLE(str1), dbl2 = TO_DOUBLE(str2) ``` | str1:keyword | str2:keyword | dbl:double | dbl1:double | dbl2:double | | --- | --- | --- | --- | --- | | 5.20128E11 | foo | 5.20128E11 | 5.20128E11 | null | Note that in this example, the last conversion of the string isn’t possible. When this happens, the result is a `null` value. In this case a _Warning_ header is added to the response. The header will provide information on the source of the failure: `"Line 1:115: evaluation of [TO_DOUBLE(str2)] failed, treating result as null. Only first 20 failures recorded."` A following header will contain the failure reason and the offending value: `"java.lang.NumberFormatException: For input string: "foo""`
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/query-languages/esql/_snippets/functions/examples/to_double.md
""" support methods for python clients """ import json import collections from datetime import datetime from uuid import UUID from enum import Enum from dateutil import parser # python2/3 compatible basestring, for use in to_dict try: basestring except NameError: basestring = str def timestamp_from_datetime(datetime): """ Convert from datetime format to timestamp format Input: Time in datetime format Output: Time in timestamp format """ return datetime.strftime('%Y-%m-%dT%H:%M:%S.%fZ') def timestamp_to_datetime(timestamp): """ Convert from timestamp format to datetime format Input: Time in timestamp format Output: Time in datetime format """ return parser.parse(timestamp).replace(tzinfo=None) def has_properties(cls, property, child_properties): for child_prop in child_properties: if getattr(property, child_prop, None) is None: return False return True def list_factory(val, member_type): if not isinstance(val, list): raise ValueError('list_factory: value must be a list') return [val_factory(v, member_type) for v in val] def dict_factory(val, objmap): # objmap is a dict outlining the structure of this value # its format is {'attrname': {'datatype': [type], 'required': bool}} objdict = {} for attrname, attrdict in objmap.items(): value = val.get(attrname) if value is not None: for dt in attrdict['datatype']: try: if isinstance(dt, dict): objdict[attrname] = dict_factory(value, attrdict) else: objdict[attrname] = val_factory(value, [dt]) except Exception: pass if objdict.get(attrname) is None: raise ValueError('dict_factory: {attr}: unable to instantiate with any supplied type'.format(attr=attrname)) elif attrdict.get('required'): raise ValueError('dict_factory: {attr} is required'.format(attr=attrname)) return objdict def val_factory(val, datatypes): """ return an instance of `val` that is of type `datatype`. keep track of exceptions so we can produce meaningful error messages. 
""" exceptions = [] for dt in datatypes: try: if isinstance(val, dt): return val return type_handler_object(val, dt) except Exception as e: exceptions.append(str(e)) # if we get here, we never found a valid value. raise an error raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'. format(val=val, types=datatypes, excs=exceptions)) def to_json(cls, indent=0): """ serialize to JSON :rtype: str """ # for consistency, use as_dict then go to json from there return json.dumps(cls.as_dict(), indent=indent) def to_dict(cls, convert_datetime=True): """ return a dict representation of the Event and its sub-objects `convert_datetime` controls whether datetime objects are converted to strings or not :rtype: dict """ def todict(obj): """ recurse the objects and represent as a dict use the registered handlers if possible """ data = {} if isinstance(obj, dict): for (key, val) in obj.items(): data[key] = todict(val) return data if not convert_datetime and isinstance(obj, datetime): return obj elif type_handler_value(obj): return type_handler_value(obj) elif isinstance(obj, collections.Sequence) and not isinstance(obj, basestring): return [todict(v) for v in obj] elif hasattr(obj, "__dict__"): for key, value in obj.__dict__.items(): if not callable(value) and not key.startswith('_'): data[key] = todict(value) return data else: return obj return todict(cls) class DatetimeHandler(object): """ output datetime objects as iso-8601 compliant strings """ @classmethod def flatten(cls, obj): """flatten""" return timestamp_from_datetime(obj) @classmethod def restore(cls, data): """restore""" return timestamp_to_datetime(data) class UUIDHandler(object): """ output UUID objects as a string """ @classmethod def flatten(cls, obj): """flatten""" return str(obj) @classmethod def restore(cls, data): """restore""" return UUID(data) class EnumHandler(object): """ output Enum objects as their value """ @classmethod def flatten(cls, obj): """flatten""" 
return obj.value @classmethod def restore(cls, data): """ cannot restore here because we don't know what type of enum it is """ raise NotImplementedError handlers = { datetime: DatetimeHandler, Enum: EnumHandler, UUID: UUIDHandler, } def handler_for(obj): """return the handler for the object type""" for handler_type in handlers: if isinstance(obj, handler_type): return handlers[handler_type] try: for handler_type in handlers: if issubclass(obj, handler_type): return handlers[handler_type] except TypeError: # if obj isn't a class, issubclass will raise a TypeError pass def type_handler_value(obj): """ return the serialized (flattened) value from the registered handler for the type """ handler = handler_for(obj) if handler: return handler().flatten(obj) def type_handler_object(val, objtype): """ return the deserialized (restored) value from the registered handler for the type """ handler = handlers.get(objtype) if handler: return handler().restore(val) else: return objtype(val)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library.  If not, see <http://www.gnu.org/licenses/>.

# Ansible module: manage AWS Elastic Network Interfaces (ENIs) via boto2.
# Flow: main() dispatches on `state`; every code path terminates the module
# through module.exit_json()/fail_json(), so the helper functions below do
# not return to their caller on success.

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
    - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
      provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
      of the network interface.
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
  eni_id:
    description:
      - The ID of the ENI (to modify); if null and state is present, a new eni will be created.
    required: false
    default: null
  instance_id:
    description:
      - Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or
        detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
    required: false
    default: null
  private_ip_address:
    description:
      - Private IP address.
    required: false
    default: null
  subnet_id:
    description:
      - ID of subnet in which to create the ENI.
    required: false
  description:
    description:
      - Optional description of the ENI.
    required: false
    default: null
  security_groups:
    description:
      - List of security groups associated with the interface. Only used when state=present. Since version 2.2, you
        can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by
        ID.
    required: false
    default: null
  state:
    description:
      - Create or delete ENI
    required: false
    default: present
    choices: [ 'present', 'absent' ]
  device_index:
    description:
      - The index of the device for the network interface attachment on the instance.
    required: false
    default: 0
  attached:
    description:
      - Specifies if network interface should be attached or detached from instance. If ommited, attachment status
        won't change
    required: false
    default: yes
    version_added: 2.2
  force_detach:
    description:
      - Force detachment of the interface. This applies either when explicitly detaching the interface by setting
        instance_id to None or when deleting an interface with state=absent.
    required: false
    default: no
  delete_on_termination:
    description:
      - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when
        the interface is being modified, not on creation.
    required: false
  source_dest_check:
    description:
      - By default, interfaces perform source/destination checks. NAT instances however need this check to be
        disabled. You can only specify this flag when the interface is being modified, not on creation.
    required: false
  secondary_private_ip_addresses:
    description:
      - A list of IP addresses to assign as secondary IP addresses to the network interface. This option is mutually
        exclusive of secondary_private_ip_address_count
    required: false
    version_added: 2.2
  secondary_private_ip_address_count:
    description:
      - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive
        of secondary_private_ip_addresses
    required: false
    version_added: 2.2
extends_documentation_fragment:
    - aws
    - ec2
notes:
    - This module identifies and ENI based on either the eni_id, a combination of private_ip_address and subnet_id,
      or a combination of instance_id and device_id. Any of these options will let you specify a particular ENI.
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present

# Create an ENI and attach it to an instance
- ec2_eni:
    instance_id: i-xxxxxxx
    device_index: 1
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present

# Create an ENI with two secondary addresses
- ec2_eni:
    subnet_id: subnet-xxxxxxxx
    state: present
    secondary_private_ip_address_count: 2

# Assign a secondary IP address to an existing ENI
# This will purge any existing IPs
- ec2_eni:
    subnet_id: subnet-xxxxxxxx
    eni_id: eni-yyyyyyyy
    state: present
    secondary_private_ip_addresses:
      - 172.16.1.1

# Remove any secondary IP addresses from an existing ENI
- ec2_eni:
    subnet_id: subnet-xxxxxxxx
    eni_id: eni-yyyyyyyy
    state: present
    secondary_private_ip_addresses:
      -

# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
    eni_id: eni-xxxxxxx
    force_detach: yes
    state: absent

# Update an ENI
- ec2_eni:
    eni_id: eni-xxxxxxx
    description: "My new description"
    state: present

# Update an ENI identifying it by private_ip_address and subnet_id
- ec2_eni:
    subnet_id: subnet-xxxxxxx
    private_ip_address: 172.16.1.1
    description: "My new description"

# Detach an ENI from an instance
- ec2_eni:
    eni_id: eni-xxxxxxx
    instance_id: None
    state: present

### Delete an interface on termination
# First create the interface
- ec2_eni:
    instance_id: i-xxxxxxx
    device_index: 1
    private_ip_address: 172.31.0.20
    subnet_id: subnet-xxxxxxxx
    state: present
  register: eni

# Modify the interface to enable the delete_on_terminaton flag
- ec2_eni:
    eni_id: "{{ eni.interface.id }}"
    delete_on_termination: true
'''

RETURN = '''
interface:
  description: Network interface attributes
  returned: when state != absent
  type: complex
  contains:
    description:
      description: interface description
      type: string
      sample: Firewall network interface
    groups:
      description: list of security groups
      type: list of dictionaries
      sample: [ { "sg-f8a8a9da": "default" } ]
    id:
      description: network interface id
      type: string
      sample: "eni-1d889198"
    mac_address:
      description: interface's physical address
      type: string
      sample: "00:00:5E:00:53:23"
    owner_id:
      description: aws account id
      type: string
      sample: 812381371
    private_ip_address:
      description: primary ip address of this interface
      type: string
      sample: 10.20.30.40
    private_ip_addresses:
      description: list of all private ip addresses associated to this interface
      type: list of dictionaries
      sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
    source_dest_check:
      description: value of source/dest check flag
      type: boolean
      sample: True
    status:
      description: network interface status
      type: string
      sample: "pending"
    subnet_id:
      description: which vpc subnet the interface is bound
      type: string
      sample: subnet-b0a0393c
    vpc_id:
      description: which vpc this network interface is bound
      type: string
      sample: vpc-9a9a9da
'''

import time
import re

try:
    import boto.ec2
    import boto.vpc
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws, ec2_argument_spec,
                                      get_aws_connection_info, get_ec2_security_group_ids_from_names)


def get_eni_info(interface):
    """Build the JSON-serializable dict returned to Ansible for one ENI,
    including its attachment details when the ENI is attached."""

    # Private addresses
    private_addresses = []
    for ip in interface.private_ip_addresses:
        private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})

    interface_info = {'id': interface.id,
                      'subnet_id': interface.subnet_id,
                      'vpc_id': interface.vpc_id,
                      'description': interface.description,
                      'owner_id': interface.owner_id,
                      'status': interface.status,
                      'mac_address': interface.mac_address,
                      'private_ip_address': interface.private_ip_address,
                      'source_dest_check': interface.source_dest_check,
                      'groups': dict((group.id, group.name) for group in interface.groups),
                      'private_ip_addresses': private_addresses
                      }

    if interface.attachment is not None:
        interface_info['attachment'] = {'attachment_id': interface.attachment.id,
                                        'instance_id': interface.attachment.instance_id,
                                        'device_index': interface.attachment.device_index,
                                        'status': interface.attachment.status,
                                        'attach_time': interface.attachment.attach_time,
                                        'delete_on_termination': interface.attachment.delete_on_termination,
                                        }

    return interface_info


def wait_for_eni(eni, status):
    """Poll the ENI every 3 seconds until it reaches `status`
    ("attached" or "detached").

    NOTE(review): there is no timeout — this loops forever if the ENI
    never converges to the requested status.
    """

    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break


def create_eni(connection, vpc_id, module):
    """Create a new ENI (optionally attaching it and assigning secondary
    IPs), then exit the module with the resulting interface facts.

    On any boto failure after creation, the freshly created ENI is deleted
    before the error is re-raised, so no orphan interface is left behind.
    """

    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # Legacy pre-2.2 interface: the literal string 'None' means "no instance".
    if instance_id == 'None':
        instance_id = None
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'),
                                                            connection, vpc_id=vpc_id, boto3=False)
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
        if attached and instance_id is not None:
            try:
                eni.attach(instance_id, device_index)
            except BotoServerError:
                # Roll back the creation so we don't leak an unattached ENI.
                eni.delete()
                raise
            # Wait to allow creation / attachment to finish
            wait_for_eni(eni, "attached")
            eni.update()

        if secondary_private_ip_address_count is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       secondary_private_ip_address_count=secondary_private_ip_address_count)
            except BotoServerError:
                eni.delete()
                raise

        if secondary_private_ip_addresses is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=secondary_private_ip_addresses)
            except BotoServerError:
                eni.delete()
                raise

        changed = True

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed, interface=get_eni_info(eni))


def modify_eni(connection, vpc_id, module, eni):
    """Reconcile an existing ENI with the requested module parameters
    (description, groups, source/dest check, deleteOnTermination, secondary
    IPs, attachment), then exit the module with the updated facts."""

    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # NOTE(review): do_detach and force_detach are computed but never used
    # in this function — dead locals; confirm before removing.
    do_detach = module.params.get('state') == 'detached'
    device_index = module.params.get("device_index")
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        # Each attribute is only modified (and `changed` flipped) when the
        # requested value differs from the ENI's current value.
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if len(security_groups) > 0:
            groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
            # NOTE(review): get_sec_group_list() returns encoded (bytes on
            # py3) ids while `groups` are str — on Python 3 this comparison
            # may never match; confirm.
            if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        if delete_on_termination is not None and eni.attachment is not None:
            if eni.attachment.delete_on_termination is not delete_on_termination:
                connection.modify_network_interface_attribute(eni.id, "deleteOnTermination",
                                                              delete_on_termination, eni.attachment.id)
                changed = True

        current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
        if secondary_private_ip_addresses is not None:
            # Purge-and-assign: remove addresses not in the requested list,
            # then (re)assign the requested ones.
            # NOTE(review): this branch never sets `changed = True` even when
            # it modifies the ENI — possible reporting bug; confirm.
            secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
            if secondary_addresses_to_remove:
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=list(set(current_secondary_addresses) -
                                                                                   set(secondary_private_ip_addresses)),
                                                         dry_run=False)
            connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                   private_ip_addresses=secondary_private_ip_addresses,
                                                   secondary_private_ip_address_count=None,
                                                   allow_reassignment=False, dry_run=False)

        if secondary_private_ip_address_count is not None:
            current_secondary_address_count = len(current_secondary_addresses)
            if secondary_private_ip_address_count > current_secondary_address_count:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=None,
                                                       secondary_private_ip_address_count=(secondary_private_ip_address_count -
                                                                                           current_secondary_address_count),
                                                       allow_reassignment=False, dry_run=False)
                changed = True
            elif secondary_private_ip_address_count < current_secondary_address_count:
                # How many of these addresses do we want to remove
                # NOTE(review): `changed` is not set in this branch either —
                # confirm whether that is intentional.
                secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
                                                         dry_run=False)

        if attached is True:
            # Re-home the ENI if it is attached to a different instance.
            if eni.attachment and eni.attachment.instance_id != instance_id:
                detach_eni(eni, module)
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
            if eni.attachment is None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
        elif attached is False:
            detach_eni(eni, module)

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))


def delete_eni(connection, module):
    """Delete the ENI named by eni_id, optionally force-detaching it first,
    then exit the module. A missing ENI is treated as success (changed=False)."""

    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]

        if force_detach is True:
            if eni.attachment is not None:
                eni.detach(force_detach)
                # Wait to allow detachment to finish
                wait_for_eni(eni, "detached")
                eni.update()
            eni.delete()
            changed = True
        else:
            eni.delete()
            changed = True

        module.exit_json(changed=changed)
    except BotoServerError as e:
        # "Does not exist" is idempotent success; anything else is a failure.
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        if regex.search(e.message) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=e.message)


def detach_eni(eni, module):
    """Detach an ENI from its instance.

    When called as part of a re-attach (the 'attached' parameter is truthy)
    this returns to the caller after the detach completes; otherwise it
    exits the module directly with the result.
    """

    attached = module.params.get("attached")

    force_detach = module.params.get("force_detach")
    if eni.attachment is not None:
        eni.detach(force_detach)
        wait_for_eni(eni, "detached")
        if attached:
            return
        eni.update()
        module.exit_json(changed=True, interface=get_eni_info(eni))
    else:
        module.exit_json(changed=False, interface=get_eni_info(eni))


def uniquely_find_eni(connection, module):
    """Locate exactly one ENI from eni_id, private_ip_address+subnet_id, or
    instance_id+device_index. Return the ENI, or None when the parameters do
    not unambiguously identify a single interface."""

    eni_id = module.params.get("eni_id")
    private_ip_address = module.params.get('private_ip_address')
    subnet_id = module.params.get('subnet_id')
    instance_id = module.params.get('instance_id')
    device_index = module.params.get('device_index')
    attached = module.params.get('attached')

    try:
        filters = {}

        # proceed only if we're univocally specifying an ENI
        if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
            return None

        if private_ip_address and subnet_id:
            filters['private-ip-address'] = private_ip_address
            filters['subnet-id'] = subnet_id

        if not attached and instance_id and device_index:
            filters['attachment.instance-id'] = instance_id
            filters['attachment.device-index'] = device_index

        if eni_id is None and len(filters) == 0:
            return None

        eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
        if len(eni_result) == 1:
            return eni_result[0]
        else:
            return None

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    return None


def get_sec_group_list(groups):
    """Return the ids of `groups` as a flat list.

    NOTE(review): .encode() yields bytes on Python 3; callers compare this
    against str ids — confirm (see modify_eni).
    """

    # Build list of remote security groups
    remote_security_groups = []
    for group in groups:
        remote_security_groups.append(group.id.encode())

    return remote_security_groups


def _get_vpc_id(connection, module, subnet_id):
    """Resolve the VPC id owning `subnet_id`, failing the module on error."""

    try:
        return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
    except BotoServerError as e:
        module.fail_json(msg=e.message)


def main():
    """Module entry point: parse arguments, connect to EC2/VPC, and dispatch
    to create/modify/delete based on `state` and whether the ENI exists."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id=dict(default=None, type='str'),
            instance_id=dict(default=None, type='str'),
            private_ip_address=dict(type='str'),
            subnet_id=dict(type='str'),
            description=dict(type='str'),
            security_groups=dict(default=[], type='list'),
            device_index=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            force_detach=dict(default='no', type='bool'),
            source_dest_check=dict(default=None, type='bool'),
            delete_on_termination=dict(default=None, type='bool'),
            secondary_private_ip_addresses=dict(default=None, type='list'),
            secondary_private_ip_address_count=dict(default=None, type='int'),
            attached=dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
                           ],
                           required_if=([
                               ('state', 'absent', ['eni_id']),
                               ('attached', True, ['instance_id'])
                           ])
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")

    if state == 'present':
        eni = uniquely_find_eni(connection, module)
        if eni is None:
            # No existing ENI matched: create one (subnet_id is mandatory).
            subnet_id = module.params.get("subnet_id")
            if subnet_id is None:
                module.fail_json(msg="subnet_id is required when creating a new ENI")

            vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
            create_eni(connection, vpc_id, module)
        else:
            vpc_id = eni.vpc_id
            modify_eni(connection, vpc_id, module, eni)

    elif state == 'absent':
        delete_eni(connection, module)


if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
//===--- ASTContext.cpp - ASTContext Implementation -----------------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file implements the ASTContext class. // //===----------------------------------------------------------------------===// #include "swift/AST/ASTContext.h" #include "AbstractConformance.h" #include "ClangTypeConverter.h" #include "ForeignRepresentationInfo.h" #include "SubstitutionMapStorage.h" #include "swift/ABI/MetadataValues.h" #include "swift/AST/ASTContextGlobalCache.h" #include "swift/AST/AvailabilityContextStorage.h" #include "swift/AST/ClangModuleLoader.h" #include "swift/AST/ConcreteDeclRef.h" #include "swift/AST/ConformanceLookup.h" #include "swift/AST/DiagnosticEngine.h" #include "swift/AST/DiagnosticsFrontend.h" #include "swift/AST/DiagnosticsSema.h" #include "swift/AST/DistributedDecl.h" #include "swift/AST/ExistentialLayout.h" #include "swift/AST/ExtInfo.h" #include "swift/AST/FileUnit.h" #include "swift/AST/ForeignAsyncConvention.h" #include "swift/AST/ForeignErrorConvention.h" #include "swift/AST/GenericEnvironment.h" #include "swift/AST/GenericSignature.h" #include "swift/AST/ImportCache.h" #include "swift/AST/IndexSubset.h" #include "swift/AST/KnownProtocols.h" #include "swift/AST/LazyResolver.h" #include "swift/AST/LocalArchetypeRequirementCollector.h" #include "swift/AST/MacroDiscriminatorContext.h" #include "swift/AST/ModuleDependencies.h" #include "swift/AST/ModuleLoader.h" #include "swift/AST/NameLookup.h" #include "swift/AST/PackConformance.h" #include "swift/AST/ParameterList.h" #include "swift/AST/PluginLoader.h" 
#include "swift/AST/PrettyStackTrace.h" #include "swift/AST/PropertyWrappers.h" #include "swift/AST/ProtocolConformance.h" #include "swift/AST/RawComment.h" #include "swift/AST/RequirementMatch.h" #include "swift/AST/SILLayout.h" #include "swift/AST/SearchPathOptions.h" #include "swift/AST/SemanticAttrs.h" #include "swift/AST/SourceFile.h" #include "swift/AST/SubstitutionMap.h" #include "swift/AST/TypeCheckRequests.h" #include "swift/AST/TypeTransform.h" #include "swift/Basic/APIntMap.h" #include "swift/Basic/Assertions.h" #include "swift/Basic/BasicBridging.h" #include "swift/Basic/BlockList.h" #include "swift/Basic/Compiler.h" #include "swift/Basic/SourceManager.h" #include "swift/Basic/Statistic.h" #include "swift/Basic/StringExtras.h" #include "swift/Bridging/ASTGen.h" #include "swift/ClangImporter/ClangModule.h" #include "swift/Frontend/ModuleInterfaceLoader.h" #include "swift/Serialization/SerializedModuleLoader.h" #include "swift/Strings.h" #include "swift/Subsystems.h" #include "swift/SymbolGraphGen/SymbolGraphOptions.h" #include "clang/AST/Type.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/VersionTuple.h" #include "llvm/Support/VirtualOutputBackend.h" #include "llvm/Support/VirtualOutputBackends.h" #include <algorithm> #include <memory> #include <queue> #if !defined(_WIN32) #include <dlfcn.h> #endif #include "RequirementMachine/RewriteContext.h" using namespace swift; #define DEBUG_TYPE "ASTContext" STATISTIC(NumCollapsedSpecializedProtocolConformances, "# of specialized protocol conformances collapsed"); void ModuleLoader::anchor() {} void ClangModuleLoader::anchor() {} llvm::StringRef swift::getProtocolName(KnownProtocolKind kind) { 
switch (kind) { #define PROTOCOL_WITH_NAME(Id, Name) \ case KnownProtocolKind::Id: \ return Name; #include "swift/AST/KnownProtocols.def" } llvm_unreachable("bad KnownProtocolKind"); } /// Maps a KnownProtocol to the set of InvertibleProtocols, if a mapping exists. std::optional<InvertibleProtocolKind> swift::getInvertibleProtocolKind(KnownProtocolKind kp) { switch (kp) { #define INVERTIBLE_PROTOCOL_WITH_NAME(Id, Name) \ case KnownProtocolKind::Id: return InvertibleProtocolKind::Id; #include "swift/AST/KnownProtocols.def" default: return std::nullopt; } } /// Returns the KnownProtocolKind corresponding to an InvertibleProtocolKind. KnownProtocolKind swift::getKnownProtocolKind(InvertibleProtocolKind ip) { switch (ip) { #define INVERTIBLE_PROTOCOL_WITH_NAME(Id, Name) \ case InvertibleProtocolKind::Id: return KnownProtocolKind::Id; #include "swift/AST/KnownProtocols.def" } } void swift::simple_display(llvm::raw_ostream &out, const InvertibleProtocolKind &value) { out << getProtocolName(getKnownProtocolKind(value)); } std::optional<RepressibleProtocolKind> swift::getRepressibleProtocolKind(KnownProtocolKind kp) { switch (kp) { #define REPRESSIBLE_PROTOCOL_WITH_NAME(Id, Name) \ case KnownProtocolKind::Id: \ return RepressibleProtocolKind::Id; #include "swift/AST/KnownProtocols.def" default: return std::nullopt; } } /// Returns the KnownProtocolKind corresponding to an RepressibleProtocolKind. KnownProtocolKind swift::getKnownProtocolKind(RepressibleProtocolKind ip) { switch (ip) { #define REPRESSIBLE_PROTOCOL_WITH_NAME(Id, Name) \ case RepressibleProtocolKind::Id: \ return KnownProtocolKind::Id; #include "swift/AST/KnownProtocols.def" } } void swift::simple_display(llvm::raw_ostream &out, const RepressibleProtocolKind &value) { out << getProtocolName(getKnownProtocolKind(value)); } // Metadata stores a 16-bit field for invertible protocols. Trigger a build // error when we assign the 15th bit so we can think about what to do. 
#define INVERTIBLE_PROTOCOL(Name, Bit) \ static_assert(Bit < 15); #include "swift/ABI/InvertibleProtocols.def" namespace { enum class SearchPathKind : uint8_t { Import = 1 << 0, Framework = 1 << 1, }; } // end anonymous namespace using AssociativityCacheType = llvm::DenseMap<std::pair<PrecedenceGroupDecl *, PrecedenceGroupDecl *>, Associativity>; struct OverrideSignatureKey { GenericSignature baseMethodSig; const NominalTypeDecl *baseNominal; const NominalTypeDecl *derivedNominal; const GenericParamList *derivedParams; OverrideSignatureKey(GenericSignature baseMethodSig, const NominalTypeDecl *baseNominal, const NominalTypeDecl *derivedNominal, const GenericParamList *derivedParams) : baseMethodSig(baseMethodSig), baseNominal(baseNominal), derivedNominal(derivedNominal), derivedParams(derivedParams) {} }; namespace llvm { template <> struct DenseMapInfo<OverrideSignatureKey> { using Type = swift::Type; using GenericSignature = swift::GenericSignature; static bool isEqual(const OverrideSignatureKey lhs, const OverrideSignatureKey rhs) { return lhs.baseMethodSig.getPointer() == rhs.baseMethodSig.getPointer() && lhs.baseNominal == rhs.baseNominal && lhs.derivedNominal == rhs.derivedNominal && lhs.derivedParams == rhs.derivedParams; } static inline OverrideSignatureKey getEmptyKey() { return OverrideSignatureKey(DenseMapInfo<GenericSignature>::getEmptyKey(), DenseMapInfo<NominalTypeDecl *>::getEmptyKey(), DenseMapInfo<NominalTypeDecl *>::getEmptyKey(), DenseMapInfo<GenericParamList *>::getEmptyKey()); } static inline OverrideSignatureKey getTombstoneKey() { return OverrideSignatureKey( DenseMapInfo<GenericSignature>::getTombstoneKey(), DenseMapInfo<NominalTypeDecl *>::getTombstoneKey(), DenseMapInfo<NominalTypeDecl *>::getTombstoneKey(), DenseMapInfo<GenericParamList *>::getTombstoneKey()); } static unsigned getHashValue(const OverrideSignatureKey &Val) { return hash_combine( DenseMapInfo<GenericSignature>::getHashValue(Val.baseMethodSig), DenseMapInfo<NominalTypeDecl 
*>::getHashValue(Val.baseNominal), DenseMapInfo<NominalTypeDecl *>::getHashValue(Val.derivedNominal), DenseMapInfo<GenericParamList *>::getHashValue(Val.derivedParams)); } }; } // namespace llvm namespace { /// If the conformance is in a primary file, we might diagnose some failures /// early via request evaluation, with all remaining failures diagnosed when /// we completely force the conformance from typeCheckDecl(). To emit the /// diagnostics together, we batch them up in the Diags vector. /// /// If the conformance is in a secondary file, we instead just diagnose a /// generic "T does not conform to P" error the first time we hit an error /// via request evaluation. The detailed delayed conformance diagnostics /// are discarded, since we'll emit them again when we compile the file as /// a primary file. struct DelayedConformanceDiags { /// The delayed conformance diagnostics that have not been emitted yet. /// Never actually emitted for a secondary file. std::vector<ASTContext::DelayedConformanceDiag> Diags; /// Any missing witnesses that need to be diagnosed. std::vector<ASTContext::MissingWitness> MissingWitnesses; /// We set this if we've ever seen an error diagnostic here. unsigned HadError : 1; DelayedConformanceDiags() { HadError = false; } }; } struct ASTContext::Implementation { Implementation(); ~Implementation(); llvm::BumpPtrAllocator Allocator; // used in later initializations /// The global cache of side tables for random things. GlobalCache globalCache; /// The set of cleanups to be called when the ASTContext is destroyed. std::vector<std::function<void(void)>> Cleanups; /// The set of top-level modules we have loaded. /// This map is used for iteration, therefore it's a MapVector and not a /// DenseMap. llvm::MapVector<Identifier, ModuleDecl *> LoadedModules; /// The map from a module's name to a vector of modules that share that name. /// The name can be either the module's real name of the module's ABI name. 
llvm::DenseMap<Identifier, llvm::SmallVector<ModuleDecl *, 1>> NameToModules; // FIXME: This is a StringMap rather than a StringSet because StringSet // doesn't allow passing in a pre-existing allocator. llvm::StringMap<Identifier::Aligner, llvm::BumpPtrAllocator&> IdentifierTable; /// The declaration of Swift.AssignmentPrecedence. PrecedenceGroupDecl *AssignmentPrecedence = nullptr; /// The declaration of Swift.CastingPrecedence. PrecedenceGroupDecl *CastingPrecedence = nullptr; /// The declaration of Swift.FunctionArrowPrecedence. PrecedenceGroupDecl *FunctionArrowPrecedence = nullptr; /// The declaration of Swift.TernaryPrecedence. PrecedenceGroupDecl *TernaryPrecedence = nullptr; /// The declaration of Swift.DefaultPrecedence. PrecedenceGroupDecl *DefaultPrecedence = nullptr; /// The AnyObject type. CanType AnyObjectType; #define KNOWN_STDLIB_TYPE_DECL(NAME, DECL_CLASS, NUM_GENERIC_PARAMS) \ /** The declaration of Swift.NAME. */ \ DECL_CLASS *NAME##Decl = nullptr; #include "swift/AST/KnownStdlibTypes.def" #define KNOWN_SDK_TYPE_DECL(MODULE, NAME, DECL_CLASS, NUM_GENERIC_PARAMS) \ /** The declaration of MODULE.NAME. */ \ DECL_CLASS *NAME##Decl = nullptr; #include "swift/AST/KnownSDKTypes.def" /// The declaration of '+' function for two RangeReplaceableCollection. FuncDecl *PlusFunctionOnRangeReplaceableCollection = nullptr; /// The declaration of '+' function for two String. FuncDecl *PlusFunctionOnString = nullptr; /// The declaration of 'Sequence.makeIterator()'. FuncDecl *MakeIterator = nullptr; /// The declaration of 'AsyncSequence.makeAsyncIterator()'. FuncDecl *MakeAsyncIterator = nullptr; /// The declaration of 'BorrowingSequence.makeBorrowingIterator()'. FuncDecl *MakeBorrowingIterator = nullptr; /// The declaration of 'IteratorProtocol.next()'. FuncDecl *IteratorNext = nullptr; /// The declaration of 'IteratorProtocol.nextSpan(maximumCount:)'. FuncDecl *BorrowingIteratorNextSpan = nullptr; /// The declaration of 'AsyncIteratorProtocol.next()'. 
FuncDecl *AsyncIteratorNext = nullptr; /// The declaration of 'AsyncIteratorProtocol.next(isolation:)' that takes /// an actor isolation. FuncDecl *AsyncIteratorNextIsolated = nullptr; /// The declaration of Swift.Optional<T>.Some. EnumElementDecl *OptionalSomeDecl = nullptr; /// The declaration of Swift.Optional<T>.None. EnumElementDecl *OptionalNoneDecl = nullptr; /// The declaration of Optional<T>.TangentVector.init ConstructorDecl *OptionalTanInitDecl = nullptr; /// The declaration of Optional<T>.TangentVector.value VarDecl *OptionalTanValueDecl = nullptr; /// The declaration of Swift.Void. TypeAliasDecl *VoidDecl = nullptr; /// The declaration of Swift.UnsafeMutableRawPointer.memory. VarDecl *UnsafeMutableRawPointerMemoryDecl = nullptr; /// The declaration of Swift.UnsafeRawPointer.memory. VarDecl *UnsafeRawPointerMemoryDecl = nullptr; /// The declaration of Swift.UnsafeMutablePointer<T>.memory. VarDecl *UnsafeMutablePointerMemoryDecl = nullptr; /// The declaration of Swift.UnsafePointer<T>.memory. VarDecl *UnsafePointerMemoryDecl = nullptr; /// The declaration of Swift.AutoreleasingUnsafeMutablePointer<T>.memory. VarDecl *AutoreleasingUnsafeMutablePointerMemoryDecl = nullptr; /// The declaration of _Concurrency.DefaultActor. ClassDecl *DefaultActorDecl = nullptr; /// The declaration of _Concurrency.NSObjectDefaultActor. ClassDecl *NSObjectDefaultActorDecl = nullptr; // Declare cached declarations for each of the known declarations. #define FUNC_DECL(Name, Id) FuncDecl *Get##Name = nullptr; #include "swift/AST/KnownDecls.def" // Declare cached declarations for each of the known declarations. 
#define KNOWN_SDK_FUNC_DECL(Module, Name, Id) FuncDecl *Get##Name = nullptr; #include "swift/AST/KnownSDKDecls.def" /// func <Int, Int) -> Bool FuncDecl *LessThanIntDecl = nullptr; /// func ==(Int, Int) -> Bool FuncDecl *EqualIntDecl = nullptr; /// func _hashValue<H: Hashable>(for: H) -> Int FuncDecl *HashValueForDecl = nullptr; /// func append(Element) -> void FuncDecl *ArrayAppendElementDecl = nullptr; /// init(Builtin.RawPointer, Builtin.Word, Builtin.Int1) ConstructorDecl *MakeUTF8StringDecl = nullptr; /// func reserveCapacityForAppend(newElementsCount: Int) FuncDecl *ArrayReserveCapacityDecl = nullptr; /// func _stdlib_isOSVersionAtLeast(Builtin.Word,Builtin.Word, Builtin.word) /// -> Builtin.Int1 FuncDecl *IsOSVersionAtLeastDecl = nullptr; /// func _stdlib_isVariantOSVersionAtLeast( /// Builtin.Word, /// Builtin.Word, /// Builtin.word) /// -> Builtin.Int1 FuncDecl *IsVariantOSVersionAtLeastDecl = nullptr; /// func _stdlib_isOSVersionAtLeastOrVariantVersionAtLeast( /// Builtin.Word, /// Builtin.Word, /// Builtin.Word, /// Builtin.Word, /// Builtin.Word, /// Builtin.Word) /// -> Builtin.Int1 FuncDecl *IsOSVersionAtLeastOrVariantVersionAtLeastDecl = nullptr; /// func _isSwiftRuntimeVersionAtLeast( /// Builtin.Word, /// Builtin.Word, /// Builtin.word) /// -> Builtin.Int1 FuncDecl *IsSwiftRuntimeVersionAtLeastDecl = nullptr; /// The set of known protocols, lazily populated as needed. ProtocolDecl *KnownProtocols[NumKnownProtocols] = { }; /// The module interface checker owned by the ASTContext. std::unique_ptr<ModuleInterfaceChecker> InterfaceChecker; /// The various module loaders that import external modules into this /// ASTContext. SmallVector<std::unique_ptr<swift::ModuleLoader>, 4> ModuleLoaders; /// Singleton used to cache the import graph. swift::namelookup::ImportCache TheImportCache; /// The module loader used to load explicit Swift modules. 
SerializedModuleLoaderBase *TheExplicitSwiftModuleLoader = nullptr;

/// The module loader used to load Clang modules.
ClangModuleLoader *TheClangModuleLoader = nullptr;

/// The module loader used to load Clang modules from DWARF.
ClangModuleLoader *TheDWARFModuleLoader = nullptr;

/// Map from Swift declarations to deserialized resolved locations, ie.
/// actual \c SourceLocs that require opening their external buffer.
llvm::DenseMap<const Decl *, ExternalSourceLocs *> ExternalSourceLocs;

/// Map from declarations to foreign error conventions.
/// This applies to both actual imported functions and to @objc functions.
llvm::DenseMap<const AbstractFunctionDecl *, ForeignErrorConvention>
  ForeignErrorConventions;

/// Map from declarations to foreign async conventions.
llvm::DenseMap<const AbstractFunctionDecl *, ForeignAsyncConvention>
  ForeignAsyncConventions;

/// Cache of previously looked-up precedence queries.
AssociativityCacheType AssociativityCache;

/// Map from normal protocol conformances to diagnostics that have
/// been delayed until the conformance is fully checked.
llvm::DenseMap<NormalProtocolConformance *, ::DelayedConformanceDiags>
  DelayedConformanceDiags;

/// Stores information about lazy deserialization of various declarations.
llvm::DenseMap<const Decl *, LazyContextData *> LazyContexts;

/// A fake generic parameter list <Self> for parsing @opened archetypes
/// in textual SIL.
GenericParamList *SelfGenericParamList = nullptr;

/// The single-parameter generic signature with no constraints, <T>.
CanGenericSignature SingleGenericParameterSignature;

/// The element signature for a generic signature, which contains a clone
/// of the context generic signature with new type parameters and requirements
/// for opened pack elements in the given shape equivalence class.
llvm::DenseMap<std::pair<CanType, const GenericSignatureImpl *>,
               CanGenericSignature> ElementSignatures;

/// Overridden declarations.
llvm::DenseMap<const ValueDecl *, ArrayRef<ValueDecl *>> Overrides;

/// Default witnesses.
llvm::DenseMap<std::pair<const ProtocolDecl *, ValueDecl *>, Witness>
  DefaultWitnesses;

/// Default type witnesses for protocols.
llvm::DenseMap<std::pair<const ProtocolDecl *, AssociatedTypeDecl *>, Type>
  DefaultTypeWitnesses;

/// Default associated conformance witnesses for protocols.
llvm::DenseMap<std::tuple<const ProtocolDecl *, CanType, ProtocolDecl *>,
               ProtocolConformanceRef>
  DefaultAssociatedConformanceWitnesses;

/// Caches of default types for DefaultTypeRequest.
/// Used to be instance variables in the TypeChecker.
/// There is a logically separate cache for each SourceFile and
/// KnownProtocolKind.
llvm::DenseMap<SourceFile *, std::array<Type, NumKnownProtocols>>
  DefaultTypeRequestCaches;

/// Mapping from property declarations to the backing variable types.
llvm::DenseMap<const VarDecl *, Type> PropertyWrapperBackingVarTypes;

/// A mapping from the backing storage of a property that has a wrapper or
/// is `lazy` to the original property.
llvm::DenseMap<const VarDecl *, VarDecl *> OriginalVarsForBackingStorage;

/// The builtin initializer witness for a literal. Used when building
/// LiteralExprs in fully-checked AST.
llvm::DenseMap<std::pair<const NominalTypeDecl *, KnownProtocolKind>,
               ConcreteDeclRef> BuiltinInitWitness;

/// Mapping from the function decl to its original body's source range. This
/// is populated if the body is reparsed from other source buffers.
llvm::DenseMap<const AbstractFunctionDecl *, SourceRange>
  OriginalBodySourceRanges;

/// Macro discriminators per context.
llvm::DenseMap<std::pair<const void *, DeclBaseName>, unsigned>
  NextMacroDiscriminator;

/// Local and closure discriminators per context.
llvm::DenseMap<const DeclContext *, unsigned> NextDiscriminator;

/// Cached generic signatures for generic builtin types.
static const unsigned NumBuiltinGenericTypes =
    unsigned(TypeKind::Last_BuiltinGenericType) -
    unsigned(TypeKind::First_BuiltinGenericType) + 1;

std::array<GenericSignature, NumBuiltinGenericTypes>
  BuiltinGenericTypeSignatures = {};

/// Structure that captures data that is segregated into different
/// arenas.
struct Arena {
  static_assert(alignof(TypeBase) >= 8, "TypeBase not 8-byte aligned?");
  static_assert(alignof(TypeBase) >
                static_cast<unsigned>(
                    MetatypeRepresentation::Last_MetatypeRepresentation) + 1,
                "Use std::pair for MetatypeTypes and ExistentialMetatypeTypes.");

  /// Key for uniquing opened-existential environments.
  using OpenedExistentialKey = std::pair<SubstitutionMap, UUID>;

  // Uniquing tables for each kind of type node allocated in this arena.
  llvm::DenseMap<Type, ErrorType *> ErrorTypesWithOriginal;
  llvm::FoldingSet<TypeAliasType> TypeAliasTypes;
  llvm::FoldingSet<LocatableType> LocatableTypes;
  llvm::FoldingSet<TupleType> TupleTypes;
  llvm::FoldingSet<PackType> PackTypes;
  llvm::FoldingSet<PackExpansionType> PackExpansionTypes;
  llvm::FoldingSet<PackElementType> PackElementTypes;
  llvm::DenseMap<llvm::PointerIntPair<TypeBase*, 3, unsigned>, MetatypeType*>
    MetatypeTypes;
  llvm::DenseMap<llvm::PointerIntPair<TypeBase*, 3, unsigned>,
                 ExistentialMetatypeType*> ExistentialMetatypeTypes;
  llvm::DenseMap<Type, ArraySliceType*> ArraySliceTypes;
  llvm::DenseMap<std::pair<Type, Type>, InlineArrayType *> InlineArrayTypes;
  llvm::DenseMap<Type, VariadicSequenceType*> VariadicSequenceTypes;
  llvm::DenseMap<std::pair<Type, Type>, DictionaryType *> DictionaryTypes;
  llvm::DenseMap<Type, OptionalType*> OptionalTypes;
  llvm::DenseMap<uintptr_t, ReferenceStorageType*> ReferenceStorageTypes;
  llvm::DenseMap<Type, LValueType*> LValueTypes;
  llvm::DenseMap<Type, InOutType*> InOutTypes;
  llvm::DenseMap<std::pair<Type, void*>, DependentMemberType *>
    DependentMemberTypes;
  llvm::FoldingSet<ErrorUnionType> ErrorUnionTypes;
  llvm::DenseMap<void *, PlaceholderType *> PlaceholderTypes;
  llvm::DenseMap<Type, DynamicSelfType *> DynamicSelfTypes;
  llvm::DenseMap<std::pair<EnumDecl*, Type>, EnumType*> EnumTypes;
  llvm::DenseMap<std::pair<StructDecl*, Type>, StructType*> StructTypes;
  llvm::DenseMap<std::pair<ClassDecl*, Type>, ClassType*> ClassTypes;
  llvm::DenseMap<std::pair<ProtocolDecl*, Type>, ProtocolType*> ProtocolTypes;
  llvm::DenseMap<Type, ExistentialType *> ExistentialTypes;
  llvm::FoldingSet<UnboundGenericType> UnboundGenericTypes;
  llvm::FoldingSet<BoundGenericType> BoundGenericTypes;
  llvm::FoldingSet<BuiltinFixedArrayType> BuiltinFixedArrayTypes;
  llvm::FoldingSet<BuiltinBorrowType> BuiltinBorrowTypes;
  llvm::FoldingSet<ProtocolCompositionType> ProtocolCompositionTypes;
  llvm::FoldingSet<ParameterizedProtocolType> ParameterizedProtocolTypes;
  llvm::FoldingSet<LayoutConstraintInfo> LayoutConstraints;
  llvm::DenseMap<std::pair<OpaqueTypeDecl *, SubstitutionMap>,
                 GenericEnvironment *> OpaqueArchetypeEnvironments;

  llvm::DenseMap<CanType, OpenedExistentialSignature>
    ExistentialSignatures;
  llvm::DenseMap<OpenedExistentialKey, GenericEnvironment *>
    OpenedExistentialEnvironments;

  /// The set of function types.
  llvm::FoldingSet<FunctionType> FunctionTypes;

  /// The set of specialized protocol conformances.
  llvm::FoldingSet<SpecializedProtocolConformance> SpecializedConformances;

  /// The set of inherited protocol conformances.
  llvm::FoldingSet<InheritedProtocolConformance> InheritedConformances;

  /// The set of builtin protocol conformances.
  llvm::DenseMap<std::pair<Type, ProtocolDecl *>,
                 BuiltinProtocolConformance *> BuiltinConformances;

  /// The set of pack conformances.
  llvm::FoldingSet<PackConformance> PackConformances;

  /// The set of substitution maps (uniqued by their storage).
  llvm::FoldingSet<SubstitutionMap::Storage> SubstitutionMaps;

  /// The set of abstract conformances (uniqued by their storage).
  llvm::FoldingSet<AbstractConformance> AbstractConformances;

  // Conformances are bump-allocated, so their destructors must be run by
  // hand when the arena goes away.
  ~Arena() {
    for (auto &conformance : SpecializedConformances)
      conformance.~SpecializedProtocolConformance();
    // Work around MSVC warning: local variable is initialized but
    // not referenced.
#if SWIFT_COMPILER_IS_MSVC
#pragma warning (disable: 4189)
#endif
    for (auto &conformance : InheritedConformances)
      conformance.~InheritedProtocolConformance();
#if SWIFT_COMPILER_IS_MSVC
#pragma warning (default: 4189)
#endif
  }

  size_t getTotalMemory() const;

  void dump(llvm::raw_ostream &out) const;
};

// Uniquing tables that live outside the per-arena segregation.
llvm::DenseMap<ModuleDecl*, ModuleType*> ModuleTypes;
llvm::FoldingSet<GenericTypeParamType> GenericParamTypes;
llvm::FoldingSet<GenericFunctionType> GenericFunctionTypes;
llvm::FoldingSet<SILFunctionType> SILFunctionTypes;
llvm::FoldingSet<SILPackType> SILPackTypes;
llvm::DenseMap<CanType, SILBlockStorageType *> SILBlockStorageTypes;
llvm::DenseMap<CanType, SILMoveOnlyWrappedType *> SILMoveOnlyWrappedTypes;
llvm::FoldingSet<SILBoxType> SILBoxTypes;
llvm::FoldingSet<IntegerType> IntegerTypes;
llvm::DenseMap<BuiltinIntegerWidth, BuiltinIntegerType*> BuiltinIntegerTypes;
llvm::DenseMap<unsigned, BuiltinUnboundGenericType*>
  BuiltinUnboundGenericTypes;
llvm::FoldingSet<BuiltinVectorType> BuiltinVectorTypes;
llvm::FoldingSet<DeclName::CompoundDeclName> CompoundNames;
llvm::FoldingSet<DeclNameRef::SelectiveDeclNameRef> SelectiveNameRefs;
llvm::DenseMap<UUID, GenericEnvironment *> OpenedElementEnvironments;
llvm::FoldingSet<IndexSubset> IndexSubsets;
llvm::FoldingSet<AutoDiffDerivativeFunctionIdentifier>
  AutoDiffDerivativeFunctionIdentifiers;

llvm::FoldingSet<GenericSignatureImpl> GenericSignatures;

llvm::FoldingSet<NormalProtocolConformance> NormalConformances;
llvm::DenseMap<ProtocolDecl*, SelfProtocolConformance*> SelfConformances;

/// The set of unique AvailabilityContexts (uniqued by their storage).
llvm::FoldingSet<AvailabilityContext::Storage> AvailabilityContexts;

/// The set of unique custom availability domains.
llvm::FoldingSet<CustomAvailabilityDomain> CustomAvailabilityDomains;

/// A cache of information about whether particular nominal types
/// are representable in a foreign language.
llvm::DenseMap<NominalTypeDecl *, ForeignRepresentationInfo> ForeignRepresentableCache; llvm::StringMap<OptionSet<SearchPathKind>> SearchPathsSet; /// Plugin loader. std::unique_ptr<swift::PluginLoader> Plugins; /// The permanent arena. Arena Permanent; /// Temporary arena used for a constraint solver. struct ConstraintSolverArena : public Arena { /// The allocator used for all allocations within this arena. llvm::BumpPtrAllocator &Allocator; ConstraintSolverArena(llvm::BumpPtrAllocator &allocator) : Allocator(allocator) { } ConstraintSolverArena(const ConstraintSolverArena &) = delete; ConstraintSolverArena(ConstraintSolverArena &&) = delete; ConstraintSolverArena &operator=(const ConstraintSolverArena &) = delete; ConstraintSolverArena &operator=(ConstraintSolverArena &&) = delete; }; /// The current constraint solver arena, if any. std::unique_ptr<ConstraintSolverArena> CurrentConstraintSolverArena; Arena &getArena(AllocationArena arena) { switch (arena) { case AllocationArena::Permanent: return Permanent; case AllocationArena::ConstraintSolver: assert(CurrentConstraintSolverArena && "No constraint solver active?"); return *CurrentConstraintSolverArena; } llvm_unreachable("bad AllocationArena"); } llvm::FoldingSet<SILLayout> SILLayouts; llvm::DenseMap<OverrideSignatureKey, GenericSignature> overrideSigCache; std::optional<ClangTypeConverter> Converter; /// The IRGen specific SIL transforms that have been registered. SILTransformCtors IRGenSILPasses; /// The scratch context used to allocate intrinsic data on behalf of \c swift::IntrinsicInfo std::unique_ptr<llvm::LLVMContext> IntrinsicScratchContext; mutable std::optional<std::unique_ptr<clang::DarwinSDKInfo>> SDKInfo; /// Memory allocation arena for the term rewriting system. std::unique_ptr<rewriting::RewriteContext> TheRewriteContext; /// The singleton Builtin.TheTupleType. BuiltinTupleDecl *TheTupleTypeDecl = nullptr; /// The declared interface type of Builtin.TheTupleType. 
BuiltinTupleType *TheTupleType = nullptr; std::array<ProtocolDecl *, NumInvertibleProtocols> InvertibleProtocolDecls = {}; void dump(llvm::raw_ostream &out) const; }; ASTContext::Implementation::Implementation() : IdentifierTable(Allocator), IntrinsicScratchContext(new llvm::LLVMContext()) {} ASTContext::Implementation::~Implementation() { for (auto &conformance : NormalConformances) conformance.~NormalProtocolConformance(); for (auto &cleanup : Cleanups) cleanup(); } ConstraintCheckerArenaRAII:: ConstraintCheckerArenaRAII(ASTContext &self, llvm::BumpPtrAllocator &allocator) : Self(self), Data(self.getImpl().CurrentConstraintSolverArena.release()) { Self.getImpl().CurrentConstraintSolverArena.reset( new ASTContext::Implementation::ConstraintSolverArena(allocator)); } ConstraintCheckerArenaRAII::~ConstraintCheckerArenaRAII() { Self.getImpl().CurrentConstraintSolverArena.reset( (ASTContext::Implementation::ConstraintSolverArena *)Data); } static ModuleDecl *createBuiltinModule(ASTContext &ctx) { auto *M = ModuleDecl::create(ctx.getIdentifier(BUILTIN_NAME), ctx, [&](ModuleDecl *M, auto addFile) { addFile(new (ctx) BuiltinUnit(*M)); }); M->setHasResolvedImports(); return M; } inline ASTContext::Implementation &ASTContext::getImpl() const { auto pointer = reinterpret_cast<char*>(const_cast<ASTContext*>(this)); auto offset = llvm::alignAddr((void *)sizeof(*this), llvm::Align(alignof(Implementation))); return *reinterpret_cast<Implementation*>(pointer + offset); } ASTContext::GlobalCache &ASTContext::getGlobalCache() const { return getImpl().globalCache; } void ASTContext::operator delete(void *Data) throw() { AlignedFree(Data); } ASTContext *ASTContext::get( LangOptions &langOpts, TypeCheckerOptions &typecheckOpts, SILOptions &silOpts, SearchPathOptions &SearchPathOpts, ClangImporterOptions &ClangImporterOpts, symbolgraphgen::SymbolGraphOptions &SymbolGraphOpts, CASOptions &casOpts, SerializationOptions &serializationOpts, SourceManager &SourceMgr, DiagnosticEngine 
    &Diags, llvm::IntrusiveRefCntPtr<llvm::vfs::OutputBackend> OutputBackend) {
  // If more than two data structures are concatenated, then the aggregate
  // size math needs to become more complicated due to per-struct alignment
  // constraints.
  auto align = std::max(alignof(ASTContext), alignof(Implementation));
  auto size = llvm::alignTo(sizeof(ASTContext) + sizeof(Implementation), align);
  auto mem = AlignedAlloc(size, align);
  auto impl = reinterpret_cast<void*>((char*)mem + sizeof(ASTContext));
  impl = reinterpret_cast<void *>(
      llvm::alignAddr(impl, llvm::Align(alignof(Implementation))));
  // Construct the Implementation first so it exists before the ASTContext
  // constructor (which calls getImpl()) runs.
  new (impl) Implementation();
  return new (mem)
      ASTContext(langOpts, typecheckOpts, silOpts, SearchPathOpts,
                 ClangImporterOpts, SymbolGraphOpts, casOpts,
                 serializationOpts, SourceMgr, Diags,
                 std::move(OutputBackend));
}

ASTContext::ASTContext(
    LangOptions &langOpts, TypeCheckerOptions &typecheckOpts,
    SILOptions &silOpts, SearchPathOptions &SearchPathOpts,
    ClangImporterOptions &ClangImporterOpts,
    symbolgraphgen::SymbolGraphOptions &SymbolGraphOpts, CASOptions &casOpts,
    SerializationOptions &SerializationOpts, SourceManager &SourceMgr,
    DiagnosticEngine &Diags,
    llvm::IntrusiveRefCntPtr<llvm::vfs::OutputBackend> OutBackend)
    : LangOpts(langOpts), TypeCheckerOpts(typecheckOpts), SILOpts(silOpts),
      SearchPathOpts(SearchPathOpts), ClangImporterOpts(ClangImporterOpts),
      SymbolGraphOpts(SymbolGraphOpts), CASOpts(casOpts),
      SerializationOpts(SerializationOpts), SourceMgr(SourceMgr), Diags(Diags),
      OutputBackend(std::move(OutBackend)), evaluator(Diags, langOpts),
      TheBuiltinModule(createBuiltinModule(*this)),
      StdlibModuleName(getIdentifier(STDLIB_NAME)),
      SwiftShimsModuleName(getIdentifier(SWIFT_SHIMS_NAME)),
      blockListConfig(SourceMgr),
      // Pre-build the handful of singleton types that everything else uses.
      TheErrorType(new (*this, AllocationArena::Permanent)
                       ErrorType(*this, Type())),
      TheEmptyTupleType(TupleType::get(ArrayRef<TupleTypeElt>(), *this)),
      TheEmptyPackType(PackType::get(*this, {})),
      TheAnyType(ProtocolCompositionType::theAnyType(*this)),
      TheUnconstrainedAnyType(
          ProtocolCompositionType::theUnconstrainedAnyType(*this)),
      TheSelfType(
          CanGenericTypeParamType(GenericTypeParamType::getType(0, 0, *this))),
#define SINGLETON_TYPE(SHORT_ID, ID) \
      The##SHORT_ID##Type(new (*this, AllocationArena::Permanent) \
                              ID##Type(*this)),
#include "swift/AST/TypeNodes.def"
      TheIEEE32Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::IEEE32, *this)),
      TheIEEE64Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::IEEE64, *this)),
      TheIEEE16Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::IEEE16, *this)),
      TheIEEE80Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::IEEE80, *this)),
      TheIEEE128Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::IEEE128, *this)),
      ThePPC128Type(new (*this, AllocationArena::Permanent)
                        BuiltinFloatType(BuiltinFloatType::PPC128, *this)) {

  // Initialize all of the known identifiers.
#define IDENTIFIER_WITH_NAME(Name, IdStr) Id_##Name = getIdentifier(IdStr);
#include "swift/AST/KnownIdentifiers.def"

  Identifier stdlibOverlayNames[] = { Id_Concurrency, Id_StringProcessing };
  StdlibOverlayNames = AllocateCopy(stdlibOverlayNames);

  // Record the initial set of search paths.
  for (const auto &path : SearchPathOpts.getImportSearchPaths())
    getImpl().SearchPathsSet[path.Path] |= SearchPathKind::Import;
  for (const auto &framepath : SearchPathOpts.getFrameworkSearchPaths())
    getImpl().SearchPathsSet[framepath.Path] |= SearchPathKind::Framework;

  // Register any request-evaluator functions available at the AST layer.
  registerAccessRequestFunctions(evaluator);
  registerNameLookupRequestFunctions(evaluator);

  // Register canImport module info.
  for (auto &info: SearchPathOpts.CanImportModuleInfo)
    addSucceededCanImportModule(info.ModuleName, info.Version,
                                info.UnderlyingVersion);

  // Provide a default OnDiskOutputBackend if user didn't supply one.
if (!OutputBackend) OutputBackend = llvm::makeIntrusiveRefCnt<llvm::vfs::OnDiskOutputBackend>(); // Insert all block list config paths. for (auto path: langOpts.BlocklistConfigFilePaths) blockListConfig.addConfigureFilePath(path); } void ASTContext::Implementation::dump(llvm::raw_ostream &os) const { os << "-------------------------------------------------\n"; os << "Arena\t0\t" << Allocator.getBytesAllocated() << "\n"; Permanent.dump(os); #define SIZE(Name) os << #Name << "\t" << Name.size() << "\t0\n" #define SIZE_AND_BYTES(Name) os << #Name << "\t" \ << Name.size() << "\t" \ << llvm::capacity_in_bytes(Name) << "\n" SIZE(LoadedModules); SIZE(NameToModules); SIZE(IdentifierTable); SIZE(Cleanups); SIZE_AND_BYTES(ModuleLoaders); SIZE_AND_BYTES(ExternalSourceLocs); SIZE_AND_BYTES(ForeignErrorConventions); SIZE_AND_BYTES(ForeignAsyncConventions); SIZE_AND_BYTES(AssociativityCache); SIZE_AND_BYTES(DelayedConformanceDiags); SIZE_AND_BYTES(LazyContexts); SIZE_AND_BYTES(ElementSignatures); SIZE_AND_BYTES(Overrides); SIZE_AND_BYTES(DefaultWitnesses); SIZE_AND_BYTES(DefaultTypeWitnesses); SIZE_AND_BYTES(DefaultAssociatedConformanceWitnesses); SIZE_AND_BYTES(DefaultTypeRequestCaches); SIZE_AND_BYTES(PropertyWrapperBackingVarTypes); SIZE_AND_BYTES(OriginalVarsForBackingStorage); SIZE_AND_BYTES(BuiltinInitWitness); SIZE_AND_BYTES(OriginalBodySourceRanges); SIZE_AND_BYTES(NextMacroDiscriminator); SIZE_AND_BYTES(NextDiscriminator); SIZE_AND_BYTES(ModuleTypes); SIZE_AND_BYTES(SILBlockStorageTypes); SIZE_AND_BYTES(SILMoveOnlyWrappedTypes); SIZE_AND_BYTES(BuiltinIntegerTypes); SIZE_AND_BYTES(OpenedElementEnvironments); SIZE(NormalConformances); SIZE(SelfConformances); SIZE(AvailabilityContexts); SIZE(CustomAvailabilityDomains); SIZE_AND_BYTES(ForeignRepresentableCache); SIZE(SearchPathsSet); #undef SIZE #undef SIZE_AND_BYTES } ASTContext::~ASTContext() { if (LangOpts.AnalyzeRequestEvaluator) { evaluator.dump(llvm::dbgs()); getImpl().dump(llvm::dbgs()); } 
getImpl().~Implementation(); } void ASTContext::SetPreModuleImportCallback( PreModuleImportCallbackPtr callback) { PreModuleImportCallback = callback; } void ASTContext::PreModuleImportHook(StringRef ModuleName, ModuleImportKind Kind) const { if (PreModuleImportCallback) PreModuleImportCallback(ModuleName, Kind); } llvm::BumpPtrAllocator &ASTContext::getAllocator(AllocationArena arena) const { switch (arena) { case AllocationArena::Permanent: return getImpl().Allocator; case AllocationArena::ConstraintSolver: assert(getImpl().CurrentConstraintSolverArena != nullptr); return getImpl().CurrentConstraintSolverArena->Allocator; } llvm_unreachable("bad AllocationArena"); } void *detail::allocateInASTContext(size_t bytes, const ASTContext &ctx, AllocationArena arena, unsigned alignment) { return ctx.Allocate(bytes, alignment, arena); } ImportPath::Raw swift::detail::ImportPathBuilder_copyToImpl(ASTContext &ctx, ImportPath::Raw raw) { return ctx.AllocateCopy(raw); } Identifier swift::detail::ImportPathBuilder_getIdentifierImpl(ASTContext &ctx, StringRef string) { return ctx.getIdentifier(string); } /// Set a new stats reporter. void ASTContext::setStatsReporter(UnifiedStatsReporter *stats) { if (!stats) return; Stats = stats; stats->getFrontendCounters().NumASTBytesAllocated = getAllocator().getBytesAllocated(); if (stats->fineGrainedTimers()) evaluator.setStatsReporter(stats); } /// getIdentifier - Return the uniqued and AST-Context-owned version of the /// specified string. Identifier ASTContext::getIdentifier(StringRef Str) const { // Make sure null pointers stay null. 
if (Str.data() == nullptr) return Identifier(nullptr); auto pair = std::make_pair(Str, Identifier::Aligner()); auto I = getImpl().IdentifierTable.insert(pair).first; return Identifier(I->getKeyData()); } Identifier ASTContext::getDollarIdentifier(size_t Idx) const { SmallVector<char, 4> StrBuf; StringRef varName = ("$" + Twine(Idx)).toStringRef(StrBuf); return getIdentifier(varName); } void ASTContext::lookupInModule( ModuleDecl *M, StringRef name, SmallVectorImpl<ValueDecl *> &results) const { if (!M) return; // Find all of the declarations with this name in the Swift module. auto identifier = getIdentifier(name); M->lookupValue(identifier, NLKind::UnqualifiedLookup, results); } void ASTContext::lookupInSwiftModule( StringRef name, SmallVectorImpl<ValueDecl *> &results) const { lookupInModule(getStdlibModule(), name, results); } FuncDecl *ASTContext::getPlusFunctionOnRangeReplaceableCollection() const { if (getImpl().PlusFunctionOnRangeReplaceableCollection) { return getImpl().PlusFunctionOnRangeReplaceableCollection; } // Find all of the declarations with this name in the Swift module. SmallVector<ValueDecl *, 1> Results; lookupInSwiftModule("+", Results); for (auto Result : Results) { if (auto *FD = dyn_cast<FuncDecl>(Result)) { if (!FD->getOperatorDecl()) continue; for (auto Req: FD->getGenericRequirements()) { if (Req.getKind() == RequirementKind::Conformance && Req.getProtocolDecl() == getProtocol(KnownProtocolKind::RangeReplaceableCollection)) { getImpl().PlusFunctionOnRangeReplaceableCollection = FD; } } } } return getImpl().PlusFunctionOnRangeReplaceableCollection; } FuncDecl *ASTContext::getPlusFunctionOnString() const { if (getImpl().PlusFunctionOnString) { return getImpl().PlusFunctionOnString; } // Find all of the declarations with this name in the Swift module. 
SmallVector<ValueDecl *, 1> Results; lookupInSwiftModule("+", Results); for (auto Result : Results) { if (auto *FD = dyn_cast<FuncDecl>(Result)) { if (!FD->getOperatorDecl()) continue; auto ResultType = FD->getResultInterfaceType(); if (!ResultType->isString()) continue; auto ParamList = FD->getParameters(); if (ParamList->size() != 2) continue; if (ParamList->get(0)->getInterfaceType()->isString() && ParamList->get(1)->getInterfaceType()->isString()) { getImpl().PlusFunctionOnString = FD; break; } } } return getImpl().PlusFunctionOnString; } static FuncDecl *lookupRequirement(ProtocolDecl *proto, Identifier requirement) { for (auto result : proto->lookupDirect(requirement)) { if (result->getDeclContext() != proto) continue; if (auto func = dyn_cast<FuncDecl>(result)) { return func; } } return nullptr; } FuncDecl *ASTContext::getSequenceMakeIterator() const { if (getImpl().MakeIterator) { return getImpl().MakeIterator; } auto proto = getProtocol(KnownProtocolKind::Sequence); if (!proto) return nullptr; if (auto *func = lookupRequirement(proto, Id_makeIterator)) { getImpl().MakeIterator = func; return func; } return nullptr; } FuncDecl *ASTContext::getBorrowingSequenceMakeBorrowingIterator() const { if (getImpl().MakeBorrowingIterator) { return getImpl().MakeBorrowingIterator; } auto proto = getProtocol(KnownProtocolKind::BorrowingSequence); if (!proto) return nullptr; if (auto *func = lookupRequirement(proto, Id_makeBorrowingIterator)) { getImpl().MakeBorrowingIterator = func; return func; } return nullptr; } FuncDecl *ASTContext::getAsyncSequenceMakeAsyncIterator() const { if (getImpl().MakeAsyncIterator) { return getImpl().MakeAsyncIterator; } auto proto = getProtocol(KnownProtocolKind::AsyncSequence); if (!proto) return nullptr; if (auto *func = lookupRequirement(proto, Id_makeAsyncIterator)) { getImpl().MakeAsyncIterator = func; return func; } return nullptr; } FuncDecl *ASTContext::getIteratorNext() const { if (getImpl().IteratorNext) { return 
getImpl().IteratorNext; } auto proto = getProtocol(KnownProtocolKind::IteratorProtocol); if (!proto) return nullptr; if (auto *func = lookupRequirement(proto, Id_next)) { getImpl().IteratorNext = func; return func; } return nullptr; } FuncDecl *ASTContext::getBorrowingIteratorNextSpan() const { auto proto = getProtocol(KnownProtocolKind::BorrowingIteratorProtocol); if (!proto) return nullptr; if (auto *func = lookupRequirement(proto, Id_nextSpan)) { getImpl().BorrowingIteratorNextSpan = func; return func; } return nullptr; } static std::pair<FuncDecl *, FuncDecl *> getAsyncIteratorNextRequirements(const ASTContext &ctx) { auto proto = ctx.getProtocol(KnownProtocolKind::AsyncIteratorProtocol); if (!proto) return { nullptr, nullptr }; FuncDecl *next = nullptr; FuncDecl *nextThrowing = nullptr; for (auto result : proto->lookupDirect(ctx.Id_next)) { if (result->getDeclContext() != proto) continue; if (auto func = dyn_cast<FuncDecl>(result)) { switch (func->getParameters()->size()) { case 0: next = func; break; case 1: nextThrowing = func; break; default: break; } } } return { next, nextThrowing }; } FuncDecl *ASTContext::getAsyncIteratorNext() const { if (getImpl().AsyncIteratorNext) { return getImpl().AsyncIteratorNext; } auto next = getAsyncIteratorNextRequirements(*this).first; getImpl().AsyncIteratorNext = next; return next; } FuncDecl *ASTContext::getAsyncIteratorNextIsolated() const { if (getImpl().AsyncIteratorNextIsolated) { return getImpl().AsyncIteratorNextIsolated; } auto nextThrowing = getAsyncIteratorNextRequirements(*this).second; getImpl().AsyncIteratorNextIsolated = nextThrowing; return nextThrowing; } namespace { template<typename DeclClass> DeclClass *synthesizeBuiltinDecl(const ASTContext &ctx, StringRef name) { if (name == "Never") { auto never = new (ctx) EnumDecl(SourceLoc(), ctx.getIdentifier(name), SourceLoc(), { }, nullptr, ctx.MainModule); return (DeclClass *)never; } return nullptr; } } #define KNOWN_STDLIB_TYPE_DECL(NAME, DECL_CLASS, 
                                                      NUM_GENERIC_PARAMS) \
DECL_CLASS *ASTContext::get##NAME##Decl() const { \
  if (getImpl().NAME##Decl) \
    return getImpl().NAME##Decl; \
  SmallVector<ValueDecl *, 1> results; \
  lookupInSwiftModule(#NAME, results); \
  for (auto result : results) { \
    if (auto type = dyn_cast<DECL_CLASS>(result)) { \
      auto params = type->getGenericParams(); \
      if (NUM_GENERIC_PARAMS == (params == nullptr ? 0 : params->size())) { \
        getImpl().NAME##Decl = type; \
        return type; \
      } \
    } \
  } \
  getImpl().NAME##Decl = synthesizeBuiltinDecl<DECL_CLASS>(*this, #NAME); \
  return getImpl().NAME##Decl; \
} \
\
Type ASTContext::get##NAME##Type() const { \
  if (!get##NAME##Decl()) \
    return Type(); \
  return get##NAME##Decl()->getDeclaredInterfaceType(); \
}

// Stamp out the lazily-caching get<NAME>Decl()/get<NAME>Type() accessor pairs
// above for each known stdlib type. The decl accessor looks the name up in the
// Swift module, accepts only a candidate whose generic parameter count matches
// NUM_GENERIC_PARAMS, and otherwise synthesizes a builtin stand-in so the
// accessor never returns null once invoked.
#include "swift/AST/KnownStdlibTypes.def"

/// Return the canonical existential type for Error.
///
/// Falls back to Builtin.NativeObject when the Error protocol cannot be
/// found (e.g. no stdlib), so callers always get a usable type.
CanType ASTContext::getErrorExistentialType() const {
  if (auto *errorProto = getErrorDecl()) {
    return errorProto->getDeclaredExistentialType()->getCanonicalType();
  } else {
    // Use Builtin.NativeObject just as a stand-in.
    return TheNativeObjectType;
  }
}

/// Retrieve the stdlib Error protocol declaration, if available.
ProtocolDecl *ASTContext::getErrorDecl() const {
  return getProtocol(KnownProtocolKind::Error);
}

/// Retrieve Optional.some, caching the result on the implementation.
EnumElementDecl *ASTContext::getOptionalSomeDecl() const {
  if (!getImpl().OptionalSomeDecl)
    getImpl().OptionalSomeDecl =
        getOptionalDecl()->getUniqueElement(/*hasVal*/true);
  return getImpl().OptionalSomeDecl;
}

/// Retrieve Optional.none, caching the result on the implementation.
EnumElementDecl *ASTContext::getOptionalNoneDecl() const {
  if (!getImpl().OptionalNoneDecl)
    getImpl().OptionalNoneDecl =
        getOptionalDecl()->getUniqueElement(/*hasVal*/false);
  return getImpl().OptionalNoneDecl;
}

/// Retrieve the stdlib 'Void' typealias, or null if it cannot be found.
///
/// Unlike the macro-generated stdlib type accessors above, this does not
/// synthesize a fallback declaration; a missing stdlib yields nullptr.
TypeAliasDecl *ASTContext::getVoidDecl() const {
  if (getImpl().VoidDecl) {
    return getImpl().VoidDecl;
  }

  SmallVector<ValueDecl *, 1> results;
  lookupInSwiftModule("Void", results);
  for (auto result : results) {
    if (auto typealias = dyn_cast<TypeAliasDecl>(result)) {
      // Cache the first typealias named 'Void' found in the Swift module.
      getImpl().VoidDecl = typealias;
      return typealias;
    }
  }

  return nullptr;
}

/// Retrieve the declared interface type of 'Void', or a null Type when the
/// typealias is unavailable.
Type ASTContext::getVoidType() const {
  auto decl = getVoidDecl();
  if (!decl)
    return Type();
  return decl->getDeclaredInterfaceType();
}

/// Find the public 'pointee' property of a pointer nominal type.
///
/// \param cache Reference to the per-context cache slot for this property.
/// \param getNominal Member-function pointer retrieving the pointer nominal.
/// \param ctx The AST context performing the lookup.
///
/// The nominal must have exactly one generic parameter, and the property's
/// interface type must be that parameter. Aborts (llvm_unreachable) if the
/// nominal exists but no matching property is found.
static VarDecl *getPointeeProperty(VarDecl *&cache,
                           NominalTypeDecl *(ASTContext::*getNominal)() const,
                                   const ASTContext &ctx) {
  if (cache) return cache;

  // There must be a generic type with one argument.
  NominalTypeDecl *nominal = (ctx.*getNominal)();
  if (!nominal) return nullptr;
  auto sig = nominal->getGenericSignature();
  if (sig.getGenericParams().size() != 1) return nullptr;

  // There must be a property named "pointee".
  auto identifier = ctx.getIdentifier("pointee");
  auto results = nominal->lookupDirect(identifier);
  for (auto result : results) {
    // The property must have type T.
    auto *property = dyn_cast<VarDecl>(result);
    if (!property) continue;
    if (!property->getInterfaceType()->isEqual(sig.getGenericParams()[0]))
      continue;
    // Only the public property qualifies; internal shims are skipped.
    if (property->getFormalAccess() != AccessLevel::Public)
      continue;
    cache = property;
    return property;
  }
  llvm_unreachable("Could not find pointee property");
  return nullptr;
}

/// Retrieve the 'pointee' property declaration for the given pointer kind,
/// dispatching to the matching cached stdlib pointer type.
VarDecl *
ASTContext::getPointerPointeePropertyDecl(PointerTypeKind ptrKind) const {
  switch (ptrKind) {
  case PTK_UnsafeMutableRawPointer:
    return getPointeeProperty(getImpl().UnsafeMutableRawPointerMemoryDecl,
                              &ASTContext::getUnsafeMutableRawPointerDecl,
                              *this);
  case PTK_UnsafeRawPointer:
    return getPointeeProperty(getImpl().UnsafeRawPointerMemoryDecl,
                              &ASTContext::getUnsafeRawPointerDecl,
                              *this);
  case PTK_UnsafeMutablePointer:
    return getPointeeProperty(getImpl().UnsafeMutablePointerMemoryDecl,
                              &ASTContext::getUnsafeMutablePointerDecl,
                              *this);
  case PTK_UnsafePointer:
    return getPointeeProperty(getImpl().UnsafePointerMemoryDecl,
                              &ASTContext::getUnsafePointerDecl,
                              *this);
  case PTK_AutoreleasingUnsafeMutablePointer:
    return getPointeeProperty(
                         getImpl().AutoreleasingUnsafeMutablePointerMemoryDecl,
                         &ASTContext::getAutoreleasingUnsafeMutablePointerDecl,
                         *this);
  }
  llvm_unreachable("bad pointer kind");
}

/// The canonical existential form of 'Any'.
CanType ASTContext::getAnyExistentialType() const {
  return ExistentialType::get(TheAnyType)->getCanonicalType();
}

/// The canonical existential form of the unconstrained 'Any' type.
CanType ASTContext::getUnconstrainedAnyExistentialType() const {
  return ExistentialType::get(TheUnconstrainedAnyType)->getCanonicalType();
}

/// The AnyObject constraint type (the empty class-constrained composition),
/// cached on first use.
CanType ASTContext::getAnyObjectConstraint() const {
  if (getImpl().AnyObjectType) {
    return getImpl().AnyObjectType;
  }

  getImpl().AnyObjectType = CanType(
    ProtocolCompositionType::theAnyObjectType(*this));
  return getImpl().AnyObjectType;
}

/// The canonical existential type 'any AnyObject'.
CanType ASTContext::getAnyObjectType() const {
  return ExistentialType::get(getAnyObjectConstraint())
      ->getCanonicalType();
}

#define KNOWN_SDK_TYPE_DECL(MODULE, NAME, DECLTYPE, GENERIC_ARGS) \
DECLTYPE *ASTContext::get##NAME##Decl() const { \
  if (!getImpl().NAME##Decl) { \
    if (ModuleDecl *M = getLoadedModule(Id_##MODULE)) { \
      /* Note: lookupQualified() will search both the Swift overlay \
       * and the Clang module it imports. */ \
      SmallVector<ValueDecl *, 1> decls; \
      M->lookupQualified(M, DeclNameRef(getIdentifier(#NAME)), SourceLoc(), \
                         NL_OnlyTypes, decls); \
      if (decls.size() == 1 && isa<DECLTYPE>(decls[0])) { \
        auto decl = cast<DECLTYPE>(decls[0]); \
        if (isa<ProtocolDecl>(decl) \
            || (bool)decl->getGenericParams() == (bool)GENERIC_ARGS) { \
          getImpl().NAME##Decl = decl; \
        } \
      } \
    } \
  } \
  \
  return getImpl().NAME##Decl; \
} \
\
Type ASTContext::get##NAME##Type() const { \
  auto *decl = get##NAME##Decl(); \
  if (!decl) \
    return Type(); \
  return decl->getDeclaredInterfaceType(); \
}

// Stamp out cached accessors for each known SDK type. Unlike the stdlib
// variant above, these require the owning module to already be loaded and
// never synthesize a fallback declaration, so they can return null.
#include "swift/AST/KnownSDKTypes.def"

/// Synthesize the declaration of an invertible protocol (e.g. Copyable,
/// Escapable) when it is not present in a loadable stdlib.
///
/// The synthesized protocol is implicit, public, and @_marker, and is placed
/// either in the stdlib's synthesized file or, if the stdlib failed to load,
/// in the Builtin module's main file. The result is cached per kind.
ProtocolDecl *
ASTContext::synthesizeInvertibleProtocolDecl(InvertibleProtocolKind ip) const {
  const uint8_t index = (uint8_t)ip;
  if (auto *proto = getImpl().InvertibleProtocolDecls[index])
    return proto;

  ModuleDecl *stdlib = getStdlibModule();
  if (stdlib && stdlib->failedToLoad()) {
    stdlib = nullptr; // Use the Builtin module instead.

    // Ensure we emitted an error diagnostic!
    if (!Diags.hadAnyError())
      Diags.diagnose(SourceLoc(), diag::serialization_load_failed, "Swift");
  }

  FileUnit *file = nullptr;
  if (stdlib) {
    file = &stdlib->getFiles()[0]->getOrCreateSynthesizedFile();
  } else {
    file = &TheBuiltinModule->getMainFile(FileUnitKind::Builtin);
  }

  // No need to form an inheritance clause; invertible protocols do not
  // implicitly inherit from other invertible protocols.
  auto identifier = getIdentifier(getProtocolName(getKnownProtocolKind(ip)));
  ProtocolDecl *protocol =
      new (*this) ProtocolDecl(file, SourceLoc(), SourceLoc(), identifier,
                               /*primaryAssocTypes=*/{},
                               /*inherited=*/{},
                               /*whereClause=*/nullptr);
  protocol->setImplicit(true);

  // @_marker
  protocol->addAttribute(new (*this) MarkerAttr(/*implicit=*/true));

  // public
  protocol->setAccess(AccessLevel::Public);

  // Hack to get name lookup to work after synthesizing it into the stdlib.
  if (stdlib) {
    cast<SynthesizedFileUnit>(file)->addTopLevelDecl(protocol);
    stdlib->clearLookupCache();
  }

  getImpl().InvertibleProtocolDecls[index] = protocol;
  return protocol;
}

/// Retrieve a known protocol by kind, caching the result.
///
/// Selects the module to search based on the kind (Foundation, Concurrency,
/// Distributed, Cxx, etc., defaulting to the stdlib); returns null when that
/// module is not loaded or the protocol is not found. Invertible protocols
/// are synthesized on demand if the stdlib does not declare them.
ProtocolDecl *ASTContext::getProtocol(KnownProtocolKind kind) const {
  // Check whether we've already looked for and cached this protocol.
  unsigned index = (unsigned)kind;
  assert(index < NumKnownProtocols && "Number of known protocols is wrong");
  if (getImpl().KnownProtocols[index])
    return getImpl().KnownProtocols[index];

  // Find all of the declarations with this name in the appropriate module.
  SmallVector<ValueDecl *, 1> results;

  const ModuleDecl *M;
  NLKind NameLookupKind = NLKind::UnqualifiedLookup;
  switch (kind) {
  case KnownProtocolKind::BridgedNSError:
  case KnownProtocolKind::BridgedStoredNSError:
  case KnownProtocolKind::ErrorCodeProtocol:
    M = getLoadedModule(Id_Foundation);
    break;
  case KnownProtocolKind::CFObject:
    M = getLoadedModule(Id_CoreFoundation);
    break;
  case KnownProtocolKind::Differentiable:
    M = getLoadedModule(Id_Differentiation);
    break;
  case KnownProtocolKind::Actor:
  case KnownProtocolKind::GlobalActor:
  case KnownProtocolKind::AsyncSequence:
  case KnownProtocolKind::AsyncIteratorProtocol:
  case KnownProtocolKind::Executor:
  case KnownProtocolKind::TaskExecutor:
  case KnownProtocolKind::SerialExecutor:
  case KnownProtocolKind::ExecutorFactory:
    M = getLoadedModule(Id_Concurrency);
    break;
  case KnownProtocolKind::DistributedActor:
  case KnownProtocolKind::DistributedActorSystem:
  case KnownProtocolKind::DistributedTargetInvocationEncoder:
  case KnownProtocolKind::DistributedTargetInvocationDecoder:
  case KnownProtocolKind::DistributedTargetInvocationResultHandler:
    M = getLoadedModule(Id_Distributed);
    break;
  case KnownProtocolKind::CxxConvertibleToBool:
  case KnownProtocolKind::CxxConvertibleToCollection:
  case KnownProtocolKind::CxxDictionary:
  case KnownProtocolKind::CxxPair:
  case KnownProtocolKind::CxxOptional:
  case KnownProtocolKind::CxxRandomAccessCollection:
  case KnownProtocolKind::CxxMutableRandomAccessCollection:
  case KnownProtocolKind::CxxSet:
  case KnownProtocolKind::CxxSequence:
  case KnownProtocolKind::CxxBorrowingSequence:
  case KnownProtocolKind::CxxUniqueSet:
  case KnownProtocolKind::CxxVector:
  case KnownProtocolKind::CxxSpan:
  case KnownProtocolKind::CxxMutableSpan:
  case KnownProtocolKind::UnsafeCxxInputIterator:
  case KnownProtocolKind::UnsafeCxxMutableInputIterator:
  case KnownProtocolKind::UnsafeCxxRandomAccessIterator:
  case KnownProtocolKind::UnsafeCxxMutableRandomAccessIterator:
  case KnownProtocolKind::UnsafeCxxContiguousIterator:
  case KnownProtocolKind::UnsafeCxxMutableContiguousIterator:
    M = getLoadedModule(Id_Cxx);
    break;
  case KnownProtocolKind::Copyable:
  case KnownProtocolKind::Escapable:
    // If there's no stdlib, do qualified lookup in the Builtin module,
    // which will trigger the correct synthesis of the protocols in that module.
    M = getStdlibModule();
    if (!M) {
      NameLookupKind = NLKind::QualifiedLookup;
      M = TheBuiltinModule;
    }
    break;
  default:
    M = getStdlibModule();
    break;
  }

  if (!M)
    return nullptr;
  M->lookupValue(getIdentifier(getProtocolName(kind)), NameLookupKind,
                 ModuleLookupFlags::ExcludeMacroExpansions, results);

  for (auto result : results) {
    if (auto protocol = dyn_cast<ProtocolDecl>(result)) {
      getImpl().KnownProtocols[index] = protocol;
      return protocol;
    }
  }

  // If the invertible protocol wasn't found in the stdlib, synthesize it there.
  if (auto ip = getInvertibleProtocolKind(kind)) {
    assert(M == getStdlibModule());
    auto *protocol = synthesizeInvertibleProtocolDecl(*ip);
    getImpl().KnownProtocols[index] = protocol;
    return protocol;
  }
  return nullptr;
}

/// Find the implementation for the given "intrinsic" library function,
/// in the passed in module.
///
/// Returns the function only when the lookup produced exactly one result
/// and it is a FuncDecl; any ambiguity yields nullptr.
static FuncDecl *findLibraryIntrinsic(const ASTContext &ctx,
                                      ModuleDecl *M,
                                      StringRef name) {
  SmallVector<ValueDecl *, 1> results;
  ctx.lookupInModule(M, name, results);
  if (results.size() == 1)
    return dyn_cast_or_null<FuncDecl>(results.front());
  return nullptr;
}

/// Find the implementation for the given "intrinsic" library function.
static FuncDecl *findLibraryIntrinsic(const ASTContext &ctx,
                                      StringRef name) {
  return findLibraryIntrinsic(ctx, ctx.getStdlibModule(), name);
}

/// Returns the type of an intrinsic function if it is not generic, otherwise
/// returns nullptr.
/// Extract the (non-generic) function type of an intrinsic candidate.
///
/// \param fn The candidate function.
/// \param allowTypeMembers If true and \p fn is a type member, unwrap the
///        outer (self) level of the curried type and inspect the result.
/// \returns The FunctionType, or nullptr when the type is generic or not a
///          plain function type.
static FunctionType *
getIntrinsicCandidateType(FuncDecl *fn, bool allowTypeMembers) {
  auto type = fn->getInterfaceType();
  if (allowTypeMembers && fn->getDeclContext()->isTypeContext()) {
    auto fnType = type->getAs<FunctionType>();
    if (!fnType) return nullptr;

    type = fnType->getResult();
  }
  return type->getAs<FunctionType>();
}

/// Check whether the given type is Builtin.Int1.
static bool isBuiltinInt1Type(Type type) {
  if (auto intType = type->getAs<BuiltinIntegerType>())
    return intType->isFixedWidth() && intType->getFixedWidth() == 1;
  return false;
}

/// Check whether the given type is Builtin.Word.
static bool isBuiltinWordType(Type type) {
  if (auto intType = type->getAs<BuiltinIntegerType>())
    return intType->getWidth().isPointerWidth();
  return false;
}

/// Looks up all implementations of an operator (globally and declared in types)
/// and passes potential matches to the given callback. The search stops when
/// the predicate returns true (in which case the matching function declaration
/// is returned); otherwise, nullptr is returned if there are no matches.
/// \p ctx The AST context.
/// \p oper The name of the operator.
/// \p contextType If the operator is declared on a type, then only operators
/// defined on this type should be considered.
/// \p pred A callback predicate that takes as its argument the type of a
/// candidate function declaration and returns true if the function matches
/// the desired criteria.
/// \return The matching function declaration, or nullptr if there was no match.
static FuncDecl *
lookupOperatorFunc(const ASTContext &ctx, StringRef oper, Type contextType,
                   llvm::function_ref<bool(FunctionType *)> pred) {
  SmallVector<ValueDecl *, 32> candidates;
  ctx.lookupInSwiftModule(oper, candidates);

  for (auto candidate : candidates) {
    // All operator declarations should be functions, but make sure.
    auto *fnDecl = dyn_cast<FuncDecl>(candidate);
    if (!fnDecl)
      continue;

    // Member operators must be declared on the requested context type.
    if (fnDecl->getDeclContext()->isTypeContext()) {
      auto contextTy = fnDecl->getDeclContext()->getDeclaredInterfaceType();
      if (!contextTy->isEqual(contextType)) continue;
    }

    auto *funcTy = getIntrinsicCandidateType(fnDecl, /*allowTypeMembers=*/true);
    if (!funcTy)
      continue;

    if (pred(funcTy))
      return fnDecl;
  }

  return nullptr;
}

/// Witness for Bool's ExpressibleByBuiltinBooleanLiteral initializer.
ConcreteDeclRef ASTContext::getBoolBuiltinInitDecl() const {
  auto fn = [&](ASTContext &ctx) {
    return DeclName(ctx, DeclBaseName::createConstructor(),
                    { Id_builtinBooleanLiteral });
  };
  auto builtinProtocolKind =
      KnownProtocolKind::ExpressibleByBuiltinBooleanLiteral;
  return getBuiltinInitDecl(getBoolDecl(), builtinProtocolKind, fn);
}

/// Witness for \p intDecl's ExpressibleByBuiltinIntegerLiteral initializer.
ConcreteDeclRef
ASTContext::getIntBuiltinInitDecl(NominalTypeDecl *intDecl) const {
  auto fn = [&](ASTContext &ctx) {
    return DeclName(ctx, DeclBaseName::createConstructor(),
                    { Id_builtinIntegerLiteral });
  };
  auto builtinProtocolKind =
      KnownProtocolKind::ExpressibleByBuiltinIntegerLiteral;
  return getBuiltinInitDecl(intDecl, builtinProtocolKind, fn);
}

/// Witness for \p floatDecl's ExpressibleByBuiltinFloatLiteral initializer.
ConcreteDeclRef
ASTContext::getFloatBuiltinInitDecl(NominalTypeDecl *floatDecl) const {
  auto fn = [&](ASTContext &ctx) {
    return DeclName(ctx, DeclBaseName::createConstructor(),
                    { Id_builtinFloatLiteral });
  };
  auto builtinProtocolKind =
      KnownProtocolKind::ExpressibleByBuiltinFloatLiteral;
  return getBuiltinInitDecl(floatDecl, builtinProtocolKind, fn);
}

/// Witness for \p stringDecl's ExpressibleByBuiltinStringLiteral initializer.
ConcreteDeclRef
ASTContext::getStringBuiltinInitDecl(NominalTypeDecl *stringDecl) const {
  auto fn = [&](ASTContext &ctx) {
    return DeclName(ctx, DeclBaseName::createConstructor(),
                    { Id_builtinStringLiteral,
                      getIdentifier("utf8CodeUnitCount"),
                      getIdentifier("isASCII") });
  };
  auto builtinProtocolKind =
      KnownProtocolKind::ExpressibleByBuiltinStringLiteral;
  return getBuiltinInitDecl(stringDecl, builtinProtocolKind, fn);
}

/// Look up (and cache) the witness for a builtin-literal initializer
/// requirement on \p decl's conformance to \p builtinProtocolKind.
///
/// \param initName Callback producing the initializer's DeclName; invoked
///        lazily only when the witness is not cached.
/// \returns The witness, or an empty ConcreteDeclRef if the conformance is
///          invalid or the witness cannot be found (the failure is cached).
ConcreteDeclRef
ASTContext::getBuiltinInitDecl(NominalTypeDecl *decl,
                               KnownProtocolKind builtinProtocolKind,
               llvm::function_ref<DeclName(ASTContext &ctx)> initName) const {
  // Note the initializer name is expected to be unique for each protocol kind
  // so we don't need it to be part of the key.
  auto &witness = getImpl().BuiltinInitWitness[{decl, builtinProtocolKind}];
  if (witness)
    return witness;

  auto type = decl->getDeclaredInterfaceType();
  auto builtinProtocol = getProtocol(builtinProtocolKind);
  auto builtinConformance = lookupConformance(type, builtinProtocol);
  if (builtinConformance.isInvalid()) {
    witness = ConcreteDeclRef();
    return witness;
  }

  auto *ctx = const_cast<ASTContext *>(this);
  witness = builtinConformance.getWitnessByName(initName(*ctx));
  if (!witness) {
    witness = ConcreteDeclRef();
    return witness;
  }

  return witness;
}

/// Find the Regex(_regexString:version:) initializer and specialize it for
/// the given Regex type.
///
/// NOTE(review): spModule is dereferenced without a null check, so this
/// appears to assume the _StringProcessing module is already loaded —
/// confirm against callers.
ConcreteDeclRef ASTContext::getRegexInitDecl(Type regexType) const {
  auto *spModule = getLoadedModule(Id_StringProcessing);
  DeclName name(*const_cast<ASTContext *>(this),
                DeclBaseName::createConstructor(),
                {Id_regexString, Id_version});
  SmallVector<ValueDecl *, 1> results;
  spModule->lookupQualified(getRegexType(), DeclNameRef(name),
                            SourceLoc(), NL_IncludeUsableFromInline,
                            results);
  assert(results.size() == 1);
  auto *foundDecl = cast<ConstructorDecl>(results[0]);
  auto subs = regexType->getMemberSubstitutionMap(foundDecl);
  return ConcreteDeclRef(foundDecl, subs);
}

/// Find (and cache in \p cached) the stdlib operator \p op with the exact
/// signature (Int, Int) -> Bool.
static FuncDecl *getBinaryComparisonOperatorIntDecl(const ASTContext &C,
                                                    StringRef op,
                                                    FuncDecl *&cached) {
  if (cached)
    return cached;
  if (!C.getIntDecl() || !C.getBoolDecl())
    return nullptr;

  auto isIntParam = [&](AnyFunctionType::Param param) {
    return (!param.isVariadic() && !param.isInOut() &&
            param.getPlainType()->isInt());
  };

  auto decl = lookupOperatorFunc(C, op, C.getIntType(),
                                 [=](FunctionType *type) {
    // Check for the signature: (Int, Int) -> Bool
    if (type->getParams().size() != 2) return false;
    if (!isIntParam(type->getParams()[0]) ||
        !isIntParam(type->getParams()[1])) return false;
    return type->getResult()->isBool();
  });
  cached = decl;
  return decl;
}

/// The stdlib '<' operator on Int, cached.
FuncDecl *ASTContext::getLessThanIntDecl() const {
  return getBinaryComparisonOperatorIntDecl(*this, "<",
                                            getImpl().LessThanIntDecl);
}

/// The stdlib '==' operator on Int, cached.
FuncDecl *ASTContext::getEqualIntDecl() const {
  return getBinaryComparisonOperatorIntDecl(*this, "==",
                                            getImpl().EqualIntDecl);
}

/// Find the stdlib '_hashValue(for:)' function: a single-parameter function
/// with argument label 'for' and exactly one generic parameter. Cached.
FuncDecl *ASTContext::getHashValueForDecl() const {
  if (getImpl().HashValueForDecl)
    return getImpl().HashValueForDecl;

  SmallVector<ValueDecl *, 1> results;
  lookupInSwiftModule("_hashValue", results);
  for (auto result : results) {
    auto *fd = dyn_cast<FuncDecl>(result);
    if (!fd)
      continue;
    auto paramList = fd->getParameters();
    if (paramList->size() != 1)
      continue;
    auto paramDecl = paramList->get(0);
    if (paramDecl->getArgumentName() != Id_for)
      continue;
    auto genericParams = fd->getGenericParams();
    if (!genericParams || genericParams->size() != 1)
      continue;
    getImpl().HashValueForDecl = fd;
    return fd;
  }
  return nullptr;
}

/// Find Array's append(_:) marked @_semantics("array.append_element"),
/// verifying its expected shape: inout Array self, a single parameter of
/// the Element generic type, and a Void result. Cached.
FuncDecl *ASTContext::getArrayAppendElementDecl() const {
  if (getImpl().ArrayAppendElementDecl)
    return getImpl().ArrayAppendElementDecl;

  auto AppendFunctions = getArrayDecl()->lookupDirect(getIdentifier("append"));

  for (auto CandidateFn : AppendFunctions) {
    auto FnDecl = dyn_cast<FuncDecl>(CandidateFn);
    auto Attrs = FnDecl->getAttrs();
    for (auto *A : Attrs.getAttributes<SemanticsAttr, false>()) {
      if (A->Value != "array.append_element")
        continue;
      // Once the semantics attribute matches, any shape mismatch is treated
      // as a hard failure (returns nullptr rather than trying more
      // candidates).
      auto SelfDecl = FnDecl->getImplicitSelfDecl();
      if (!SelfDecl->isInOut())
        return nullptr;

      auto SelfInOutTy = SelfDecl->getInterfaceType();

      if (!SelfInOutTy->isArray())
        return nullptr;

      auto ParamList = FnDecl->getParameters();
      if (ParamList->size() != 1)
        return nullptr;

      GenericTypeParamType *ElementType = ParamList->get(0)->
                             getInterfaceType()->getAs<GenericTypeParamType>();
      if (!ElementType)
        return nullptr;
      if (ElementType->getName() != getIdentifier("Element"))
        return nullptr;

      if (!FnDecl->getResultInterfaceType()->isVoid())
        return nullptr;

      getImpl().ArrayAppendElementDecl = FnDecl;
      return FnDecl;
    }
  }
  return nullptr;
}

/// Find Array's reserveCapacityForAppend marked
/// @_semantics("array.reserve_capacity_for_append"), verifying its shape:
/// inout Array self, one parameter that is a native (non-Clang) struct
/// wrapping a single builtin integer, and a Void result. Cached.
FuncDecl *ASTContext::getArrayReserveCapacityDecl() const {
  if (getImpl().ArrayReserveCapacityDecl)
    return getImpl().ArrayReserveCapacityDecl;

  auto ReserveFunctions = getArrayDecl()->lookupDirect(
                                   getIdentifier("reserveCapacityForAppend"));

  for (auto CandidateFn : ReserveFunctions) {
    auto FnDecl = dyn_cast<FuncDecl>(CandidateFn);
    auto Attrs = FnDecl->getAttrs();
    for (auto *A : Attrs.getAttributes<SemanticsAttr, false>()) {
      if (A->Value != "array.reserve_capacity_for_append")
        continue;
      auto SelfDecl = FnDecl->getImplicitSelfDecl();
      if (!SelfDecl->isInOut())
        return nullptr;

      auto SelfInOutTy = SelfDecl->getInterfaceType();

      if (!SelfInOutTy->isArray())
        return nullptr;

      auto ParamList = FnDecl->getParameters();
      if (ParamList->size() != 1)
        return nullptr;
      StructType *IntType =
        ParamList->get(0)->getInterfaceType()->getAs<StructType>();
      if (!IntType)
        return nullptr;

      StructDecl *IntDecl = IntType->getDecl();
      auto StoredProperties = IntDecl->getStoredProperties();
      if (StoredProperties.size() != 1)
        return nullptr;
      VarDecl *field = StoredProperties[0];
      if (field->hasClangNode())
        return nullptr;
      if (!field->getInterfaceType()->is<BuiltinIntegerType>())
        return nullptr;

      if (!FnDecl->getResultInterfaceType()->isVoid())
        return nullptr;

      getImpl().ArrayReserveCapacityDecl = FnDecl;
      return FnDecl;
    }
  }
  return nullptr;
}

/// Find String's UTF-8 literal initializer: the 3-parameter constructor
/// marked with the string.makeUTF8 semantics attribute whose first argument
/// label is '_builtinStringLiteral'. Cached.
ConstructorDecl *ASTContext::getMakeUTF8StringDecl() const {
  if (getImpl().MakeUTF8StringDecl)
    return getImpl().MakeUTF8StringDecl;

  auto initializers =
    getStringDecl()->lookupDirect(DeclBaseName::createConstructor());

  for (Decl *initializer : initializers) {
    auto *constructor = cast<ConstructorDecl>(initializer);
    auto Attrs = constructor->getAttrs();
    for (auto *A : Attrs.getAttributes<SemanticsAttr, false>()) {
      if (A->Value != semantics::STRING_MAKE_UTF8)
        continue;
      auto ParamList = constructor->getParameters();
      if (ParamList->size() != 3)
        continue;
      ParamDecl *param = constructor->getParameters()->get(0);
      if (param->getArgumentName().str() != "_builtinStringLiteral")
        continue;
      getImpl().MakeUTF8StringDecl = constructor;
      return constructor;
    }
  }
  return nullptr;
}

/// Find the stdlib _stdlib_isOSVersionAtLeast intrinsic and validate its
/// exact signature: (Builtin.Word, Builtin.Word, Builtin.Word) ->
/// Builtin.Int1. Cached; returns nullptr when the shape doesn't match.
FuncDecl *ASTContext::getIsOSVersionAtLeastDecl() const {
  if (getImpl().IsOSVersionAtLeastDecl)
    return getImpl().IsOSVersionAtLeastDecl;

  // Look for the function.
  auto decl = findLibraryIntrinsic(*this, "_stdlib_isOSVersionAtLeast");
  if (!decl)
    return nullptr;

  auto *fnType = getIntrinsicCandidateType(decl, /*allowTypeMembers=*/false);
  if (!fnType)
    return nullptr;

  // Input must be (Builtin.Word, Builtin.Word, Builtin.Word)
  auto intrinsicsParams = fnType->getParams();
  if (intrinsicsParams.size() != 3)
    return nullptr;

  if (llvm::any_of(intrinsicsParams, [](AnyFunctionType::Param param) {
    return (param.isVariadic() || param.isInOut() ||
            !isBuiltinWordType(param.getPlainType()));
  })) {
    return nullptr;
  }

  // Output must be Builtin.Int1
  if (!isBuiltinInt1Type(fnType->getResult()))
    return nullptr;

  getImpl().IsOSVersionAtLeastDecl = decl;
  return decl;
}

/// Cached accessor for _stdlib_isVariantOSVersionAtLeast (no signature
/// validation, unlike getIsOSVersionAtLeastDecl above).
FuncDecl *ASTContext::getIsVariantOSVersionAtLeastDecl() const {
  if (getImpl().IsVariantOSVersionAtLeastDecl)
    return getImpl().IsVariantOSVersionAtLeastDecl;

  auto decl =
      findLibraryIntrinsic(*this, "_stdlib_isVariantOSVersionAtLeast");
  if (!decl)
    return nullptr;

  getImpl().IsVariantOSVersionAtLeastDecl = decl;
  return decl;
}

/// Cached accessor for _stdlib_isOSVersionAtLeastOrVariantVersionAtLeast.
FuncDecl *ASTContext::getIsOSVersionAtLeastOrVariantVersionAtLeast() const {
  if (getImpl().IsOSVersionAtLeastOrVariantVersionAtLeastDecl)
    return getImpl().IsOSVersionAtLeastOrVariantVersionAtLeastDecl;

  auto decl = findLibraryIntrinsic(*this,
      "_stdlib_isOSVersionAtLeastOrVariantVersionAtLeast");
  if (!decl)
    return nullptr;

  getImpl().IsOSVersionAtLeastOrVariantVersionAtLeastDecl = decl;
  return decl;
}

/// Cached accessor for _isSwiftRuntimeVersionAtLeast.
FuncDecl *ASTContext::getIsSwiftRuntimeVersionAtLeast() const {
  if (getImpl().IsSwiftRuntimeVersionAtLeastDecl)
    return getImpl().IsSwiftRuntimeVersionAtLeastDecl;

  auto decl = findLibraryIntrinsic(*this, "_isSwiftRuntimeVersionAtLeast");
  if (!decl)
    return nullptr;

  getImpl().IsSwiftRuntimeVersionAtLeastDecl = decl;
  return decl;
}
/// Determine whether precedence group \p a is transitively higher than \p b,
/// following both explicit lowerThan edges from \p b and higherThan edges
/// from \p a. Exact equality must have been filtered out by the caller.
static bool isHigherPrecedenceThan(PrecedenceGroupDecl *a,
                                   PrecedenceGroupDecl *b) {
  assert(a != b && "exact match should already have been filtered");

  SmallVector<PrecedenceGroupDecl*, 4> stack;

  // Compute the transitive set of precedence groups that are
  // explicitly lower than 'b', including 'b' itself.  This is expected
  // to be very small, since it's only legal in downstream modules.
  SmallPtrSet<PrecedenceGroupDecl*, 4> targets;
  targets.insert(b);
  stack.push_back(b);
  do {
    auto cur = stack.pop_back_val();
    for (auto &rel : cur->getLowerThan()) {
      auto group = rel.Group;

      // If we ever see 'a', we're done.
      if (group == a) return true;

      // Protect against invalid ASTs where the group isn't actually set.
      if (!group) continue;

      // If we've already inserted this, don't add it to the queue.
      if (!targets.insert(group).second) continue;

      stack.push_back(group);
    }
  } while (!stack.empty());

  // Walk down the higherThan relationships from 'a' and look for
  // anything in the set we just built.
  stack.push_back(a);
  do {
    auto cur = stack.pop_back_val();
    assert(!targets.count(cur));

    for (auto &rel : cur->getHigherThan()) {
      auto group = rel.Group;

      if (!group) continue;

      // If we ever see a group that's in the targets set, we're done.
      if (targets.count(group)) return true;

      stack.push_back(group);
    }
  } while (!stack.empty());

  return false;
}

/// Compute (and memoize) the associativity between two distinct precedence
/// groups: Left if \p left binds tighter, Right if \p right does, None
/// otherwise.
static Associativity computeAssociativity(AssociativityCacheType &cache,
                                          PrecedenceGroupDecl *left,
                                          PrecedenceGroupDecl *right) {
  auto it = cache.find({left, right});
  if (it != cache.end()) return it->second;

  auto result = Associativity::None;
  if (isHigherPrecedenceThan(left, right))
    result = Associativity::Left;
  else if (isHigherPrecedenceThan(right, left))
    result = Associativity::Right;
  cache.insert({{left, right}, result});
  return result;
}

/// Determine how two infix operators associate, given their precedence
/// groups. The query is canonicalized by pointer order so each unordered
/// pair is computed and cached only once.
Associativity
ASTContext::associateInfixOperators(PrecedenceGroupDecl *left,
                                    PrecedenceGroupDecl *right) const {
  // If the operators are in the same precedence group, use the group's
  // associativity.
  if (left == right) {
    return left->getAssociativity();
  }

  // This relationship is antisymmetric, so we can canonicalize to avoid
  // computing it twice.  Arbitrarily, if the pointer value of 'left'
  // is greater than the pointer value of 'right', we flip them and
  // then flip the result.

  if (uintptr_t(left) < uintptr_t(right)) {
    return computeAssociativity(getImpl().AssociativityCache, left, right);
  }

  switch (computeAssociativity(getImpl().AssociativityCache, right, left)) {
  case Associativity::Left: return Associativity::Right;
  case Associativity::Right: return Associativity::Left;
  case Associativity::None: return Associativity::None;
  }
  llvm_unreachable("bad associativity");
}

// Find library intrinsic function.
static FuncDecl *findLibraryFunction(const ASTContext &ctx, FuncDecl *&cache,
                                     StringRef name) {
  if (cache) return cache;

  // Look for a generic function.
  cache = findLibraryIntrinsic(ctx, name);
  return cache;
}

// Find library intrinsic function in passed in module
static FuncDecl *findLibraryFunction(const ASTContext &ctx, ModuleDecl *M,
                                     FuncDecl *&cache, StringRef name) {
  if (cache) return cache;

  // Look for a generic function.
  cache = findLibraryIntrinsic(ctx, M, name);
  return cache;
}

// Stamp out a cached get<Name>() accessor for each known stdlib function.
#define FUNC_DECL(Name, Id) \
FuncDecl *ASTContext::get##Name() const { \
  return findLibraryFunction(*this, getImpl().Get##Name, Id); \
}
#include "swift/AST/KnownDecls.def"

// Same as above for known SDK functions: prefer the owning SDK module when
// it is loaded, otherwise fall back to the stdlib.
#define KNOWN_SDK_FUNC_DECL(Module, Name, Id) \
FuncDecl *ASTContext::get##Name() const { \
  if (ModuleDecl *M = getLoadedModule(Id_##Module)) { \
    return findLibraryFunction(*this, M, getImpl().Get##Name, Id); \
  } else { \
    return findLibraryFunction(*this, getImpl().Get##Name, Id); \
  } \
}
#include "swift/AST/KnownSDKDecls.def"

/// Whether all the Optional decls and diagnostics the compiler relies on
/// are available.
bool ASTContext::hasOptionalIntrinsics() const {
  return getOptionalDecl() &&
         getOptionalSomeDecl() &&
         getOptionalNoneDecl() &&
         getDiagnoseUnexpectedNilOptional();
}

/// Whether all pointer types and pointer-argument conversion intrinsics are
/// available (the autoreleasing pointer is only required under ObjC interop).
bool ASTContext::hasPointerArgumentIntrinsics() const {
  return getUnsafeMutableRawPointerDecl()
    && getUnsafeRawPointerDecl()
    && getUnsafeMutablePointerDecl()
    && getUnsafePointerDecl()
    && (!LangOpts.EnableObjCInterop ||
        getAutoreleasingUnsafeMutablePointerDecl())
    && getUnsafeBufferPointerDecl()
    && getUnsafeMutableBufferPointerDecl()
    && getUnsafeRawBufferPointerDecl()
    && getUnsafeMutableRawBufferPointerDecl()
    && getConvertPointerToPointerArgument()
    && getConvertMutableArrayToPointerArgument()
    && getConvertConstArrayToPointerArgument()
    && getConvertConstStringToUTF8PointerArgument()
    && getConvertInOutToPointerArgument();
}

/// Whether the Array decl and array-literal allocation intrinsics exist.
bool ASTContext::hasArrayLiteralIntrinsics() const {
  return getArrayDecl()
    && getAllocateUninitializedArray()
    && getDeallocateUninitializedArray();
}

/// Register a callback to run when this context is destroyed.
void ASTContext::addCleanup(std::function<void(void)> cleanup) {
  getImpl().Cleanups.push_back(std::move(cleanup));
}

/// Whether any error has been emitted, including delayed conformance errors.
bool ASTContext::hadError() const {
  return Diags.hadAnyError() || hasDelayedConformanceErrors();
}

/// Retrieve the arena from which we should allocate storage for a type.
static AllocationArena getArena(RecursiveTypeProperties properties) {
  return properties.isSolverAllocated() ? AllocationArena::ConstraintSolver
                                        : AllocationArena::Permanent;
}

/// Add an import or framework search path, deduplicating by (path, kind),
/// and propagate it to the Clang module loader if one exists.
void ASTContext::addSearchPath(StringRef searchPath, bool isFramework,
                               bool isSystem) {
  OptionSet<SearchPathKind> &loaded = getImpl().SearchPathsSet[searchPath];
  auto kind = isFramework ? SearchPathKind::Framework : SearchPathKind::Import;
  if (loaded.contains(kind))
    return;
  loaded |= kind;

  if (isFramework) {
    SearchPathOpts.addFrameworkSearchPath({searchPath, isSystem},
                                          SourceMgr.getFileSystem().get());
  } else {
    SearchPathOpts.addImportSearchPath({searchPath, isSystem},
                                       SourceMgr.getFileSystem().get());
  }

  if (auto *clangLoader = getClangModuleLoader())
    clangLoader->addSearchPath(searchPath, isFramework, isSystem);
}

/// Register an explicit module path with the explicit Swift module loader,
/// if one has been installed; otherwise a no-op.
void ASTContext::addExplicitModulePath(StringRef name, std::string path) {
  if (getImpl().TheExplicitSwiftModuleLoader)
    getImpl().TheExplicitSwiftModuleLoader->addExplicitModulePath(name, path);
}

/// Take ownership of a module loader, recording the first loader of each
/// special flavor (Clang, DWARF, explicit Swift) for direct access later.
void ASTContext::addModuleLoader(std::unique_ptr<ModuleLoader> loader,
                                 bool IsClang, bool IsDwarf, bool IsInterface,
                                 bool IsExplicit) {
  if (IsClang && !IsDwarf && !getImpl().TheClangModuleLoader)
    getImpl().TheClangModuleLoader =
        static_cast<ClangModuleLoader *>(loader.get());
  if (IsClang && IsDwarf && !getImpl().TheDWARFModuleLoader)
    getImpl().TheDWARFModuleLoader =
        static_cast<ClangModuleLoader *>(loader.get());
  if (IsExplicit && !getImpl().TheExplicitSwiftModuleLoader)
    getImpl().TheExplicitSwiftModuleLoader =
        static_cast<SerializedModuleLoaderBase *>(loader.get());
  getImpl().ModuleLoaders.push_back(std::move(loader));
}

/// Install the (single) module interface checker for this context.
void ASTContext::addModuleInterfaceChecker(
    std::unique_ptr<ModuleInterfaceChecker> checker) {
  assert(!getImpl().InterfaceChecker && "Checker has been set already");
  getImpl().InterfaceChecker = std::move(checker);
}

/// Populate the module alias map from an alias -> real-name mapping.
void ASTContext::setModuleAliases(
    const llvm::StringMap<std::string> &aliasMap) {
  // This setter should be called only once after ASTContext has been
  // initialized
  assert(ModuleAliasMap.empty());

  for (auto &entry : aliasMap) {
    if (!entry.getValue().empty())
      addModuleAlias(entry.getKey(), entry.getValue());
  }
}

/// Record a single alias <-> real-name pair. Two entries are stored so the
/// map can be queried in either direction; the bool marks whether the key
/// of that entry is the alias (true) or the real name (false).
void ASTContext::addModuleAlias(StringRef moduleAlias, StringRef realName) {
  auto key = getIdentifier(moduleAlias);
  auto val = getIdentifier(realName);
  // key is a module alias, val is its corresponding real name
  ModuleAliasMap[key] = std::make_pair(val, true);
  // add an entry with an alias as key for an easier lookup later
  ModuleAliasMap[val] = std::make_pair(key, false);
}

/// Resolve a module name through the alias map according to \p option.
///
/// Returns \p key unchanged when no aliasing is in effect; may return an
/// empty Identifier when the key does not match the direction requested by
/// realNameFromAlias / aliasFromRealName.
Identifier ASTContext::getRealModuleName(Identifier key,
                                         ModuleAliasLookupOption option) const {
  auto found = ModuleAliasMap.find(key);
  if (found == ModuleAliasMap.end())
    return key; // No module aliasing was used, so just return the given key

  // Found an entry
  auto value = found->second;

  // With the alwaysRealName option, look up the real name by treating
  // the given key as an alias; if the key's not an alias, return the key
  // itself since that's the real name.
  if (option == ModuleAliasLookupOption::alwaysRealName) {
    return value.second ? value.first : key;
  }

  // With realNameFromAlias or aliasFromRealName option, only return the value
  // if the given key matches the description (whether it's an alias or real
  // name) by looking up the value.second (true if keyed by an alias). If not
  // matched, return an empty Identifier.
  if ((option == ModuleAliasLookupOption::realNameFromAlias &&
       !value.second) ||
      (option == ModuleAliasLookupOption::aliasFromRealName &&
       value.second))
    return Identifier();

  // Otherwise return the value found (whether the key is an alias or real
  // name)
  return value.first;
}

/// Ask every registered module loader to load extensions of \p nominal newer
/// than \p previousGeneration.
void ASTContext::loadExtensions(NominalTypeDecl *nominal,
                                unsigned previousGeneration) {
  PrettyStackTraceDecl stackTrace("loading extensions for", nominal);
  for (auto &loader : getImpl().ModuleLoaders) {
    loader->loadExtensions(nominal, previousGeneration);
  }
}

/// Gather Objective-C methods matching \p selector on \p tyDecl from all
/// module loaders, optionally excluding the Clang importer when only
/// Swift-declared methods are wanted.
void ASTContext::loadObjCMethods(
    NominalTypeDecl *tyDecl, ObjCSelector selector, bool isInstanceMethod,
    unsigned previousGeneration,
    llvm::TinyPtrVector<AbstractFunctionDecl *> &methods, bool swiftOnly) {
  PrettyStackTraceSelector stackTraceSelector("looking for", selector);
  PrettyStackTraceDecl stackTraceDecl("...in", tyDecl);

  for (auto &loader : getImpl().ModuleLoaders) {
    // Ignore the Clang importer if we've been asked for Swift-only results.
    if (swiftOnly && loader.get() == getClangModuleLoader())
      continue;

    loader->loadObjCMethods(tyDecl, selector, isInstanceMethod,
                            previousGeneration, methods);
  }
}

/// Find (and cache) the unique Optional<T>.TangentVector.init declared in
/// the _Differentiation module or the stdlib.
///
/// NOTE(review): the cache ignores which optionalTanType was passed on
/// subsequent calls — presumably the decl is type-independent; confirm.
ConstructorDecl *ASTContext::getOptionalTanInitDecl(CanType optionalTanType) {
  if (!getImpl().OptionalTanInitDecl) {
    auto *optionalTanDecl = optionalTanType.getNominalOrBoundGenericNominal();
    // Look up the `Optional<T>.TangentVector.init` declaration.
    auto initLookup =
      optionalTanDecl->lookupDirect(DeclBaseName::createConstructor());
    ConstructorDecl *constructorDecl = nullptr;
    for (auto *candidate : initLookup) {
      auto candidateModule = candidate->getModuleContext();
      if (candidateModule->getName() == Id_Differentiation ||
          candidateModule->isStdlibModule()) {
        assert(!constructorDecl && "Multiple `Optional.TangentVector.init`s");
        constructorDecl = cast<ConstructorDecl>(candidate);
#ifdef NDEBUG
        break;
#endif
      }
    }
    assert(constructorDecl && "No `Optional.TangentVector.init`");
    getImpl().OptionalTanInitDecl = constructorDecl;
  }
  return getImpl().OptionalTanInitDecl;
}

/// Find (and cache) the single stored property of Optional<T>.TangentVector,
/// which is expected to wrap an Optional value.
VarDecl *ASTContext::getOptionalTanValueDecl(CanType optionalTanType) {
  if (!getImpl().OptionalTanValueDecl) {
    // TODO: Maybe it would be better to have getters / setters here that we
    // can call and hide this implementation detail?
    StructDecl *optStructDecl = optionalTanType.getStructOrBoundGenericStruct();
    assert(optStructDecl && "Unexpected type of Optional.TangentVector");

    ArrayRef<VarDecl *> properties = optStructDecl->getStoredProperties();
    assert(properties.size() == 1 &&
           "Unexpected type of Optional.TangentVector");
    VarDecl *wrappedValueVar = properties[0];

    assert(wrappedValueVar->getTypeInContext()->getEnumOrBoundGenericEnum() ==
           getOptionalDecl() && "Unexpected type of Optional.TangentVector");

    getImpl().OptionalTanValueDecl = wrappedValueVar;
  }
  return getImpl().OptionalTanValueDecl;
}

/// Ask every module loader for derivative function configurations of
/// \p originalAFD newer than \p previousGeneration.
void ASTContext::loadDerivativeFunctionConfigurations(
    AbstractFunctionDecl *originalAFD, unsigned previousGeneration,
    llvm::SetVector<AutoDiffConfig> &results) {
  PrettyStackTraceDecl stackTrace(
      "loading derivative function configurations for", originalAFD);
  for (auto &loader : getImpl().ModuleLoaders) {
    loader->loadDerivativeFunctionConfigurations(originalAFD,
                                                 previousGeneration, results);
  }
}

/// Hand out the next macro-expansion discriminator for the given
/// (context, base name) pair, incrementing the per-pair counter.
unsigned ASTContext::getNextMacroDiscriminator(
    MacroDiscriminatorContext context, DeclBaseName baseName) {
  std::pair<const void *, DeclBaseName> key(
      context.getOpaqueValue(), baseName);
  return getImpl().NextMacroDiscriminator[key]++;
}

/// Get the next discriminator within the given declaration context.
unsigned ASTContext::getNextDiscriminator(const DeclContext *dc) {
  // Top-level code declarations don't have their own discriminators.
  if (auto tlcd = dyn_cast<TopLevelCodeDecl>(dc))
    dc = tlcd->getParent();

  return getImpl().NextDiscriminator[dc];
}

/// Set the maximum assigned discriminator within the given declaration
/// context.
void ASTContext::setMaxAssignedDiscriminator(
    const DeclContext *dc, unsigned discriminator) {
  // Top-level code declarations don't have their own discriminators.
  if (auto tlcd = dyn_cast<TopLevelCodeDecl>(dc))
    dc = tlcd->getParent();

  // Counters only ever move forward.
  assert(discriminator >= getImpl().NextDiscriminator[dc]);
  getImpl().NextDiscriminator[dc] = discriminator;
}

/// Debug-build-only verification pass over every loaded module.
void ASTContext::verifyAllLoadedModules() const {
#ifndef NDEBUG
  FrontendStatsTracer tracer(Stats, "verify-all-loaded-modules");
  for (auto &loader : getImpl().ModuleLoaders)
    loader->verifyAllModules();
#endif
}

/// Access the context-wide name-lookup import cache.
swift::namelookup::ImportCache &ASTContext::getImportCache() const {
  return getImpl().TheImportCache;
}

/// Retrieve (evaluating on first use) the map of availability macros
/// supplied on the command line.
const AvailabilityMacroMap &ASTContext::getAvailabilityMacroMap() const {
  auto *ctx = const_cast<ASTContext *>(this);
  return *evaluateOrFatal(ctx->evaluator,
                          AvailabilityMacroArgumentsRequest{ctx});
}

/// The registered Clang module loader, or null if none was added.
ClangModuleLoader *ASTContext::getClangModuleLoader() const {
  return getImpl().TheClangModuleLoader;
}

/// The registered DWARF module loader, or null if none was added.
ClangModuleLoader *ASTContext::getDWARFModuleLoader() const {
  return getImpl().TheDWARFModuleLoader;
}

/// The module interface checker; asserts that one has been installed.
ModuleInterfaceChecker *ASTContext::getModuleInterfaceChecker() const {
  auto *result = getImpl().InterfaceChecker.get();
  assert(result);
  return result;
}

ModuleDecl *ASTContext::getLoadedModule(
    ImportPath::Module ModulePath) const {
  assert(!ModulePath.empty());

  // TODO: Swift submodules.
if (ModulePath.size() == 1) { return getLoadedModule(ModulePath[0].Item); } return nullptr; } iterator_range<llvm::MapVector<Identifier, ModuleDecl *>::const_iterator> ASTContext::getLoadedModules() const { return {getImpl().LoadedModules.begin(), getImpl().LoadedModules.end()}; } ModuleDecl *ASTContext::getLoadedModule(Identifier ModuleName) const { // Look up a loaded module using an actual module name (physical name // on disk). If the -module-alias option is used, the module name that // appears in source code will be different from the real module name // on disk, otherwise the same. // // For example, if '-module-alias Foo=Bar' is passed in to the frontend, // and a source file has 'import Foo', a module called Bar (real name) // will be loaded and returned. auto realName = getRealModuleName(ModuleName); return getImpl().LoadedModules.lookup(realName); } void ASTContext::addLoadedModule(ModuleDecl *M) { assert(M); // Add a loaded module using an actual module name (physical name // on disk), in case -module-alias is used (otherwise same). // // For example, if '-module-alias Foo=Bar' is passed in to the frontend, // and a source file has 'import Foo', a module called Bar (real name) // will be loaded and added to the map. auto RealName = M->getRealName(); auto ABIName = M->getABIName(); auto &LoadedModules = getImpl().LoadedModules; auto &NameToModules = getImpl().NameToModules; // If a module with the same name has been loaded before, remove it from the // list of modules that share the same name. if (auto *current = LoadedModules.lookup(RealName)) { auto isCurrentModule = [&](ModuleDecl *module) { return module == current; }; llvm::erase_if(NameToModules[RealName], isCurrentModule); if (RealName != ABIName) llvm::erase_if(NameToModules[ABIName], isCurrentModule); } LoadedModules[RealName] = M; // Add the module to the mapping from module name to list of modules that // share that name. 
NameToModules[RealName].push_back(M); // If the ABI name differs from the real name, also add the module to the list // that share that ABI name. if (RealName != ABIName) NameToModules[ABIName].push_back(M); } void ASTContext::removeLoadedModule(Identifier RealName) { // First remove the module from the mappings of names to modules. if (ModuleDecl *M = getLoadedModule(RealName)) { auto eraseModule = [&](ModuleDecl *module) { return module->getRealName() == RealName; }; auto &vector = getImpl().NameToModules[M->getRealName()]; llvm::erase_if(vector, eraseModule); if (M->getRealName() != M->getABIName()) { auto &vector = getImpl().NameToModules[M->getABIName()]; llvm::erase_if(vector, eraseModule); } } getImpl().LoadedModules.erase(RealName); } void ASTContext::moduleABINameWillChange(ModuleDecl *module, Identifier newName) { auto it = llvm::find_if(getLoadedModules(), [&](auto pair) { return pair.second == module; }); // If this module isn't in the loaded modules list (perhaps because there is // no memory cache) theere's nothing to do. if (it == getLoadedModules().end()) return; // If the names are the same there's nothing to do. if (module->getABIName() == newName) return; // If the real and ABI names are different, ASTContext needs to remove the // module from the mapping whose key is the old ABI name. if (module->getRealName() != module->getABIName()) { auto &vector = getImpl().NameToModules[module->getABIName()]; llvm::erase_if(vector, [&](ModuleDecl *current) { return module == current; }); } // Now add the module to the vector that's mapped from the new name, if it's // not there already. 
auto &vector = getImpl().NameToModules[newName]; if (llvm::find(vector, module) == vector.end()) vector.push_back(module); } void ASTContext::setIgnoreAdjacentModules(bool value) { IgnoreAdjacentModules = value; } rewriting::RewriteContext & ASTContext::getRewriteContext() { auto &rewriteCtx = getImpl().TheRewriteContext; if (!rewriteCtx) rewriteCtx.reset(new rewriting::RewriteContext(*this)); return *rewriteCtx; } bool ASTContext::isRecursivelyConstructingRequirementMachine( CanGenericSignature sig) { return getRewriteContext().isRecursivelyConstructingRequirementMachine(sig); } bool ASTContext::isRecursivelyConstructingRequirementMachine( const ProtocolDecl *proto) { return getRewriteContext().isRecursivelyConstructingRequirementMachine(proto); } std::optional<llvm::TinyPtrVector<ValueDecl *>> OverriddenDeclsRequest::getCachedResult() const { auto decl = std::get<0>(getStorage()); if (!decl->LazySemanticInfo.hasOverriddenComputed) return std::nullopt; // If there are no overridden declarations (the common case), return. llvm::TinyPtrVector<ValueDecl *> overridden; if (!decl->LazySemanticInfo.hasOverridden) return overridden; // Retrieve the set of overrides from the ASTContext. ASTContext &ctx = decl->getASTContext(); auto known = ctx.getImpl().Overrides.find(decl); assert(known != ctx.getImpl().Overrides.end()); overridden.insert(overridden.end(), known->second.begin(), known->second.end()); return overridden; } void OverriddenDeclsRequest::cacheResult( llvm::TinyPtrVector<ValueDecl *> value) const { auto decl = std::get<0>(getStorage()); decl->LazySemanticInfo.hasOverriddenComputed = true; decl->LazySemanticInfo.hasOverridden = !value.empty(); if (value.empty()) return; // Soundness-check the declarations we were given. 
for (auto overriddenDecl : value) { assert(overriddenDecl->getKind() == decl->getKind() && "Overridden decl kind mismatch"); if (auto func = dyn_cast<AbstractFunctionDecl>(overriddenDecl)) func->setIsOverridden(); } // Record the overrides in the context. auto &ctx = decl->getASTContext(); auto overriddenCopy = ctx.AllocateCopy(ArrayRef<ValueDecl *>(value)); (void)ctx.getImpl().Overrides.insert({decl, overriddenCopy}); } /// Returns the default witness for a requirement, or nullptr if there is /// no default. Witness ProtocolDecl::getDefaultWitness(ValueDecl *requirement) const { loadAllMembers(); ASTContext &ctx = getASTContext(); auto found = ctx.getImpl().DefaultWitnesses.find({this, requirement}); if (found == ctx.getImpl().DefaultWitnesses.end()) return Witness(); return found->second; } /// Record the default witness for a requirement. void ProtocolDecl::setDefaultWitness(ValueDecl *requirement, Witness witness) { assert(witness); ASTContext &ctx = getASTContext(); auto pair = ctx.getImpl().DefaultWitnesses.insert( std::make_pair(std::make_pair(this, requirement), witness)); assert(pair.second && "Already have a default witness!"); (void) pair; } /// Returns the default type witness for an associated type, or a null /// type if there is no default. Type ProtocolDecl::getDefaultTypeWitness(AssociatedTypeDecl *assocType) const { auto &ctx = getASTContext(); auto found = ctx.getImpl().DefaultTypeWitnesses.find({this, assocType}); if (found == ctx.getImpl().DefaultTypeWitnesses.end()) return Type(); return found->second; } /// Set the default type witness for an associated type. 
void ProtocolDecl::setDefaultTypeWitness(AssociatedTypeDecl *assocType, Type witness) { assert(witness); assert(!witness->hasArchetype() && "Only record interface types"); ASTContext &ctx = getASTContext(); auto pair = ctx.getImpl().DefaultTypeWitnesses.insert( std::make_pair(std::make_pair(this, assocType), witness)); assert(pair.second && "Already have a default witness"); (void)pair; } ProtocolConformanceRef ProtocolDecl::getDefaultAssociatedConformanceWitness( CanType association, ProtocolDecl *requirement) const { auto &ctx = getASTContext(); auto found = ctx.getImpl().DefaultAssociatedConformanceWitnesses.find( std::make_tuple(this, association, requirement)); if (found == ctx.getImpl().DefaultAssociatedConformanceWitnesses.end()) return ProtocolConformanceRef::forInvalid(); return found->second; } void ProtocolDecl::setDefaultAssociatedConformanceWitness( CanType association, ProtocolDecl *requirement, ProtocolConformanceRef conformance) { auto &ctx = getASTContext(); auto pair = ctx.getImpl().DefaultAssociatedConformanceWitnesses.insert( std::make_pair(std::make_tuple(this, association, requirement), conformance)); assert(pair.second && "Already have a default associated conformance"); (void)pair; } void ASTContext::getVisibleTopLevelModuleNames( SmallVectorImpl<Identifier> &names) const { names.clear(); for (auto &importer : getImpl().ModuleLoaders) importer->collectVisibleTopLevelModuleNames(names); // Sort and unique. 
std::sort(names.begin(), names.end(), [](Identifier LHS, Identifier RHS) { return LHS.str().compare_insensitive(RHS.str()) < 0; }); names.erase(std::unique(names.begin(), names.end()), names.end()); } bool ASTContext::shouldPerformTypoCorrection() { NumTypoCorrections += 1; return NumTypoCorrections <= LangOpts.TypoCorrectionLimit; } static bool isClangModuleVersion(const ModuleLoader::ModuleVersionInfo &info) { switch (info.getSourceKind()) { case ModuleLoader::ModuleVersionSourceKind::ClangModuleTBD: return true; case ModuleLoader::ModuleVersionSourceKind::SwiftBinaryModule: case ModuleLoader::ModuleVersionSourceKind::SwiftInterface: return false; } } void ASTContext::addSucceededCanImportModule( StringRef moduleName, const llvm::VersionTuple &versionInfo, const llvm::VersionTuple &underlyingVersionInfo) { // We have previously recorded a successful canImport // information for this module. if (CanImportModuleVersions.count(moduleName.str())) return; auto &entry = CanImportModuleVersions[moduleName.str()]; entry.Version = versionInfo; entry.UnderlyingVersion = underlyingVersionInfo; } bool ASTContext::canImportModuleImpl( ImportPath::Module ModuleName, SourceLoc loc, llvm::VersionTuple version, bool isUnderlyingVersion, bool isSourceCanImport, llvm::VersionTuple &foundVersion, llvm::VersionTuple &foundUnderlyingClangVersion) const { SmallString<64> FullModuleName; ModuleName.getString(FullModuleName); auto ModuleNameStr = FullModuleName.str().str(); // If we've failed loading this module before, don't look for it again. if (FailedModuleImportNames.count(ModuleNameStr)) return false; auto missingVersion = [this, &loc, &ModuleName, &isUnderlyingVersion]() -> bool { // The module version could not be parsed from the preferred source for // this query. Diagnose and return `true` to indicate that the unversioned // module will satisfy the query. 
auto mID = ModuleName[0]; auto diagLoc = mID.Loc; if (mID.Loc.isInvalid()) diagLoc = loc; Diags.diagnose(diagLoc, diag::cannot_find_module_version, mID.Item.str(), isUnderlyingVersion); return true; }; // If this module has already been checked or there is information for the // module from commandline, use that information instead of loading the // module. auto Found = CanImportModuleVersions.find(ModuleNameStr); if (Found != CanImportModuleVersions.end()) { if (version.empty()) return true; const auto &foundComparisonVersion = isUnderlyingVersion ? Found->second.UnderlyingVersion : Found->second.Version; if (!foundComparisonVersion.empty()) return version <= foundComparisonVersion; else return missingVersion(); } // When looking up a module, each module importer will report back // if it finds a module with a specified version. This routine verifies // whether said version is valid and if it superceeds the best // previously-discovered version of this module found. auto validateVersion = [](const ModuleLoader::ModuleVersionInfo &bestVersionInfo, const ModuleLoader::ModuleVersionInfo &versionInfo, bool underlyingVersion) { if (!versionInfo.isValid()) return false; // The loader didn't attempt to parse a version. if (underlyingVersion && !isClangModuleVersion(versionInfo)) return false; // We're only matching Clang module versions. if (bestVersionInfo.isValid() && versionInfo.getSourceKind() <= bestVersionInfo.getSourceKind()) return false; // This module version's source is lower priority. return true; }; // For each module loader, attempt to discover queried module, // along the way record the discovered module's version as well as // the discovered module's underlying Clang module's version. 
auto lookupVersionedModule = [&](ModuleLoader::ModuleVersionInfo &bestVersionInfo, ModuleLoader::ModuleVersionInfo &bestUnderlyingVersionInfo) -> bool { for (auto &importer : getImpl().ModuleLoaders) { ModuleLoader::ModuleVersionInfo versionInfo; if (!importer->canImportModule(ModuleName, loc, &versionInfo)) continue; // The loader can't find the module. if (validateVersion(bestVersionInfo, versionInfo, /* underlyingVersion */ false)) bestVersionInfo = versionInfo; if (validateVersion(bestUnderlyingVersionInfo, versionInfo, /* underlyingVersion */ true)) bestUnderlyingVersionInfo = versionInfo; } if (!isUnderlyingVersion && !bestVersionInfo.isValid()) return false; if (isUnderlyingVersion && !bestUnderlyingVersionInfo.isValid()) return false; foundVersion = bestVersionInfo.getVersion(); foundUnderlyingClangVersion = bestUnderlyingVersionInfo.getVersion(); return true; }; // For queries which do not care about any kind of module information // such as e.g. `testImportModule`, simply return `true` as soon // as *any* loader can find the queried module. auto lookupModule = [&]() -> bool { for (auto &importer : getImpl().ModuleLoaders) { ModuleLoader::ModuleVersionInfo versionInfo; if (!importer->canImportModule(ModuleName, loc, &versionInfo)) continue; // The loader can't find the module. return true; } return false; }; if (version.empty()) { // If this module has already been successfully imported, it is importable. if (getLoadedModule(ModuleName) != nullptr) return true; if (!isSourceCanImport) return lookupModule(); // Otherwise, ask whether any module loader can load the module, // and record the module version that the succeeding loader // observed. ModuleLoader::ModuleVersionInfo versionInfo, underlyingVersionInfo; if (lookupVersionedModule(versionInfo, underlyingVersionInfo)) return true; if (isSourceCanImport) FailedModuleImportNames.insert(ModuleNameStr); return false; } // We need to check whether the version of the module is high enough. 
// Retrieve a module version from each module loader that can find the module // and use the best source available for the query. ModuleLoader::ModuleVersionInfo versionInfo, underlyingVersionInfo; if (!lookupVersionedModule(versionInfo, underlyingVersionInfo)) return false; const auto &queryVersion = isUnderlyingVersion ? underlyingVersionInfo : versionInfo; if (queryVersion.getVersion().empty()) return missingVersion(); return version <= queryVersion.getVersion(); } void ASTContext::forEachCanImportVersionCheck( std::function<void(StringRef, const llvm::VersionTuple &, const llvm::VersionTuple &)> Callback) const { for (auto &entry : CanImportModuleVersions) Callback(entry.first, entry.second.Version, entry.second.UnderlyingVersion); } bool ASTContext::canImportModule(ImportPath::Module moduleName, SourceLoc loc, llvm::VersionTuple version, bool underlyingVersion) { llvm::VersionTuple versionInfo; llvm::VersionTuple underlyingVersionInfo; bool canImport = canImportModuleImpl(moduleName, loc, version, underlyingVersion, true, versionInfo, underlyingVersionInfo); // If found an import or resolved an version, recorded the module. 
if (canImport || !versionInfo.empty() || !underlyingVersionInfo.empty()) { SmallString<64> fullModuleName; moduleName.getString(fullModuleName); addSucceededCanImportModule(fullModuleName, versionInfo, underlyingVersionInfo); } return canImport;; } bool ASTContext::testImportModule(ImportPath::Module ModuleName, llvm::VersionTuple version, bool underlyingVersion) const { llvm::VersionTuple versionInfo; llvm::VersionTuple underlyingVersionInfo; return canImportModuleImpl(ModuleName, SourceLoc(), version, underlyingVersion, false, versionInfo, underlyingVersionInfo); } ModuleDecl * ASTContext::getModule(ImportPath::Module ModulePath, bool AllowMemoryCached) { assert(!ModulePath.empty()); if (AllowMemoryCached) if (auto *M = getLoadedModule(ModulePath)) return M; auto moduleID = ModulePath[0]; PreModuleImportHook(moduleID.Item.str(), ModuleImportKind::Module); for (auto &importer : getImpl().ModuleLoaders) { if (ModuleDecl *M = importer->loadModule(moduleID.Loc, ModulePath, AllowMemoryCached)) { if (LangOpts.EnableModuleLoadingRemarks) { Diags.diagnose(ModulePath.getSourceRange().Start, diag::module_loaded, M->getName(), /*overlay=*/false, M->getModuleSourceFilename(), M->getModuleLoadedFilename()); } return M; } } return nullptr; } ModuleDecl *ASTContext::getOverlayModule(const FileUnit *FU) { assert(FU && FU->getKind() == FileUnitKind::ClangModule && "Overlays can only be retrieved for clang modules!"); ImportPath::Module::Builder builder(FU->getParentModule()->getName()); auto ModPath = builder.get(); if (auto *Existing = getLoadedModule(ModPath)) { if (!Existing->isNonSwiftModule()) return Existing; } if (PreModuleImportCallback) { SmallString<16> path; ModPath.getString(path); if (!path.empty()) PreModuleImportCallback(path.str(), ModuleImportKind::Overlay); } for (auto &importer : getImpl().ModuleLoaders) { if (importer.get() == getClangModuleLoader()) continue; if (ModuleDecl *M = importer->loadModule(SourceLoc(), ModPath)) { if 
(LangOpts.EnableModuleLoadingRemarks) { Diags.diagnose(SourceLoc(), diag::module_loaded, M->getName(), /*overlay=*/true, M->getModuleSourceFilename(), M->getModuleLoadedFilename()); } return M; } } return nullptr; } ModuleDecl *ASTContext::getModuleByName(StringRef ModuleName) { ImportPath::Module::Builder builder(*this, ModuleName, /*separator=*/'.'); return getModule(builder.get()); } ModuleDecl *ASTContext::getModuleByIdentifier(Identifier ModuleID) { ImportPath::Module::Builder builder(ModuleID); return getModule(builder.get()); } llvm::ArrayRef<ModuleDecl *> ASTContext::getModulesByRealOrABIName(StringRef ModuleName) { auto Identifier = getIdentifier(ModuleName); auto it = getImpl().NameToModules.find(Identifier); if (it != getImpl().NameToModules.end()) return it->second; // If we didn't find the module it might have not been loaded yet, try // triggering a module load and searching again. getModuleByName(ModuleName); it = getImpl().NameToModules.find(Identifier); if (it != getImpl().NameToModules.end()) return it->second; return {}; } ModuleDecl *ASTContext::getStdlibModule(bool loadIfAbsent) { if (TheStdlibModule) return TheStdlibModule; if (loadIfAbsent) { auto mutableThis = const_cast<ASTContext*>(this); TheStdlibModule = mutableThis->getModuleByIdentifier(StdlibModuleName); } else { TheStdlibModule = getLoadedModule(StdlibModuleName); } return TheStdlibModule; } std::optional<ExternalSourceLocs *> ASTContext::getExternalSourceLocs(const Decl *D) { auto Known = getImpl().ExternalSourceLocs.find(D); if (Known == getImpl().ExternalSourceLocs.end()) return std::nullopt; return Known->second; } void ASTContext::setExternalSourceLocs(const Decl *D, ExternalSourceLocs *Locs) { getImpl().ExternalSourceLocs[D] = Locs; } NormalProtocolConformance * ASTContext::getNormalConformance(Type conformingType, ProtocolDecl *protocol, SourceLoc loc, TypeRepr *inheritedTypeRepr, DeclContext *dc, ProtocolConformanceState state, ProtocolConformanceOptions options) { 
assert(dc->isTypeContext()); llvm::FoldingSetNodeID id; NormalProtocolConformance::Profile(id, protocol, dc); // Did we already record the normal conformance? void *insertPos; auto &normalConformances = getImpl().NormalConformances; if (auto result = normalConformances.FindNodeOrInsertPos(id, insertPos)) return result; // Build a new normal protocol conformance. auto result = new (*this) NormalProtocolConformance( conformingType, protocol, loc, inheritedTypeRepr, dc, state, options); normalConformances.InsertNode(result, insertPos); return result; } /// Produce a self-conformance for the given protocol. SelfProtocolConformance * ASTContext::getSelfConformance(ProtocolDecl *protocol) { auto &selfConformances = getImpl().SelfConformances; auto &entry = selfConformances[protocol]; if (!entry) { entry = new (*this) SelfProtocolConformance( protocol->getDeclaredExistentialType()); } return entry; } /// Produce the builtin conformance for some non-nominal to some protocol. BuiltinProtocolConformance * ASTContext::getBuiltinConformance(Type type, ProtocolDecl *protocol, BuiltinConformanceKind kind) { auto key = std::make_pair(type, protocol); AllocationArena arena = getArena(type->getRecursiveProperties()); auto &builtinConformances = getImpl().getArena(arena).BuiltinConformances; auto &entry = builtinConformances[key]; if (!entry) { entry = new (*this) BuiltinProtocolConformance(type, protocol, kind); } return entry; } static bool collapseSpecializedConformance(Type type, NormalProtocolConformance *conformance, SubstitutionMap substitutions) { if (!conformance->getType()->isEqual(type)) return false; for (auto subConformance : substitutions.getConformances()) { if (!subConformance.isAbstract()) return false; } return true; } ProtocolConformance * ASTContext::getSpecializedConformance(Type type, NormalProtocolConformance *generic, SubstitutionMap substitutions) { CONDITIONAL_ASSERT(substitutions.getGenericSignature().getCanonicalSignature() == 
generic->getGenericSignature().getCanonicalSignature());

  // If the specialization is a no-op, use the root conformance instead.
  if (collapseSpecializedConformance(type, generic, substitutions)) {
    ++NumCollapsedSpecializedProtocolConformances;
    return generic;
  }

  llvm::FoldingSetNodeID id;
  SpecializedProtocolConformance::Profile(id, type, generic, substitutions);

  // Figure out which arena this conformance should go into.
  AllocationArena arena = getArena(type->getRecursiveProperties());

  // Did we already record the specialized conformance?
  void *insertPos;
  auto &specializedConformances = getImpl().getArena(arena).SpecializedConformances;
  if (auto result = specializedConformances.FindNodeOrInsertPos(id, insertPos))
    return result;

  // Vanishing tuple conformances must be handled by the caller.
  if (isa<BuiltinTupleDecl>(generic->getDeclContext()->getSelfNominalTypeDecl())) {
    assert(type->is<TupleType>() && "Vanishing tuple substitution is not "
           "here. Did you mean to use ProtocolConformanceRef::subst() "
           "instead?");
  }

  // Build a new specialized conformance.
  auto result = new (*this, arena) SpecializedProtocolConformance(type, generic,
                                                                  substitutions);
  // NOTE(review): re-query after construction; building the conformance must
  // not have inserted an equivalent node in the meantime.
  auto node = specializedConformances.FindNodeOrInsertPos(id, insertPos);
  (void)node;
  assert(!node);
  specializedConformances.InsertNode(result, insertPos);
  return result;
}

/// Produce the uniqued InheritedProtocolConformance wrapping \p inherited
/// for the subclass type \p type, collapsing redundant wrappers first.
ProtocolConformance *
ASTContext::getInheritedConformance(Type type, ProtocolConformance *inherited) {
  // Collapse multiple levels of inherited conformance.
  if (auto *otherInherited = dyn_cast<InheritedProtocolConformance>(inherited))
    inherited = otherInherited->getInheritedConformance();

  assert(isa<SpecializedProtocolConformance>(inherited) ||
         isa<NormalProtocolConformance>(inherited) ||
         isa<BuiltinProtocolConformance>(inherited));

  // Collapse useless inherited conformances. Conformance lookup with an
  // archetype T that has a superclass bound C will return a concrete
  // conformance if C conforms to the protocol P. This is wrapped in an
  // inherited conformance with the archetype type T. If you then substitute
  // T := C, you don't want to form an inherited conformance with a type of
  // C, because the underlying conformance already has a type of C.
  if (inherited->getType()->isEqual(type))
    return inherited;

  llvm::FoldingSetNodeID id;
  InheritedProtocolConformance::Profile(id, type, inherited);

  // Figure out which arena this conformance should go into.
  AllocationArena arena = getArena(type->getRecursiveProperties());

  // Did we already record the inherited protocol conformance?
  void *insertPos;
  auto &inheritedConformances = getImpl().getArena(arena).InheritedConformances;
  if (auto result = inheritedConformances.FindNodeOrInsertPos(id, insertPos))
    return result;

  // Build a new inherited protocol conformance.
  auto result = new (*this, arena) InheritedProtocolConformance(type, inherited);
  inheritedConformances.InsertNode(result, insertPos);
  return result;
}

/// Produce the uniqued PackConformance of \p conformingType to \p protocol,
/// carrying one element conformance per pack element.
PackConformance *PackConformance::get(PackType *conformingType,
                                      ProtocolDecl *protocol,
                                      ArrayRef<ProtocolConformanceRef> conformances) {
  auto properties = conformingType->getRecursiveProperties();

  // Fold in the properties of each concrete element conformance's type so the
  // arena choice accounts for all of them.
  for (auto conformance : conformances) {
    if (conformance.isAbstract() || conformance.isInvalid())
      continue;

    auto *concrete = conformance.getConcrete();
    properties |= concrete->getType()->getRecursiveProperties();
  }

  auto &ctx = protocol->getASTContext();

  llvm::FoldingSetNodeID id;
  PackConformance::Profile(id, conformingType, protocol, conformances);

  // Figure out which arena this conformance should go into.
  AllocationArena arena = getArena(properties);

  // Did we already record the pack conformance?
  void *insertPos;
  auto &packConformances = ctx.getImpl().getArena(arena).PackConformances;
  if (auto result = packConformances.FindNodeOrInsertPos(id, insertPos))
    return result;

  // Build a new pack conformance. Element conformances are stored as
  // trailing objects.
  auto size = totalSizeToAlloc<ProtocolConformanceRef>(conformances.size());
  auto mem = ctx.Allocate(size, alignof(PackConformance), arena);

  auto result = new (mem) PackConformance(conformingType, protocol,
                                          conformances);
  auto node = packConformances.FindNodeOrInsertPos(id, insertPos);
  (void)node;
  assert(!node);
  packConformances.InsertNode(result, insertPos);

  return result;
}

/// Return the lazy-member-loading bookkeeping for \p decl, or null if none
/// has been created.
LazyContextData *ASTContext::getLazyContextData(const Decl *decl) const {
  return getImpl().LazyContexts.lookup(decl);
}

/// Return the lazy context data for \p decl, creating it with \p lazyLoader
/// on first request.
LazyContextData *
ASTContext::getOrCreateLazyContextData(const Decl *decl,
                                       LazyMemberLoader *lazyLoader) {
  if (auto *data = getLazyContextData(decl)) {
    // Make sure we didn't provide an incompatible lazy loader.
    ASSERT(!lazyLoader || lazyLoader == data->loader);
    return data;
  }

  LazyContextData *&entry = getImpl().LazyContexts[decl];

  // Create new lazy context data with the given loader.
  ASSERT(lazyLoader && "Queried lazy data for non-lazy iterable context");
  // Pick the payload variant that matches the kind of declaration.
  if (isa<ProtocolDecl>(decl)) {
    entry = Allocate<LazyProtocolData>();
  } else if (isa<NominalTypeDecl>(decl) || isa<ExtensionDecl>(decl)) {
    entry = Allocate<LazyIterableDeclContextData>();
  } else if (isa<OpaqueTypeDecl>(decl)) {
    entry = Allocate<LazyOpaqueTypeData>();
  } else {
    ASSERT(isa<AssociatedTypeDecl>(decl));
    entry = Allocate<LazyAssociatedTypeData>();
  }
  entry->loader = lazyLoader;
  return entry;
}

/// Convenience wrapper: dispatch to the extension or nominal overload of
/// getOrCreateLazyContextData for an iterable decl context.
LazyIterableDeclContextData *ASTContext::getOrCreateLazyIterableContextData(
    const IterableDeclContext *idc, LazyMemberLoader *lazyLoader) {
  if (auto ext = dyn_cast<ExtensionDecl>(idc)) {
    return (LazyIterableDeclContextData *)getOrCreateLazyContextData(
        ext, lazyLoader);
  }

  auto nominal = cast<NominalTypeDecl>(idc);
  return (LazyIterableDeclContextData *)getOrCreateLazyContextData(nominal,
                                                                   lazyLoader);
}

/// Whether \p conformance (or, if null, any conformance) has recorded a
/// delayed error diagnostic.
bool ASTContext::hasDelayedConformanceErrors(
    NormalProtocolConformance const* conformance) const {
  if (conformance) {
    auto entry = getImpl().DelayedConformanceDiags.find(conformance);
    if (entry !=
getImpl().DelayedConformanceDiags.end())
      return entry->second.HadError;
    return false; // unknown conformance, so no delayed diags either.
  }

  // check all conformances for any delayed errors
  for (const auto &entry : getImpl().DelayedConformanceDiags) {
    auto const& diagnostics = entry.getSecond();
    if (diagnostics.HadError)
      return true;
  }
  return false;
}

ASTContext::MissingWitness::MissingWitness(ValueDecl *requirement,
                                           ArrayRef<RequirementMatch> matches)
  : requirement(requirement),
    matches(matches.begin(), matches.end()) { }

/// Emit a generic "type does not conform" diagnostic now when the detailed
/// delayed diagnostics for \p conformance would otherwise never be replayed
/// (conformance declared in a non-primary file, or lazy typechecking).
static void maybeEmitFallbackConformanceDiagnostic(
    ASTContext &ctx, NormalProtocolConformance *conformance,
    DelayedConformanceDiags &diagnostics) {
  // Only emit the fallback once per conformance.
  if (diagnostics.HadError)
    return;

  auto *proto = conformance->getProtocol();
  auto *dc = conformance->getDeclContext();
  auto *sf = dc->getParentSourceFile();
  // FIXME: There should probably still be a diagnostic even without a file.
  if (!sf)
    return;

  auto *mod = sf->getParentModule();
  assert(mod->isMainModule());
  diagnostics.HadError = true;

  // If we have at least one primary file and the conformance is declared in a
  // non-primary file, emit a fallback diagnostic.
  if ((!sf->isPrimary() && !mod->getPrimarySourceFiles().empty()) ||
      ctx.TypeCheckerOpts.EnableLazyTypecheck) {
    // Prefer a location inside a primary file, so the complaint appears where
    // the user is actually working.
    auto complainLoc = ctx.evaluator.getInnermostSourceLoc([&](SourceLoc loc) {
      if (loc.isInvalid())
        return false;

      auto *otherSF = mod->getSourceFileContainingLocation(loc);
      if (otherSF == nullptr)
        return false;

      return otherSF->isPrimary();
    });
    if (complainLoc.isInvalid()) {
      complainLoc = conformance->getLoc();
    }
    ctx.Diags.diagnose(complainLoc, diag::type_does_not_conform,
                       dc->getSelfInterfaceType(),
                       proto->getDeclaredInterfaceType());
  }
}

/// Queue a diagnostic callback to be replayed later for \p conformance.
/// An error diagnostic also marks the conformance invalid.
void ASTContext::addDelayedConformanceDiag(
    NormalProtocolConformance *conformance, bool isError,
    std::function<void(NormalProtocolConformance *)> callback) {
  if (isError)
    conformance->setInvalid();

  auto &diagnostics = getImpl().DelayedConformanceDiags[conformance];

  if (isError)
    maybeEmitFallbackConformanceDiagnostic(*this, conformance, diagnostics);

  diagnostics.Diags.push_back({isError, callback});
}

/// Record a missing witness for \p conformance; always invalidates it.
void ASTContext::addDelayedMissingWitness(
    NormalProtocolConformance *conformance,
    ASTContext::MissingWitness missingWitness) {
  conformance->setInvalid();

  auto &diagnostics = getImpl().DelayedConformanceDiags[conformance];
  maybeEmitFallbackConformanceDiagnostic(*this, conformance, diagnostics);
  diagnostics.MissingWitnesses.push_back(missingWitness);
}

/// Remove and return the delayed missing witnesses for \p conformance,
/// leaving the stored list empty.
std::vector<ASTContext::MissingWitness>
ASTContext::takeDelayedMissingWitnesses(
    NormalProtocolConformance *conformance) {
  std::vector<ASTContext::MissingWitness> result;
  auto known = getImpl().DelayedConformanceDiags.find(conformance);
  if (known != getImpl().DelayedConformanceDiags.end()) {
    auto &diagnostics = known->second;
    // Transfer ownership to the caller via swap.
    std::swap(result, diagnostics.MissingWitnesses);
  }
  return result;
}

/// Remove and return the delayed diagnostic callbacks for \p cnfrm.
std::vector<ASTContext::DelayedConformanceDiag>
ASTContext::takeDelayedConformanceDiags(NormalProtocolConformance const* cnfrm){
  std::vector<ASTContext::DelayedConformanceDiag> result;
  auto known = getImpl().DelayedConformanceDiags.find(cnfrm);
  if (known != getImpl().DelayedConformanceDiags.end()) {
    auto &diagnostics = known->second;
    std::swap(result, diagnostics.Diags);
  }
  return result;
}

/// Rough accounting of this context's memory footprint. Several side tables
/// are deliberately unaccounted for (see the commented-out entries).
size_t ASTContext::getTotalMemory() const {
  size_t Size = sizeof(*this) +
    // LoadedModules ?
    llvm::capacity_in_bytes(CanonicalGenericTypeParamTypeNames) +
    // RemappedTypes ?
    sizeof(getImpl()) +
    getImpl().Allocator.getTotalMemory() +
    getImpl().Cleanups.capacity() +
    llvm::capacity_in_bytes(getImpl().ModuleLoaders) +
    llvm::capacity_in_bytes(getImpl().ModuleTypes) +
    // getImpl().GenericParamTypes ?
    // getImpl().GenericFunctionTypes ?
    // getImpl().SILFunctionTypes ?
    llvm::capacity_in_bytes(getImpl().SILBlockStorageTypes) +
    llvm::capacity_in_bytes(getImpl().BuiltinIntegerTypes) +
    // getImpl().ProtocolCompositionTypes ?
    // getImpl().BuiltinVectorTypes ?
    // getImpl().GenericSignatures ?
    // getImpl().CompoundNames ?
    // getImpl().IntegerTypes ?
    // getImpl().NormalConformances ?
    // getImpl().SelfConformances ?
    // getImpl().AvailabilityContexts
    getImpl().Permanent.getTotalMemory();

  Size += getSolverMemory();

  return Size;
}

/// Memory held by the active constraint-solver arena, if one exists.
size_t ASTContext::getSolverMemory() const {
  size_t Size = 0;

  if (getImpl().CurrentConstraintSolverArena) {
    Size += getImpl().CurrentConstraintSolverArena->getTotalMemory();
    Size += getImpl().CurrentConstraintSolverArena->Allocator.getBytesAllocated();
  }

  return Size;
}

size_t ASTContext::Implementation::Arena::getTotalMemory() const {
  return sizeof(*this) +
    // TupleTypes ?
llvm::capacity_in_bytes(MetatypeTypes) +
    llvm::capacity_in_bytes(ExistentialMetatypeTypes) +
    llvm::capacity_in_bytes(ArraySliceTypes) +
    llvm::capacity_in_bytes(DictionaryTypes) +
    llvm::capacity_in_bytes(OptionalTypes) +
    llvm::capacity_in_bytes(VariadicSequenceTypes) +
    llvm::capacity_in_bytes(ReferenceStorageTypes) +
    llvm::capacity_in_bytes(LValueTypes) +
    llvm::capacity_in_bytes(InOutTypes) +
    llvm::capacity_in_bytes(DependentMemberTypes) +
    llvm::capacity_in_bytes(EnumTypes) +
    llvm::capacity_in_bytes(StructTypes) +
    llvm::capacity_in_bytes(ClassTypes) +
    llvm::capacity_in_bytes(ProtocolTypes) +
    llvm::capacity_in_bytes(DynamicSelfTypes) +
    OpaqueArchetypeEnvironments.getMemorySize() +
    OpenedExistentialEnvironments.getMemorySize();
    // Unaccounted-for tables:
    // FunctionTypes ?
    // UnboundGenericTypes ?
    // BoundGenericTypes ?
    // SpecializedConformances ?
    // InheritedConformances ?
    // BuiltinConformances ?
}

/// Dump per-table entry counts (and byte capacities where cheap to compute)
/// for this arena, one tab-separated row per table: name, size, bytes.
void ASTContext::Implementation::Arena::dump(llvm::raw_ostream &os) const {
  // SIZE emits a 0 byte column for tables whose capacity is not measured.
#define SIZE(Name) os << #Name << "\t" << Name.size() << "\t0\n"
#define SIZE_AND_BYTES(Name) os << #Name << "\t" \
                                << Name.size() << "\t" \
                                << llvm::capacity_in_bytes(Name) << "\n"
  SIZE_AND_BYTES(ErrorTypesWithOriginal);
  SIZE(TypeAliasTypes);
  SIZE(LocatableTypes);
  SIZE(TupleTypes);
  SIZE(PackTypes);
  SIZE(PackExpansionTypes);
  SIZE(PackElementTypes);
  SIZE_AND_BYTES(MetatypeTypes);
  SIZE_AND_BYTES(ExistentialMetatypeTypes);
  SIZE_AND_BYTES(ArraySliceTypes);
  SIZE_AND_BYTES(VariadicSequenceTypes);
  SIZE_AND_BYTES(DictionaryTypes);
  SIZE_AND_BYTES(OptionalTypes);
  SIZE_AND_BYTES(ReferenceStorageTypes);
  SIZE_AND_BYTES(LValueTypes);
  SIZE_AND_BYTES(InOutTypes);
  SIZE_AND_BYTES(DependentMemberTypes);
  SIZE(ErrorUnionTypes);
  SIZE_AND_BYTES(PlaceholderTypes);
  SIZE_AND_BYTES(DynamicSelfTypes);
  SIZE_AND_BYTES(EnumTypes);
  SIZE_AND_BYTES(StructTypes);
  SIZE_AND_BYTES(ClassTypes);
  SIZE_AND_BYTES(ProtocolTypes);
  SIZE_AND_BYTES(ExistentialTypes);
  SIZE(UnboundGenericTypes);
  SIZE(BoundGenericTypes);
  SIZE(ProtocolCompositionTypes);
SIZE(ParameterizedProtocolTypes); SIZE(LayoutConstraints); SIZE_AND_BYTES(OpaqueArchetypeEnvironments); SIZE_AND_BYTES(OpenedExistentialEnvironments); SIZE(FunctionTypes); SIZE(SpecializedConformances); SIZE(InheritedConformances); SIZE_AND_BYTES(BuiltinConformances); SIZE(PackConformances); SIZE(SubstitutionMaps); SIZE(AbstractConformances); #undef SIZE #undef SIZE_AND_BYTES } void AbstractFunctionDecl::setForeignErrorConvention( const ForeignErrorConvention &conv) { assert(hasThrows() && "setting error convention on non-throwing decl"); auto &conventionsMap = getASTContext().getImpl().ForeignErrorConventions; assert(!conventionsMap.count(this) && "error convention already set"); conventionsMap.insert({this, conv}); } std::optional<ForeignErrorConvention> AbstractFunctionDecl::getForeignErrorConvention() const { if (!hasThrows()) return std::nullopt; auto &conventionsMap = getASTContext().getImpl().ForeignErrorConventions; auto it = conventionsMap.find(this); if (it == conventionsMap.end()) return std::nullopt; return it->second; } void AbstractFunctionDecl::setForeignAsyncConvention( const ForeignAsyncConvention &conv) { assert(hasAsync() && "setting error convention on non-throwing decl"); auto &conventionsMap = getASTContext().getImpl().ForeignAsyncConventions; assert(!conventionsMap.count(this) && "error convention already set"); conventionsMap.insert({this, conv}); } std::optional<ForeignAsyncConvention> AbstractFunctionDecl::getForeignAsyncConvention() const { if (!hasAsync()) return std::nullopt; auto &conventionsMap = getASTContext().getImpl().ForeignAsyncConventions; auto it = conventionsMap.find(this); if (it == conventionsMap.end()) return std::nullopt; return it->second; } std::optional<KnownFoundationEntity> swift::getKnownFoundationEntity(StringRef name) { return llvm::StringSwitch<std::optional<KnownFoundationEntity>>(name) #define FOUNDATION_ENTITY(Name) .Case(#Name, KnownFoundationEntity::Name) #include "swift/AST/KnownFoundationEntities.def" 
.Default(std::nullopt);
}

/// Return the (Objective-C-compatible) name of a known Foundation entity.
StringRef swift::getSwiftName(KnownFoundationEntity kind) {
  StringRef objcName;
  switch (kind) {
#define FOUNDATION_ENTITY(Name) case KnownFoundationEntity::Name:  \
    objcName = #Name;                                             \
    break;
#include "swift/AST/KnownFoundationEntities.def"
  }

  return objcName;
}

//===----------------------------------------------------------------------===//
// Type manipulation routines.
//===----------------------------------------------------------------------===//

TypeAliasType::TypeAliasType(TypeAliasDecl *typealias, Type parent,
                             ArrayRef<Type> genericArgs,
                             Type underlying,
                             RecursiveTypeProperties properties)
    : SugarType(TypeKind::TypeAlias, underlying, properties),
      typealias(typealias) {
  // Record the parent (or absence of a parent). The parent, when present,
  // occupies the first trailing-object slot.
  if (parent) {
    Bits.TypeAliasType.HasParent = true;
    *getTrailingObjects() = parent;
  } else {
    Bits.TypeAliasType.HasParent = false;
  }

  auto *params = typealias->getGenericParams();
  unsigned count = genericArgs.size();

  // Record the generic arguments after the (optional) parent slot.
  if (count > 0) {
    ASSERT(params->size() == count);
    Bits.TypeAliasType.GenericArgCount = count;
    std::uninitialized_copy(genericArgs.begin(), genericArgs.end(),
                            getTrailingObjects() + (parent ? 1 : 0));
  } else {
    ASSERT(params == nullptr);
    Bits.TypeAliasType.GenericArgCount = 0;
  }
}

/// Return the uniqued sugared typealias type for \p typealias applied to
/// \p parent / \p genericArgs, desugaring to \p underlying.
TypeAliasType *TypeAliasType::get(TypeAliasDecl *typealias, Type parent,
                                  ArrayRef<Type> genericArgs,
                                  Type underlying) {
  // Compute the recursive properties.
  auto properties = underlying->getRecursiveProperties();
  if (parent)
    properties |= parent->getRecursiveProperties();

  for (auto arg : genericArgs)
    properties |= arg->getRecursiveProperties();

  // Figure out which arena this type will go into.
  auto &ctx = underlying->getASTContext();
  auto arena = getArena(properties);

  // Typealiases can't meaningfully be unsafe; it's the underlying type that
  // matters.
  properties.removeIsUnsafe();
  if (underlying->isUnsafe())
    properties |= RecursiveTypeProperties::IsUnsafe;

  // Profile the type.
  llvm::FoldingSetNodeID id;
  TypeAliasType::Profile(id, typealias, parent, genericArgs, underlying);

  // Did we already record this type?
  void *insertPos;
  auto &types = ctx.getImpl().getArena(arena).TypeAliasTypes;
  if (auto result = types.FindNodeOrInsertPos(id, insertPos))
    return result;

  // Build a new type. Parent and generic arguments live in trailing storage.
  auto size = totalSizeToAlloc<Type>((parent ? 1 : 0) + genericArgs.size());
  auto mem = ctx.Allocate(size, alignof(TypeAliasType), arena);
  auto result = new (mem) TypeAliasType(typealias, parent, genericArgs,
                                        underlying, properties);
  types.InsertNode(result, insertPos);

  return result;
}

void TypeAliasType::Profile(llvm::FoldingSetNodeID &id) const {
  Profile(id, getDecl(), getParent(), getDirectGenericArgs(),
          Type(getSinglyDesugaredType()));
}

void TypeAliasType::Profile(
    llvm::FoldingSetNodeID &id,
    TypeAliasDecl *typealias, Type parent,
    ArrayRef<Type> genericArgs, Type underlying) {
  id.AddPointer(typealias);
  id.AddPointer(parent.getPointer());
  id.AddInteger(genericArgs.size());
  for (auto arg : genericArgs)
    id.AddPointer(arg.getPointer());
  id.AddPointer(underlying.getPointer());
}

LocatableType::LocatableType(SourceLoc loc, Type underlying,
                             RecursiveTypeProperties properties)
    : SugarType(TypeKind::Locatable, underlying, properties), Loc(loc) {
  ASSERT(loc.isValid());
}

/// Return the uniqued sugared type pairing \p underlying with the source
/// location \p loc.
LocatableType *LocatableType::get(SourceLoc loc, Type underlying) {
  auto properties = underlying->getRecursiveProperties();

  // Figure out which arena this type will go into.
  auto &ctx = underlying->getASTContext();
  auto arena = getArena(properties);

  // Profile the type.
  llvm::FoldingSetNodeID id;
  LocatableType::Profile(id, loc, underlying);

  // Did we already record this type?
  void *insertPos;
  auto &types = ctx.getImpl().getArena(arena).LocatableTypes;
  if (auto result = types.FindNodeOrInsertPos(id, insertPos))
    return result;

  // Build a new type.
auto result = new (ctx, arena) LocatableType(loc, underlying, properties); types.InsertNode(result, insertPos); return result; } void LocatableType::Profile(llvm::FoldingSetNodeID &id) const { Profile(id, Loc, Type(getSinglyDesugaredType())); } void LocatableType::Profile(llvm::FoldingSetNodeID &id, SourceLoc loc, Type underlying) { id.AddPointer(loc.getOpaquePointerValue()); id.AddPointer(underlying.getPointer()); } // Simple accessors. Type ErrorType::get(const ASTContext &C) { return C.TheErrorType; } static Type replacingTypeVariablesAndPlaceholders(Type ty) { if (!ty || !ty->hasTypeVariableOrPlaceholder()) return ty; struct Transform : public TypeTransform<Transform> { Transform(ASTContext &ctx) : TypeTransform(ctx) {} std::optional<Type> transform(TypeBase *ty, TypePosition) { if (!ty->hasTypeVariableOrPlaceholder()) return ty; if (isa<TypeVariableType>(ty) || isa<PlaceholderType>(ty)) return ErrorType::get(ctx); return std::nullopt; } std::pair<Type, /*sendable*/ bool> transformSendableDependentType(Type ty) { // Fold away the sendable dependence if present, the function type will // just become non-Sendable. return std::make_pair(Type(), false); } }; return Transform(ty->getASTContext()).doIt(ty, TypePosition::Invariant); } Type ErrorType::get(Type originalType) { // The original type is only used for printing/debugging, and we don't support // solver-allocated ErrorTypes. As such, fold any type variables and // placeholders into ErrorTypes. If we have a top-level one, we can return // that directly. 
originalType = replacingTypeVariablesAndPlaceholders(originalType); if (isa<ErrorType>(originalType.getPointer())) return originalType; ASSERT(originalType); ASSERT(!originalType->getRecursiveProperties().isSolverAllocated() && "Solver-allocated error types not supported"); auto originalProperties = originalType->getRecursiveProperties(); auto arena = getArena(originalProperties); auto &ctx = originalType->getASTContext(); auto &entry = ctx.getImpl().getArena(arena).ErrorTypesWithOriginal[originalType]; if (entry) return entry; void *mem = ctx.Allocate(sizeof(ErrorType) + sizeof(Type), alignof(ErrorType), arena); return entry = new (mem) ErrorType(ctx, originalType); } void ErrorUnionType::Profile(llvm::FoldingSetNodeID &id, ArrayRef<Type> terms) { id.AddInteger(terms.size()); for (auto term : terms) { id.AddPointer(term.getPointer()); } } Type ErrorUnionType::get(const ASTContext &ctx, ArrayRef<Type> terms) { // Peep-hole the simple cases. Error union types are always synthesized by // the type checker and never written explicitly, so we have no use for // extra type sugar around them. switch (terms.size()) { case 0: return ctx.getNeverType(); case 1: return terms[0]; default: break; } // Determine canonicality and recursive type properties. bool isCanonical = true; RecursiveTypeProperties properties; for (Type term : terms) { if (!term->isCanonical()) isCanonical = false; properties |= term->getRecursiveProperties(); } // Check whether we've seen this type before. auto arena = getArena(properties); void *insertPos = nullptr; llvm::FoldingSetNodeID id; ErrorUnionType::Profile(id, terms); if (auto knownTy = ctx.getImpl().getArena(arena).ErrorUnionTypes .FindNodeOrInsertPos(id, insertPos)) return knownTy; // Use trailing objects for term storage. auto size = totalSizeToAlloc<Type>(terms.size()); auto mem = ctx.Allocate(size, alignof(ErrorUnionType), arena); auto unionTy = new (mem) ErrorUnionType(isCanonical ? 
&ctx : nullptr, terms, properties); ctx.getImpl().getArena(arena).ErrorUnionTypes.InsertNode(unionTy, insertPos); return unionTy; } Type PlaceholderType::get(ASTContext &ctx, Originator originator) { assert(originator); auto originatorProps = [&]() -> RecursiveTypeProperties { if (auto *tv = originator.dyn_cast<TypeVariableType *>()) return tv->getRecursiveProperties(); if (auto *depTy = originator.dyn_cast<DependentMemberType *>()) return depTy->getRecursiveProperties(); if (auto *errTy = originator.dyn_cast<ErrorType *>()) return errTy->getRecursiveProperties(); return RecursiveTypeProperties(); }(); auto arena = getArena(originatorProps); auto &cache = ctx.getImpl().getArena(arena).PlaceholderTypes; auto &entry = cache[originator.getOpaqueValue()]; if (entry) return entry; RecursiveTypeProperties properties = RecursiveTypeProperties::HasPlaceholder; // We need to preserve the solver allocated bit, to ensure any wrapping // types are solver allocated too. if (originatorProps.isSolverAllocated()) properties |= RecursiveTypeProperties::SolverAllocated; entry = new (ctx, arena) PlaceholderType(ctx, originator, properties); return entry; } IntegerType *IntegerType::get(StringRef value, bool isNegative, const ASTContext &ctx) { llvm::FoldingSetNodeID id; IntegerType::Profile(id, value, isNegative); void *insertPos; if (auto intType = ctx.getImpl().IntegerTypes.FindNodeOrInsertPos(id, insertPos)) { return intType; } auto strCopy = ctx.AllocateCopy(value); auto intType = new (ctx, AllocationArena::Permanent) IntegerType(strCopy, isNegative, ctx); ctx.getImpl().IntegerTypes.InsertNode(intType, insertPos); return intType; } BuiltinIntegerType *BuiltinIntegerType::get(BuiltinIntegerWidth BitWidth, const ASTContext &C) { assert(!BitWidth.isArbitraryWidth()); BuiltinIntegerType *&Result = C.getImpl().BuiltinIntegerTypes[BitWidth]; if (Result == nullptr) Result = new (C, AllocationArena::Permanent) BuiltinIntegerType(BitWidth,C); return Result; } BuiltinUnboundGenericType * 
BuiltinUnboundGenericType::get(TypeKind genericTypeKind,
                               const ASTContext &C) {
  // One cached instance per builtin generic type kind.
  BuiltinUnboundGenericType *&Result =
      C.getImpl().BuiltinUnboundGenericTypes[unsigned(genericTypeKind)];
  if (Result == nullptr) {
    Result = new (C, AllocationArena::Permanent)
        BuiltinUnboundGenericType(C, genericTypeKind);
  }
  return Result;
}

/// Return the uniqued Builtin.FixedArray type for the given size and
/// element types.
BuiltinFixedArrayType *BuiltinFixedArrayType::get(CanType Size,
                                                  CanType ElementType) {
  RecursiveTypeProperties properties;
  properties |= Size->getRecursiveProperties();
  properties |= ElementType->getRecursiveProperties();
  AllocationArena arena = getArena(properties);

  llvm::FoldingSetNodeID id;
  BuiltinFixedArrayType::Profile(id, Size, ElementType);

  auto &ctx = Size->getASTContext();
  void *insertPos;
  if (BuiltinFixedArrayType *faTy =
          ctx.getImpl().getArena(arena).BuiltinFixedArrayTypes
             .FindNodeOrInsertPos(id, insertPos))
    return faTy;

  BuiltinFixedArrayType *faTy =
      new (ctx, arena) BuiltinFixedArrayType(Size, ElementType, properties);
  ctx.getImpl().getArena(arena).BuiltinFixedArrayTypes
     .InsertNode(faTy, insertPos);

  return faTy;
}

/// Return the uniqued builtin borrow type wrapping \p Referent.
CanBuiltinBorrowType BuiltinBorrowType::get(CanType Referent) {
  RecursiveTypeProperties properties;
  properties |= Referent->getRecursiveProperties();
  AllocationArena arena = getArena(properties);

  llvm::FoldingSetNodeID id;
  BuiltinBorrowType::Profile(id, Referent);

  auto &ctx = Referent->getASTContext();
  void *insertPos;
  if (BuiltinBorrowType *faTy =
          ctx.getImpl().getArena(arena).BuiltinBorrowTypes
             .FindNodeOrInsertPos(id, insertPos))
    return CanBuiltinBorrowType(faTy);

  BuiltinBorrowType *faTy =
      new (ctx, arena) BuiltinBorrowType(Referent, properties);
  ctx.getImpl().getArena(arena).BuiltinBorrowTypes
     .InsertNode(faTy, insertPos);

  return CanBuiltinBorrowType(faTy);
}

/// Return the uniqued Builtin.VecNxT type; element type must be canonical.
BuiltinVectorType *BuiltinVectorType::get(const ASTContext &context,
                                          Type elementType,
                                          unsigned numElements) {
  llvm::FoldingSetNodeID id;
  BuiltinVectorType::Profile(id, elementType, numElements);

  void *insertPos;
  if (BuiltinVectorType *vecType =
          context.getImpl().BuiltinVectorTypes.FindNodeOrInsertPos(id, insertPos))
    return vecType;

  assert(elementType->isCanonical() && "Non-canonical builtin vector?");
  BuiltinVectorType *vecTy = new (context, AllocationArena::Permanent)
      BuiltinVectorType(context, elementType, numElements);
  context.getImpl().BuiltinVectorTypes.InsertNode(vecTy, insertPos);
  return vecTy;
}

CanTupleType TupleType::getEmpty(const ASTContext &C) {
  return cast<TupleType>(CanType(C.TheEmptyTupleType));
}

void TupleType::Profile(llvm::FoldingSetNodeID &ID,
                        ArrayRef<TupleTypeElt> Fields) {
  ID.AddInteger(Fields.size());
  for (const TupleTypeElt &Elt : Fields) {
    ID.AddPointer(Elt.Name.get());
    ID.AddPointer(Elt.getType().getPointer());
  }
}

/// getTupleType - Return the uniqued tuple type with the specified elements.
TupleType *TupleType::get(ArrayRef<TupleTypeElt> Fields, const ASTContext &C) {
  RecursiveTypeProperties properties;
  for (const TupleTypeElt &Elt : Fields) {
    auto eltTy = Elt.getType();
    if (!eltTy) continue;
    properties |= eltTy->getRecursiveProperties();
  }

  auto arena = getArena(properties);

  void *InsertPos = nullptr;
  // Check to see if we've already seen this tuple before.
  llvm::FoldingSetNodeID ID;
  TupleType::Profile(ID, Fields);

  if (TupleType *TT
        = C.getImpl().getArena(arena).TupleTypes.FindNodeOrInsertPos(ID,InsertPos))
    return TT;

  bool IsCanonical = true;   // All canonical elts means this is canonical.
  for (const TupleTypeElt &Elt : Fields) {
    if (Elt.getType().isNull() || !Elt.getType()->isCanonical()) {
      IsCanonical = false;
      break;
    }
  }

  size_t bytes = totalSizeToAlloc<TupleTypeElt>(Fields.size());
  // TupleType will copy the fields list into ASTContext owned memory.
  void *mem = C.Allocate(bytes, alignof(TupleType), arena);
  auto New = new (mem) TupleType(Fields, IsCanonical ? &C : nullptr,
                                 properties);
  C.getImpl().getArena(arena).TupleTypes.InsertNode(New, InsertPos);
  return New;
}

TupleTypeElt::TupleTypeElt(Type ty, Identifier name)
    : Name(name), ElementType(ty) {
  assert(!ty->is<InOutType>() && "Cannot have InOutType in a tuple");
}

PackExpansionType::PackExpansionType(Type patternType, Type countType,
                                     RecursiveTypeProperties properties,
                                     const ASTContext *canCtx)
    : TypeBase(TypeKind::PackExpansion, canCtx, properties),
      patternType(patternType), countType(countType) {}

CanPackExpansionType
CanPackExpansionType::get(CanType patternType, CanType countType) {
  return CanPackExpansionType(PackExpansionType::get(patternType, countType));
}

/// Return the uniqued pack-expansion type `repeat patternType` counted by
/// \p countType.
PackExpansionType *PackExpansionType::get(Type patternType, Type countType) {
  assert(!patternType->is<PackExpansionType>());
  assert(!countType->is<PackExpansionType>());

  // FIXME: stop doing this deliberately in PackExpansionMatcher
  //assert(!patternType->is<PackType>());
  //assert(!countType->is<PackType>());

  auto properties = patternType->getRecursiveProperties();
  properties |= countType->getRecursiveProperties();

  auto arena = getArena(properties);

  auto &context = patternType->getASTContext();
  llvm::FoldingSetNodeID id;
  PackExpansionType::Profile(id, patternType, countType);

  void *insertPos;
  if (PackExpansionType *expType =
        context.getImpl().getArena(arena)
          .PackExpansionTypes.FindNodeOrInsertPos(id, insertPos))
    return expType;

  // The canonical pack expansion type uses the canonical shape.
  // For interface types, we'd need a signature to do this properly,
  // but for archetypes we can do it directly.
  bool countIsCanonical = countType->isCanonical();
  if (countIsCanonical) {
    if (auto archetype = dyn_cast<PackArchetypeType>(countType.getPointer())) {
      auto reducedShape = archetype->getReducedShape();
      countIsCanonical = (reducedShape.getPointer() == archetype);
    }
  }

  const ASTContext *canCtx =
      (patternType->isCanonical() && countIsCanonical) ?
&context : nullptr;
  PackExpansionType *expansionType =
      new (context, arena) PackExpansionType(patternType, countType, properties,
                                             canCtx);
  context.getImpl().getArena(arena).PackExpansionTypes.InsertNode(expansionType,
                                                                  insertPos);
  return expansionType;
}

void PackExpansionType::Profile(llvm::FoldingSetNodeID &ID,
                                Type patternType, Type countType) {
  ID.AddPointer(patternType.getPointer());
  ID.AddPointer(countType.getPointer());
}

PackType *PackType::getEmpty(const ASTContext &C) {
  return cast<PackType>(CanType(C.TheEmptyPackType));
}

PackElementType::PackElementType(Type packType, unsigned level,
                                 RecursiveTypeProperties properties,
                                 const ASTContext *canCtx)
    : TypeBase(TypeKind::PackElement, canCtx, properties),
      packType(packType), level(level) {
  assert(packType->isParameterPack() ||
         packType->is<PackArchetypeType>() ||
         packType->isTypeVariableOrMember());
  // Level 0 would mean "not inside any pack expansion", which has no
  // element type.
  assert(level > 0);
}

/// Return the uniqued `each`-element reference into \p packType at the given
/// expansion nesting \p level.
PackElementType *PackElementType::get(Type packType, unsigned level) {
  auto properties = packType->getRecursiveProperties();
  auto arena = getArena(properties);

  auto &context = packType->getASTContext();
  llvm::FoldingSetNodeID id;
  PackElementType::Profile(id, packType, level);

  void *insertPos;
  if (PackElementType *elementType =
        context.getImpl().getArena(arena)
          .PackElementTypes.FindNodeOrInsertPos(id, insertPos))
    return elementType;

  const ASTContext *canCtx = packType->isCanonical() ? &context : nullptr;
  PackElementType *elementType =
      new (context, arena) PackElementType(packType, level, properties,
                                           canCtx);
  context.getImpl().getArena(arena).PackElementTypes.InsertNode(elementType,
                                                                insertPos);
  return elementType;
}

void PackElementType::Profile(llvm::FoldingSetNodeID &ID,
                              Type packType, unsigned level) {
  ID.AddPointer(packType.getPointer());
  ID.AddInteger(level);
}

CanPackType CanPackType::get(const ASTContext &C, ArrayRef<CanType> elements) {
  SmallVector<Type, 8> ncElements(elements.begin(), elements.end());
  return CanPackType(PackType::get(C, ncElements));
}

CanPackType CanPackType::get(const ASTContext &C,
                             CanTupleEltTypeArrayRef elements) {
  SmallVector<Type, 8> ncElements(elements.begin(), elements.end());
  return CanPackType(PackType::get(C, ncElements));
}

CanPackType CanPackType::get(const ASTContext &C,
                             AnyFunctionType::CanParamArrayRef params) {
  SmallVector<Type, 8> ncElements;
  ncElements.reserve(params.size());
  for (auto param : params) {
    ncElements.push_back(param.getParameterType());
  }
  return CanPackType(PackType::get(C, ncElements));
}

/// Return the uniqued pack type with the given elements; packs never nest
/// directly.
PackType *PackType::get(const ASTContext &C, ArrayRef<Type> elements) {
  RecursiveTypeProperties properties = RecursiveTypeProperties::HasPack;
  bool isCanonical = true;
  for (Type eltTy : elements) {
    assert(!eltTy->is<PackType>() &&
           "Cannot have pack directly inside another pack");

    properties |= eltTy->getRecursiveProperties();
    if (!eltTy->isCanonical())
      isCanonical = false;
  }

  auto arena = getArena(properties);

  void *InsertPos = nullptr;
  // Check to see if we've already seen this pack before.
  llvm::FoldingSetNodeID ID;
  PackType::Profile(ID, elements);

  if (PackType *TT
        = C.getImpl().getArena(arena).PackTypes.FindNodeOrInsertPos(ID,InsertPos))
    return TT;

  size_t bytes = totalSizeToAlloc<Type>(elements.size());
  // PackType copies the element list into ASTContext-owned trailing storage.
  void *mem = C.Allocate(bytes, alignof(PackType), arena);
  auto New = new (mem) PackType(elements, isCanonical ? &C : nullptr,
                                properties);
  C.getImpl().getArena(arena).PackTypes.InsertNode(New, InsertPos);
  return New;
}

void PackType::Profile(llvm::FoldingSetNodeID &ID, ArrayRef<Type> Elements) {
  ID.AddInteger(Elements.size());
  for (Type Ty : Elements) {
    ID.AddPointer(Ty.getPointer());
  }
}

/// Return the uniqued lowered SIL pack type; all elements must live in the
/// permanent arena.
CanSILPackType SILPackType::get(const ASTContext &C, ExtInfo info,
                                ArrayRef<CanType> elements) {
  RecursiveTypeProperties properties;
  for (CanType eltTy : elements) {
    assert(!isa<SILPackType>(eltTy) &&
           "Cannot have pack directly inside another pack");
    properties |= eltTy->getRecursiveProperties();
  }
  assert(getArena(properties) == AllocationArena::Permanent &&
         "SILPackType has elements requiring temporary allocation?");

  void *insertPos = nullptr;
  // Check to see if we've already seen this pack before.
  llvm::FoldingSetNodeID ID;
  SILPackType::Profile(ID, info, elements);

  if (SILPackType *existing
        = C.getImpl().SILPackTypes.FindNodeOrInsertPos(ID, insertPos))
    return CanSILPackType(existing);

  size_t bytes = totalSizeToAlloc<CanType>(elements.size());
  void *mem = C.Allocate(bytes, alignof(SILPackType));
  auto builtType = new (mem) SILPackType(C, properties, info, elements);
  C.getImpl().SILPackTypes.InsertNode(builtType, insertPos);
  return CanSILPackType(builtType);
}

void SILPackType::Profile(llvm::FoldingSetNodeID &ID, ExtInfo info,
                          ArrayRef<CanType> elements) {
  ID.AddBoolean(info.ElementIsAddress);
  ID.AddInteger(elements.size());
  for (CanType element : elements) {
    ID.AddPointer(element.getPointer());
  }
}

/// Reconstruct the pre-flags parameter type: inout parameters are wrapped
/// back into InOutType.
Type AnyFunctionType::Param::getOldType() const {
  if (Flags.isInOut()) return InOutType::get(Ty);
  return Ty;
}

AnyFunctionType::Param swift::computeSelfParam(AbstractFunctionDecl *AFD,
                                               bool isInitializingCtor,
                                               bool wantDynamicSelf) {
  auto *dc = AFD->getDeclContext();
  auto &Ctx = dc->getASTContext();

  // Determine the type of the container.
  // Bail out with an ErrorType parameter if the context is invalid.
  auto containerTy = dc->getDeclaredInterfaceType();
  if (!containerTy || containerTy->hasError())
    return AnyFunctionType::Param(ErrorType::get(Ctx));

  // Determine the type of 'self' inside the container.
  auto selfTy = dc->getSelfInterfaceType();
  if (!selfTy || selfTy->hasError())
    return AnyFunctionType::Param(ErrorType::get(Ctx));

  bool isStatic = false;
  SelfAccessKind selfAccess = SelfAccessKind::NonMutating;
  bool isDynamicSelf = false;

  if (auto *FD = dyn_cast<FuncDecl>(AFD)) {
    isStatic = FD->isStatic();
    selfAccess = FD->getSelfAccessKind();

    // `self`s type for subscripts and properties
    if (auto *AD = dyn_cast<AccessorDecl>(AFD)) {
      if (wantDynamicSelf &&
          AD->getStorage()->getValueInterfaceType()->hasDynamicSelfType())
        isDynamicSelf = true;
    }
    // Methods returning 'Self' have a dynamic 'self'.
    //
    // FIXME: All methods of non-final classes should have this.
    else if (wantDynamicSelf &&
             FD->getResultInterfaceType()->hasDynamicSelfType())
      isDynamicSelf = true;
  } else if (auto *CD = dyn_cast<ConstructorDecl>(AFD)) {
    if (isInitializingCtor) {
      // initializing constructors of value types always have an implicitly
      // inout self.
      if (!containerTy->hasReferenceSemantics())
        selfAccess = SelfAccessKind::Mutating;

      // FIXME(distributed): pending swift-evolution, allow `self =` in class
      // inits in general.
      // See also: https://github.com/apple/swift/pull/19151 general impl
      auto ext = dyn_cast<ExtensionDecl>(AFD->getDeclContext());
      auto distProto = Ctx.getProtocol(KnownProtocolKind::DistributedActor);
      if (distProto && ext && ext->getExtendedNominal() &&
          ext->getExtendedNominal()->getInterfaceType()
              ->isEqual(distProto->getInterfaceType())) {
        auto name = CD->getName();
        auto params = name.getArgumentNames();
        if (params.size() == 1 && params[0] == Ctx.Id_from) {
          // FIXME(distributed): this is a workaround to allow init(from:) to
          // be implemented in AST by allowing the self to be mutable in the
          // decoding initializer. This should become a general Swift
          // feature, allowing this in all classes:
          // https://forums.swift.org/t/allow-self-x-in-class-convenience-initializers/15924
          selfAccess = SelfAccessKind::Mutating;
        }
      }
    } else {
      // allocating constructors have metatype 'self'.
      isStatic = true;
    }

    // Convenience initializers have a dynamic 'self' in '-swift-version 5'.
    //
    // NOTE: it's important that we check if it's a convenience init only after
    // confirming it's not semantically final, or else there can be a request
    // evaluator cycle to determine the init kind for actors, which are final.
    if (Ctx.isLanguageModeAtLeast(LanguageMode::v5)) {
      if (wantDynamicSelf)
        if (auto *classDecl = selfTy->getClassOrBoundGenericClass())
          if (!classDecl->isSemanticallyFinal() && CD->isConvenienceInit())
            isDynamicSelf = true;
    }
  } else if (isa<DestructorDecl>(AFD)) {
    // Destructors only correctly appear on classes today. (If move-only types
    // have destructors, they probably would want to consume self.)
    // Note that we can't assert(containerTy->hasReferenceSemantics()) here
    // since incorrect or incomplete code could have deinit decls in invalid
    // contexts, and we need to recover gracefully in those cases.
  }

  if (isDynamicSelf)
    selfTy = DynamicSelfType::get(selfTy, Ctx);

  // 'static' functions have 'self' of type metatype<T>.
  if (isStatic)
    return AnyFunctionType::Param(MetatypeType::get(selfTy, Ctx));

  // `self` is isolated if typechecker says the function is isolated to it.
bool isIsolated = evaluateOrDefault(Ctx.evaluator, HasIsolatedSelfRequest{AFD}, false); auto flags = ParameterTypeFlags().withIsolated(isIsolated); switch (selfAccess) { case SelfAccessKind::LegacyConsuming: flags = flags.withOwnershipSpecifier(ParamSpecifier::LegacyOwned); break; case SelfAccessKind::Consuming: flags = flags.withOwnershipSpecifier(ParamSpecifier::Consuming); break; case SelfAccessKind::Borrowing: flags = flags.withOwnershipSpecifier(ParamSpecifier::Borrowing); break; case SelfAccessKind::Mutating: flags = flags.withInOut(true); break; case SelfAccessKind::NonMutating: // The default flagless state. break; } if (AFD->getAttrs().hasAttribute<AddressableSelfAttr>()) { flags = flags.withAddressable(true); } return AnyFunctionType::Param(selfTy, Identifier(), flags); } void UnboundGenericType::Profile(llvm::FoldingSetNodeID &ID, GenericTypeDecl *TheDecl, Type Parent) { ID.AddPointer(TheDecl); ID.AddPointer(Parent.getPointer()); } /// The safety of a parent type does not have an impact on a nested type within /// it. This produces the recursive properties of a given type that should /// be propagated to a nested type, which won't include any "IsUnsafe" bit /// determined based on the declaration itself. static RecursiveTypeProperties getRecursivePropertiesAsParent(Type type) { if (!type) return RecursiveTypeProperties(); // We only need to do anything interesting at all for unsafe types. auto properties = type->getRecursiveProperties(); if (!properties.isUnsafe()) return properties; if (auto nominal = type->getAnyNominal()) { // If the nominal wasn't itself unsafe, then we got the unsafety from // something else (e.g., a generic argument), so it won't change. if (nominal->getExplicitSafety() != ExplicitSafety::Unsafe) return properties; } // Drop the "unsafe" bit. We have to recompute it without considering the // enclosing nominal type. properties.removeIsUnsafe(); // Check generic arguments of parent types. 
while (type) { // Merge from the generic arguments. if (auto boundGeneric = type->getAs<BoundGenericType>()) { for (auto genericArg : boundGeneric->getGenericArgs()) properties |= genericArg->getRecursiveProperties(); } if (auto nominalOrBound = type->getAs<NominalOrBoundGenericNominalType>()) { type = nominalOrBound->getParent(); continue; } if (auto unbound = type->getAs<UnboundGenericType>()) { type = unbound->getParent(); continue; } break; }; return properties; } UnboundGenericType *UnboundGenericType:: get(GenericTypeDecl *TheDecl, Type Parent, const ASTContext &C) { llvm::FoldingSetNodeID ID; UnboundGenericType::Profile(ID, TheDecl, Parent); void *InsertPos = nullptr; RecursiveTypeProperties properties; if (TheDecl->getExplicitSafety() == ExplicitSafety::Unsafe) properties |= RecursiveTypeProperties::IsUnsafe; properties |= getRecursivePropertiesAsParent(Parent); auto arena = getArena(properties); if (auto unbound = C.getImpl().getArena(arena).UnboundGenericTypes .FindNodeOrInsertPos(ID, InsertPos)) return unbound; auto result = new (C, arena) UnboundGenericType(TheDecl, Parent, C, properties); C.getImpl().getArena(arena).UnboundGenericTypes.InsertNode(result, InsertPos); return result; } void BoundGenericType::Profile(llvm::FoldingSetNodeID &ID, NominalTypeDecl *TheDecl, Type Parent, ArrayRef<Type> GenericArgs) { ID.AddPointer(TheDecl); ID.AddPointer(Parent.getPointer()); ID.AddInteger(GenericArgs.size()); for (Type Arg : GenericArgs) { ID.AddPointer(Arg.getPointer()); } } BoundGenericType::BoundGenericType(TypeKind theKind, NominalTypeDecl *theDecl, Type parent, ArrayRef<Type> genericArgs, const ASTContext *context, RecursiveTypeProperties properties) : NominalOrBoundGenericNominalType(theDecl, parent, theKind, context, properties) { Bits.BoundGenericType.GenericArgCount = genericArgs.size(); // Subtypes are required to provide storage for the generic arguments std::uninitialized_copy(genericArgs.begin(), genericArgs.end(), getTrailingObjectsPointer()); } 
BoundGenericType *BoundGenericType::get(NominalTypeDecl *TheDecl, Type Parent, ArrayRef<Type> GenericArgs) { assert(TheDecl->getGenericParams() && "must be a generic type decl"); assert((!Parent || Parent->is<NominalType>() || Parent->is<BoundGenericType>() || Parent->is<UnboundGenericType>()) && "parent must be a nominal type"); ASTContext &C = TheDecl->getDeclContext()->getASTContext(); llvm::FoldingSetNodeID ID; BoundGenericType::Profile(ID, TheDecl, Parent, GenericArgs); RecursiveTypeProperties properties; if (TheDecl->getExplicitSafety() == ExplicitSafety::Unsafe) properties |= RecursiveTypeProperties::IsUnsafe; properties |= getRecursivePropertiesAsParent(Parent); for (Type Arg : GenericArgs) { properties |= Arg->getRecursiveProperties(); } auto arena = getArena(properties); void *InsertPos = nullptr; if (BoundGenericType *BGT = C.getImpl().getArena(arena).BoundGenericTypes.FindNodeOrInsertPos(ID, InsertPos)) return BGT; bool IsCanonical = !Parent || Parent->isCanonical(); if (IsCanonical) { for (Type Arg : GenericArgs) { if (!Arg->isCanonical()) { IsCanonical = false; break; } } } BoundGenericType *newType; if (auto theClass = dyn_cast<ClassDecl>(TheDecl)) { auto sz = BoundGenericClassType::totalSizeToAlloc<Type>(GenericArgs.size()); auto mem = C.Allocate(sz, alignof(BoundGenericClassType), arena); newType = new (mem) BoundGenericClassType( theClass, Parent, GenericArgs, IsCanonical ? &C : nullptr, properties); } else if (auto theStruct = dyn_cast<StructDecl>(TheDecl)) { auto sz = BoundGenericStructType::totalSizeToAlloc<Type>(GenericArgs.size()); auto mem = C.Allocate(sz, alignof(BoundGenericStructType), arena); newType = new (mem) BoundGenericStructType( theStruct, Parent, GenericArgs, IsCanonical ? 
&C : nullptr, properties); } else if (auto theEnum = dyn_cast<EnumDecl>(TheDecl)) { auto sz = BoundGenericEnumType::totalSizeToAlloc<Type>(GenericArgs.size()); auto mem = C.Allocate(sz, alignof(BoundGenericEnumType), arena); newType = new (mem) BoundGenericEnumType( theEnum, Parent, GenericArgs, IsCanonical ? &C : nullptr, properties); } else { llvm_unreachable("Unhandled NominalTypeDecl"); } C.getImpl().getArena(arena).BoundGenericTypes.InsertNode(newType, InsertPos); return newType; } NominalType *NominalType::get(NominalTypeDecl *D, Type Parent, const ASTContext &C) { assert((isa<ProtocolDecl>(D) || isa<BuiltinTupleDecl>(D) || !D->getGenericParams()) && "must be a non-generic type decl"); assert((!Parent || Parent->is<NominalType>() || Parent->is<BoundGenericType>() || Parent->is<UnboundGenericType>()) && "parent must be a nominal type"); switch (D->getKind()) { case DeclKind::Enum: return EnumType::get(cast<EnumDecl>(D), Parent, C); case DeclKind::Struct: return StructType::get(cast<StructDecl>(D), Parent, C); case DeclKind::Class: return ClassType::get(cast<ClassDecl>(D), Parent, C); case DeclKind::Protocol: { return ProtocolType::get(cast<ProtocolDecl>(D), Parent, C); case DeclKind::BuiltinTuple: return BuiltinTupleType::get(cast<BuiltinTupleDecl>(D), Parent, C); } default: llvm_unreachable("Not a nominal declaration!"); } } EnumType::EnumType(EnumDecl *TheDecl, Type Parent, const ASTContext &C, RecursiveTypeProperties properties) : NominalType(TypeKind::Enum, &C, TheDecl, Parent, properties) { } EnumType *EnumType::get(EnumDecl *D, Type Parent, const ASTContext &C) { RecursiveTypeProperties properties; if (D->getExplicitSafety() == ExplicitSafety::Unsafe) properties |= RecursiveTypeProperties::IsUnsafe; properties |= getRecursivePropertiesAsParent(Parent); auto arena = getArena(properties); auto *&known = C.getImpl().getArena(arena).EnumTypes[{D, Parent}]; if (!known) { known = new (C, arena) EnumType(D, Parent, C, properties); } return known; } 
StructType::StructType(StructDecl *TheDecl, Type Parent, const ASTContext &C,
                       RecursiveTypeProperties properties)
    : NominalType(TypeKind::Struct, &C, TheDecl, Parent, properties) { }

/// Return the uniqued StructType for \p D nested inside \p Parent, creating
/// it on first use.
StructType *StructType::get(StructDecl *D, Type Parent, const ASTContext &C) {
  RecursiveTypeProperties properties;
  if (D->getExplicitSafety() == ExplicitSafety::Unsafe)
    properties |= RecursiveTypeProperties::IsUnsafe;
  properties |= getRecursivePropertiesAsParent(Parent);
  auto arena = getArena(properties);

  auto *&known = C.getImpl().getArena(arena).StructTypes[{D, Parent}];
  if (!known) {
    known = new (C, arena) StructType(D, Parent, C, properties);
  }
  return known;
}

ClassType::ClassType(ClassDecl *TheDecl, Type Parent, const ASTContext &C,
                     RecursiveTypeProperties properties)
    : NominalType(TypeKind::Class, &C, TheDecl, Parent, properties) { }

/// Return the uniqued ClassType for \p D nested inside \p Parent, creating
/// it on first use.
ClassType *ClassType::get(ClassDecl *D, Type Parent, const ASTContext &C) {
  RecursiveTypeProperties properties;
  if (D->getExplicitSafety() == ExplicitSafety::Unsafe)
    properties |= RecursiveTypeProperties::IsUnsafe;
  properties |= getRecursivePropertiesAsParent(Parent);
  auto arena = getArena(properties);

  auto *&known = C.getImpl().getArena(arena).ClassTypes[{D, Parent}];
  if (!known) {
    known = new (C, arena) ClassType(D, Parent, C, properties);
  }
  return known;
}

/// Build a uniqued protocol composition type from the given member types.
/// A single-member composition is only allowed when it also carries an
/// explicit AnyObject constraint or a non-empty inverse set (asserted).
ProtocolCompositionType *
ProtocolCompositionType::build(const ASTContext &C, ArrayRef<Type> Members,
                               InvertibleProtocolSet Inverses,
                               bool HasExplicitAnyObject) {
  assert(Members.size() != 1 || HasExplicitAnyObject || !Inverses.empty());

  // Check to see if we've already seen this protocol composition before.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  ProtocolCompositionType::Profile(ID, Members, Inverses, HasExplicitAnyObject);

  bool isCanonical = true;
  RecursiveTypeProperties properties;
  for (Type t : Members) {
    if (!t->isCanonical())
      isCanonical = false;
    properties |= t->getRecursiveProperties();
  }

  // Create a new protocol composition type.
  auto arena = getArena(properties);

  if (auto compTy = C.getImpl().getArena(arena)
          .ProtocolCompositionTypes.FindNodeOrInsertPos(ID, InsertPos))
    return compTy;

  // Use trailing objects for member type storage
  auto size = totalSizeToAlloc<Type>(Members.size());
  auto mem = C.Allocate(size, alignof(ProtocolCompositionType), arena);
  auto compTy = new (mem) ProtocolCompositionType(isCanonical ? &C : nullptr,
                                                  Members,
                                                  Inverses,
                                                  HasExplicitAnyObject,
                                                  properties);
  C.getImpl().getArena(arena).ProtocolCompositionTypes.InsertNode(
      compTy, InsertPos);
  return compTy;
}

/// Return the uniqued parameterized-protocol type applying \p args to the
/// base protocol type \p baseTy, creating it on first use.
ParameterizedProtocolType *ParameterizedProtocolType::get(const ASTContext &C,
                                                          ProtocolType *baseTy,
                                                          ArrayRef<Type> args) {
  assert(args.size() > 0);

  bool isCanonical = baseTy->isCanonical();
  RecursiveTypeProperties properties = baseTy->getRecursiveProperties();
  for (auto arg : args) {
    properties |= arg->getRecursiveProperties();
    isCanonical &= arg->isCanonical();
  }

  auto arena = getArena(properties);

  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  ParameterizedProtocolType::Profile(ID, baseTy, args);

  if (auto paramTy = C.getImpl().getArena(arena)
          .ParameterizedProtocolTypes.FindNodeOrInsertPos(ID, InsertPos))
    return paramTy;

  auto size = totalSizeToAlloc<Type>(args.size());
  auto mem = C.Allocate(size, alignof(ParameterizedProtocolType), arena);

  properties |= RecursiveTypeProperties::HasParameterizedExistential;

  auto paramTy = new (mem) ParameterizedProtocolType(
      isCanonical ?
      &C : nullptr, baseTy, args, properties);
  C.getImpl().getArena(arena).ParameterizedProtocolTypes.InsertNode(
      paramTy, InsertPos);
  return paramTy;
}

/// Return the uniqued reference-storage wrapper around \p T for the given
/// (non-strong) ownership kind, creating it on first use.
ReferenceStorageType *ReferenceStorageType::get(Type T,
                                                ReferenceOwnership ownership,
                                                const ASTContext &C) {
  assert(!T->hasTypeVariable()); // not meaningful in type-checker
  assert(!T->hasPlaceholder());
  switch (optionalityOf(ownership)) {
  case ReferenceOwnershipOptionality::Disallowed:
    assert(!T->getOptionalObjectType() && "optional type is disallowed");
    break;
  case ReferenceOwnershipOptionality::Allowed:
    break;
  case ReferenceOwnershipOptionality::Required:
    assert(T->getOptionalObjectType() && "optional type is required");
    break;
  }

  auto properties = T->getRecursiveProperties();
  auto arena = getArena(properties);

  // The cache key ORs the ownership value into the referent's pointer bits.
  auto key = uintptr_t(T.getPointer()) | unsigned(ownership);
  auto &entry = C.getImpl().getArena(arena).ReferenceStorageTypes[key];
  if (entry) return entry;

  // One case per storage kind, stamped out from ReferenceStorage.def.
  switch (ownership) {
  case ReferenceOwnership::Strong:
    llvm_unreachable("strong ownership does not use ReferenceStorageType");
#define REF_STORAGE(Name, ...) \
  case ReferenceOwnership::Name: \
    return entry = new (C, arena) \
        Name##StorageType(T, T->isCanonical() ? &C : nullptr, properties);
#include "swift/AST/ReferenceStorage.def"
  }
  llvm_unreachable("bad ownership");
}

AnyMetatypeType::AnyMetatypeType(TypeKind kind, const ASTContext *C,
                                 RecursiveTypeProperties properties,
                                 Type instanceType,
                                 std::optional<MetatypeRepresentation> repr)
    : TypeBase(kind, C, properties), InstanceType(instanceType) {
  if (repr) {
    // Representation is stored biased by 1 so that 0 means "none".
    Bits.AnyMetatypeType.Representation = static_cast<char>(*repr) + 1;
  } else {
    Bits.AnyMetatypeType.Representation = 0;
  }
}

/// Return the uniqued metatype of \p T, optionally with a fixed
/// representation, creating it on first use.
MetatypeType *MetatypeType::get(Type T,
                                std::optional<MetatypeRepresentation> Repr,
                                const ASTContext &Ctx) {
  auto properties = T->getRecursiveProperties();
  auto arena = getArena(properties);

  // Encode the optional representation biased by 1 (0 = "none").
  unsigned reprKey;
  if (Repr.has_value())
    reprKey = static_cast<unsigned>(*Repr) + 1;
  else
    reprKey = 0;

  auto pair = llvm::PointerIntPair<TypeBase*, 3, unsigned>(T.getPointer(),
                                                           reprKey);

  MetatypeType *&Entry = Ctx.getImpl().getArena(arena).MetatypeTypes[pair];
  if (Entry) return Entry;

  return Entry = new (Ctx, arena) MetatypeType(
      T, T->isCanonical() ? &Ctx : nullptr, properties, Repr);
}

MetatypeType::MetatypeType(Type T, const ASTContext *C,
                           RecursiveTypeProperties properties,
                           std::optional<MetatypeRepresentation> repr)
    : AnyMetatypeType(TypeKind::Metatype, C, properties, T, repr) {}

/// Return the uniqued existential metatype of \p T, optionally with a fixed
/// representation, creating it on first use.
ExistentialMetatypeType *
ExistentialMetatypeType::get(Type T, std::optional<MetatypeRepresentation> repr,
                             const ASTContext &ctx) {
  // If we're creating an existential metatype from an
  // existential type, wrap the constraint type directly.
  if (auto existential = T->getAs<ExistentialType>())
    T = existential->getConstraintType();

  auto properties = T->getRecursiveProperties();
  auto arena = getArena(properties);

  // Encode the optional representation biased by 1 (0 = "none").
  unsigned reprKey;
  if (repr.has_value())
    reprKey = static_cast<unsigned>(*repr) + 1;
  else
    reprKey = 0;

  auto pair = llvm::PointerIntPair<TypeBase*, 3, unsigned>(T.getPointer(),
                                                           reprKey);

  auto &entry = ctx.getImpl().getArena(arena).ExistentialMetatypeTypes[pair];
  if (entry) return entry;

  return entry = new (ctx, arena) ExistentialMetatypeType(
      T, T->isCanonical() ? &ctx : nullptr, properties, repr);
}

ExistentialMetatypeType::ExistentialMetatypeType(
    Type T, const ASTContext *C, RecursiveTypeProperties properties,
    std::optional<MetatypeRepresentation> repr)
    : AnyMetatypeType(TypeKind::ExistentialMetatype, C, properties, T, repr) {
  if (repr) {
    assert(*repr != MetatypeRepresentation::Thin &&
           "creating a thin existential metatype?");
    assert(getASTContext().LangOpts.EnableObjCInterop ||
           *repr != MetatypeRepresentation::ObjC);
  }
}

/// Return this metatype's instance type rewrapped as an existential type.
Type ExistentialMetatypeType::getExistentialInstanceType() {
  return ExistentialType::get(getInstanceType());
}

/// Return the uniqued ModuleType for \p M; module types live in the
/// permanent arena.
ModuleType *ModuleType::get(ModuleDecl *M) {
  ASTContext &C = M->getASTContext();

  ModuleType *&Entry = C.getImpl().ModuleTypes[M];
  if (Entry) return Entry;

  return Entry = new (C, AllocationArena::Permanent) ModuleType(M, C);
}

/// Return the uniqued DynamicSelfType wrapping \p selfType, creating it on
/// first use.
DynamicSelfType *DynamicSelfType::get(Type selfType, const ASTContext &ctx) {
  assert(selfType->isMaterializable() && "non-materializable dynamic self?");

  auto properties = selfType->getRecursiveProperties();
  auto arena = getArena(properties);

  auto &dynamicSelfTypes = ctx.getImpl().getArena(arena).DynamicSelfTypes;

  auto known = dynamicSelfTypes.find(selfType);
  if (known != dynamicSelfTypes.end())
    return known->second;

  auto result = new (ctx, arena) DynamicSelfType(selfType, ctx, properties);
  dynamicSelfTypes.insert({selfType, result});
  return result;
}

/// Accumulate the recursive properties of a (non-generic) function type from
/// its parameters, result, and optional global actor / thrown error /
/// sendable-dependent types, then clear the IsLValue bit.
static RecursiveTypeProperties
getFunctionRecursiveProperties(ArrayRef<AnyFunctionType::Param> params,
                               Type result, Type globalActor, Type thrownError,
                               Type sendableDependentType) {
  RecursiveTypeProperties properties;
  for (auto param : params)
    properties |= param.getPlainType()->getRecursiveProperties();
  properties |= result->getRecursiveProperties();
  if (globalActor)
    properties |= globalActor->getRecursiveProperties();
  if (thrownError)
    properties |= thrownError->getRecursiveProperties();
  if (sendableDependentType) {
    // A sendable-dependent type must contain a type variable, so the
    // function type is solver-allocated.
    ASSERT(sendableDependentType->hasTypeVariable());
    properties |= RecursiveTypeProperties::SolverAllocated;
    properties |= RecursiveTypeProperties::HasTypeVariable;
  }
  properties &= ~RecursiveTypeProperties::IsLValue;
  return properties;
}

/// A function type is canonical only if every parameter type is canonical,
/// no parameter carries an internal label, and the result is canonical.
static bool isAnyFunctionTypeCanonical(ArrayRef<AnyFunctionType::Param> params,
                                       Type result) {
  for (auto param : params) {
    if (!param.getPlainType()->isCanonical())
      return false;
    if (!param.getInternalLabel().empty()) {
      // Canonical types don't have internal labels
      return false;
    }
  }

  return result->isCanonical();
}

// For now, generic function types cannot be dependent (in fact,
// they erase dependence) or contain type variables, and they're
// always materializable.
// FIXME: This doesn't seem great, we should consider changing it to be opt-out
// rather than opt-in.
static RecursiveTypeProperties
getGenericFunctionRecursiveProperties(ArrayRef<AnyFunctionType::Param> params,
                                      Type result, Type globalActor,
                                      Type thrownError) {
  static_assert(RecursiveTypeProperties::BitWidth == 19,
                "revisit this if you add new recursive type properties");
  RecursiveTypeProperties properties;

  using Prop = RecursiveTypeProperties::Property;
  // Only this narrow subset of properties propagates out of a generic
  // function type (see the comment above this function).
  auto mask = (unsigned)Prop::HasError | Prop::IsUnsafe | Prop::HasPlaceholder;
  auto unionBits = [&](Type ty) {
    if (!ty) return;

    auto bits = ty->getRecursiveProperties().getBits();
    properties |= Prop(bits & mask);
  };

  for (auto param : params)
    unionBits(param.getPlainType());

  if (result->getRecursiveProperties().hasDynamicSelf())
    properties |= RecursiveTypeProperties::HasDynamicSelf;
  unionBits(result);
  unionBits(globalActor);
  unionBits(thrownError);

  return properties;
}

/// A generic function type is canonical only if its signature is canonical,
/// each parameter and the result are reduced relative to that signature,
/// and no parameter carries an internal label.
static bool
isGenericFunctionTypeCanonical(GenericSignature sig,
                               ArrayRef<AnyFunctionType::Param> params,
                               Type result) {
  if (!sig->isCanonical())
    return false;

  for (auto param : params) {
    if (!sig->isReducedType(param.getPlainType()))
      return false;
    if (!param.getInternalLabel().empty()) {
      // Canonical types don't have internal labels
      return false;
    }
  }

  return sig->isReducedType(result);
}

/// Rebuild this function type with the given ExtInfo, preserving the
/// parameters, result, and (for generic functions) the generic signature.
AnyFunctionType *AnyFunctionType::withExtInfo(ExtInfo info) const {
  if (isa<FunctionType>(this))
    return FunctionType::get(getParams(), getResult(), info);

  auto *genFnTy = cast<GenericFunctionType>(this);
  return GenericFunctionType::get(genFnTy->getGenericSignature(),
                                  getParams(), getResult(), info);
}

/// Return the type this parameter contributes to a parameter list, wrapping
/// variadic parameters in [T] (canonical) or T... (sugared); packs pass
/// through unchanged.
Type AnyFunctionType::Param::getParameterType(bool forCanonical,
                                              ASTContext *ctx) const {
  Type type = getPlainType();
  if (isVariadic()) {
    if (!ctx) ctx = &type->getASTContext();
    auto arrayDecl = ctx->getArrayDecl();
    if (!arrayDecl)
      type = ErrorType::get(*ctx);
    else if (type->is<PackType>())
      return type;
    else if (forCanonical)
      type = BoundGenericType::get(arrayDecl, Type(), {type});
    else
      type = VariadicSequenceType::get(type);
  }
  return type;
}

/// Compose a parameter list into a tuple type, collapsing a single unnamed
/// element to its bare type.
Type
AnyFunctionType::composeTuple(ASTContext &ctx, ArrayRef<Param> params,
                              ParameterFlagHandling paramFlagHandling) {
  SmallVector<TupleTypeElt, 4> elements;
  for (const auto &param : params) {
    switch (paramFlagHandling) {
    case ParameterFlagHandling::IgnoreNonEmpty:
      break;
    case ParameterFlagHandling::AssertEmpty:
      assert(param.getParameterFlags().isNone());
      break;
    }
    elements.emplace_back(param.getParameterType(), param.getLabel());
  }
  if (elements.size() == 1 && !elements[0].hasName())
    return elements[0].getType();
  return TupleType::get(elements, ctx);
}

/// Compare two parameter lists for element-wise equality.
bool AnyFunctionType::equalParams(ArrayRef<AnyFunctionType::Param> a,
                                  ArrayRef<AnyFunctionType::Param> b) {
  if (a.size() != b.size())
    return false;

  for (unsigned i = 0, n = a.size(); i != n; ++i) {
    if (a[i] != b[i])
      return false;
  }

  return true;
}

/// Compare two canonical parameter lists for element-wise equality.
bool AnyFunctionType::equalParams(CanParamArrayRef a, CanParamArrayRef b) {
  if (a.size() != b.size())
    return false;

  for (unsigned i = 0, n = a.size(); i != n; ++i) {
    if (a[i] != b[i])
      return false;
  }

  return true;
}

/// Rewrite each parameter's external label in place to match \p argList;
/// types, flags, and internal labels are preserved.
void AnyFunctionType::relabelParams(MutableArrayRef<Param> params,
                                    ArgumentList *argList) {
  assert(params.size() == argList->size());
  for (auto i : indices(params)) {
    auto &param = params[i];
    param = AnyFunctionType::Param(param.getPlainType(),
                                   argList->getLabel(i),
                                   param.getParameterFlags(),
                                   param.getInternalLabel());
  }
}

/// Profile \p params into \p ID. In contrast to \c == on \c Param, the profile
/// *does* take the internal label into account and *does not* canonicalize
/// the param's type.
static void profileParams(llvm::FoldingSetNodeID &ID,
                          ArrayRef<AnyFunctionType::Param> params) {
  ID.AddInteger(params.size());
  for (auto param : params) {
    ID.AddPointer(param.getLabel().get());
    ID.AddPointer(param.getInternalLabel().get());
    ID.AddPointer(param.getPlainType().getPointer());
    ID.AddInteger(param.getParameterFlags().toRaw());
  }
}

/// Fold a function type's identity (parameters, result, optional ExtInfo)
/// into \p ID for FoldingSet uniquing.
void FunctionType::Profile(llvm::FoldingSetNodeID &ID,
                           ArrayRef<AnyFunctionType::Param> params, Type result,
                           std::optional<ExtInfo> info) {
  profileParams(ID, params);
  ID.AddPointer(result.getPointer());
  if (info.has_value()) {
    info->Profile(ID);
  }
}

/// Return the uniqued non-generic function type with the given parameters,
/// result, and optional extended info, creating it on first use.
FunctionType *FunctionType::get(ArrayRef<AnyFunctionType::Param> params,
                                Type result, std::optional<ExtInfo> info) {
  Type thrownError;
  Type globalActor;
  Type sendableDependentType;
  if (info.has_value()) {
    thrownError = info->getThrownError();
    globalActor = info->getGlobalActor();
    sendableDependentType = info->getSendableDependentType();
  }

  auto properties = getFunctionRecursiveProperties(
      params, result, globalActor, thrownError, sendableDependentType);
  auto arena = getArena(properties);

  if (info.has_value()) {
    // Canonicalize all thin functions to be escaping (to keep compatibility
    // with generic parameters). Note that one can pass SIL-level representation
    // here, so we need additional check for maximum non-SIL value.
    Representation rep = info.value().getRepresentation();
    if (rep <= FunctionTypeRepresentation::Last && isThinRepresentation(rep))
      info = info->withNoEscape(false);
  }

  llvm::FoldingSetNodeID id;
  FunctionType::Profile(id, params, result, info);

  const ASTContext &ctx = result->getASTContext();

  // Do we already have this function type?
  void *insertPos;
  if (auto funcTy = ctx.getImpl().getArena(arena)
          .FunctionTypes.FindNodeOrInsertPos(id, insertPos)) {
    return funcTy;
  }

  ClangTypeInfo clangTypeInfo;
  if (info.has_value())
    clangTypeInfo = info.value().getClangTypeInfo();

  bool hasClangInfo =
      info.has_value() && !info.value().getClangTypeInfo().empty();

  // Trailing storage: optional global actor, thrown error, and
  // sendable-dependent type, plus any lifetime-dependence entries.
  unsigned numTypes = (globalActor ? 1 : 0) + (thrownError ? 1 : 0) +
                      (sendableDependentType ? 1 : 0);

  bool hasLifetimeDependenceInfo =
      info.has_value() ? !info->getLifetimeDependencies().empty() : false;
  auto numLifetimeDependencies =
      hasLifetimeDependenceInfo ? info->getLifetimeDependencies().size() : 0;
  size_t allocSize = totalSizeToAlloc<AnyFunctionType::Param, ClangTypeInfo,
                                      Type, size_t, LifetimeDependenceInfo>(
      params.size(), hasClangInfo ? 1 : 0, numTypes,
      hasLifetimeDependenceInfo ? 1 : 0,
      hasLifetimeDependenceInfo ? numLifetimeDependencies : 0);
  void *mem = ctx.Allocate(allocSize, alignof(FunctionType), arena);

  // A non-trivial thrown error or global actor can also defeat canonicality.
  bool isCanonical = isAnyFunctionTypeCanonical(params, result);
  if (!clangTypeInfo.empty()) {
    if (ctx.LangOpts.UseClangFunctionTypes)
      isCanonical &= clangTypeInfo.getType()->isCanonicalUnqualified();
    else
      isCanonical = false;
  }
  if (thrownError &&
      (!thrownError->isCanonical() ||
       thrownError->isNever() ||
       thrownError->isEqual(ctx.getErrorExistentialType())))
    isCanonical = false;
  if (globalActor && !globalActor->isCanonical())
    isCanonical = false;

  auto funcTy = new (mem) FunctionType(params, result, info,
                                       isCanonical ? &ctx : nullptr,
                                       properties);
  ctx.getImpl().getArena(arena).FunctionTypes.InsertNode(funcTy, insertPos);
  return funcTy;
}

#ifndef NDEBUG
/// Debug-only check that the ExtInfo's parameter isolation agrees with the
/// presence of an 'isolated' parameter in the parameter list.
static bool isConsistentAboutIsolation(const std::optional<ASTExtInfo> &info,
                                       ArrayRef<AnyFunctionType::Param> params) {
  return (hasIsolatedParameter(params)
            == (info && info->getIsolation().isParameter()));
}
#endif

// If the input and result types are canonical, then so is the result.
FunctionType::FunctionType(ArrayRef<AnyFunctionType::Param> params, Type output,
                           std::optional<ExtInfo> info, const ASTContext *ctx,
                           RecursiveTypeProperties properties)
    : AnyFunctionType(TypeKind::Function, ctx, output, properties,
                      params.size(), info) {
  std::uninitialized_copy(params.begin(), params.end(),
                          getTrailingObjects<AnyFunctionType::Param>());
  assert(isConsistentAboutIsolation(info, params));
  if (info.has_value()) {
    auto clangTypeInfo = info.value().getClangTypeInfo();
    if (!clangTypeInfo.empty())
      *getTrailingObjects<ClangTypeInfo>() = clangTypeInfo;
    // Global actor, thrown error, and Sendable-dependent type are packed
    // into a single trailing Type array, in that order.
    unsigned typeIdx = 0;
    if (Type globalActor = info->getGlobalActor()) {
      getTrailingObjects<Type>()[typeIdx] = globalActor;
      typeIdx += 1;
    }
    if (Type thrownError = info->getThrownError()) {
      getTrailingObjects<Type>()[typeIdx] = thrownError;
      typeIdx += 1;
    }
    if (Type sendableDependentType = info->getSendableDependentType()) {
      getTrailingObjects<Type>()[typeIdx] = sendableDependentType;
      typeIdx += 1;
    }
    auto lifetimeDependenceInfo = info->getLifetimeDependencies();
    if (!lifetimeDependenceInfo.empty()) {
      // The trailing size_t records the dependency count.
      *getTrailingObjects<size_t>() = lifetimeDependenceInfo.size();
      std::uninitialized_copy(lifetimeDependenceInfo.begin(),
                              lifetimeDependenceInfo.end(),
                              getTrailingObjects<LifetimeDependenceInfo>());
    }
  }
}

/// Folding-set profile for uniquing GenericFunctionTypes: generic signature,
/// parameters, result, and (when present) the extended info.
void GenericFunctionType::Profile(llvm::FoldingSetNodeID &ID,
                                  GenericSignature sig,
                                  ArrayRef<AnyFunctionType::Param> params,
                                  Type result,
                                  std::optional<ExtInfo> info) {
  ID.AddPointer(sig.getPointer());
  profileParams(ID, params);
  ID.AddPointer(result.getPointer());
  if (info.has_value()) {
    info->Profile(ID);
  }
}

/// Uniquing factory for generic function types.
GenericFunctionType *GenericFunctionType::get(GenericSignature sig,
                                              ArrayRef<Param> params,
                                              Type result,
                                              std::optional<ExtInfo> info) {
  assert(sig && "no generic signature for generic function type?!");

  // We do not allow type variables in GenericFunctionTypes. Note that if this
  // ever changes, we'll need to setup arena-specific allocation for
  // GenericFunctionTypes.
assert(llvm::none_of(params, [](Param param) {
    return param.getPlainType()->hasTypeVariable();
  }));
  assert(!result->hasTypeVariable());

  llvm::FoldingSetNodeID id;
  GenericFunctionType::Profile(id, sig, params, result, info);

  const ASTContext &ctx = result->getASTContext();

  // Do we already have this generic function type?
  void *insertPos;
  if (auto result =
        ctx.getImpl().GenericFunctionTypes.FindNodeOrInsertPos(id, insertPos)) {
    return result;
  }

  // We have to construct this generic function type. Determine whether
  // it's canonical. Unfortunately, isReducedType() can cause
  // new GenericFunctionTypes to be created and thus invalidate our insertion
  // point.
  bool isCanonical = isGenericFunctionTypeCanonical(sig, params, result);

  assert((!info.has_value() || info.value().getClangTypeInfo().empty()) &&
         "Generic functions do not have Clang types at the moment.");

  // Re-query: the canonicality computation above may have created this very
  // type and invalidated insertPos.
  if (auto funcTy =
        ctx.getImpl().GenericFunctionTypes.FindNodeOrInsertPos(id, insertPos)) {
    return funcTy;
  }

  Type thrownError;
  Type globalActor;
  if (info.has_value()) {
    thrownError = info->getThrownError();
    globalActor = info->getGlobalActor();

    // Generic functions can't currently have Sendable dependence.
    ASSERT(!info->getSendableDependentType());
  }

  // A thrown error reducing to Never or the Error existential is sugar
  // for a canonical form, as is any non-reduced thrown error / global actor.
  if (thrownError) {
    if (!sig->isReducedType(thrownError)) {
      isCanonical = false;
    } else {
      Type reducedThrownError = thrownError->getReducedType(sig);
      if (reducedThrownError->isNever() ||
          reducedThrownError->isEqual(ctx.getErrorExistentialType()))
        isCanonical = false;
    }
  }

  if (globalActor && !sig->isReducedType(globalActor))
    isCanonical = false;

  unsigned numTypes = (globalActor ? 1 : 0) + (thrownError ? 1 : 0);
  bool hasLifetimeDependenceInfo =
      info.has_value() ? !info->getLifetimeDependencies().empty() : false;
  auto numLifetimeDependencies =
      hasLifetimeDependenceInfo ? info->getLifetimeDependencies().size() : 0;

  size_t allocSize =
      totalSizeToAlloc<AnyFunctionType::Param, Type, size_t,
                       LifetimeDependenceInfo>(
          params.size(), numTypes,
          hasLifetimeDependenceInfo ? 1 : 0,
          hasLifetimeDependenceInfo ? numLifetimeDependencies : 0);
  void *mem = ctx.Allocate(allocSize, alignof(GenericFunctionType));

  auto properties = getGenericFunctionRecursiveProperties(
      params, result, globalActor, thrownError);
  auto funcTy = new (mem) GenericFunctionType(sig, params, result, info,
                                              isCanonical ? &ctx : nullptr,
                                              properties);

  ctx.getImpl().GenericFunctionTypes.InsertNode(funcTy, insertPos);
  return funcTy;
}

GenericFunctionType::GenericFunctionType(
    GenericSignature sig, ArrayRef<AnyFunctionType::Param> params, Type result,
    std::optional<ExtInfo> info, const ASTContext *ctx,
    RecursiveTypeProperties properties)
    : AnyFunctionType(TypeKind::GenericFunction, ctx, result, properties,
                      params.size(), info),
      Signature(sig) {
  std::uninitialized_copy(params.begin(), params.end(),
                          getTrailingObjects<AnyFunctionType::Param>());
  assert(isConsistentAboutIsolation(info, params));
  if (info) {
    // The trailing Type array holds the global actor (if any) followed by
    // the thrown error (if any).
    unsigned thrownErrorIndex = 0;
    if (Type globalActor = info->getGlobalActor()) {
      getTrailingObjects<Type>()[0] = globalActor;
      ++thrownErrorIndex;
    }
    if (Type thrownError = info->getThrownError())
      getTrailingObjects<Type>()[thrownErrorIndex] = thrownError;
    auto lifetimeDependenceInfo = info->getLifetimeDependencies();
    if (!lifetimeDependenceInfo.empty()) {
      *getTrailingObjects<size_t>() = lifetimeDependenceInfo.size();
      std::uninitialized_copy(lifetimeDependenceInfo.begin(),
                              lifetimeDependenceInfo.end(),
                              getTrailingObjects<LifetimeDependenceInfo>());
    }
  }
}

/// Uniquing factory for a named (sugared) generic type parameter.
GenericTypeParamType *GenericTypeParamType::get(Identifier name,
                                                GenericTypeParamKind paramKind,
                                                unsigned depth, unsigned index,
                                                Type valueType,
                                                const ASTContext &ctx) {
  llvm::FoldingSetNodeID id;
  GenericTypeParamType::Profile(id, paramKind, depth, index, /*weight=*/0,
                                valueType, name);

  void *insertPos;
  if (auto gpTy = ctx.getImpl().GenericParamTypes.FindNodeOrInsertPos(id, insertPos))
    return gpTy;

  RecursiveTypeProperties props = RecursiveTypeProperties::HasTypeParameter;
  if (paramKind == GenericTypeParamKind::Pack)
    props |=
RecursiveTypeProperties::HasParameterPack;
  // NOTE(review): `props` is computed here but not passed to the sugared
  // node below; the node presumably derives properties from `canType` —
  // verify this is intentional.

  auto canType = GenericTypeParamType::get(paramKind, depth, index,
                                           /*weight=*/0, valueType, ctx);

  auto result = new (ctx, AllocationArena::Permanent)
      GenericTypeParamType(name, canType, ctx);
  ctx.getImpl().GenericParamTypes.InsertNode(result, insertPos);
  return result;
}

/// Sugared generic parameter type backed by its declaration.
GenericTypeParamType *GenericTypeParamType::get(GenericTypeParamDecl *param) {
  RecursiveTypeProperties props = RecursiveTypeProperties::HasTypeParameter;
  if (param->isParameterPack())
    props |= RecursiveTypeProperties::HasParameterPack;

  return new (param->getASTContext(), AllocationArena::Permanent)
      GenericTypeParamType(param, props);
}

/// Canonical generic parameter type, uniqued by kind/depth/index/weight/value.
GenericTypeParamType *GenericTypeParamType::get(GenericTypeParamKind paramKind,
                                                unsigned depth, unsigned index,
                                                unsigned weight, Type valueType,
                                                const ASTContext &ctx) {
  llvm::FoldingSetNodeID id;
  GenericTypeParamType::Profile(id, paramKind, depth, index, weight, valueType,
                                Identifier());

  void *insertPos;
  if (auto gpTy = ctx.getImpl().GenericParamTypes.FindNodeOrInsertPos(id, insertPos))
    return gpTy;

  RecursiveTypeProperties props = RecursiveTypeProperties::HasTypeParameter;
  if (paramKind == GenericTypeParamKind::Pack)
    props |= RecursiveTypeProperties::HasParameterPack;

  auto result = new (ctx, AllocationArena::Permanent)
      GenericTypeParamType(paramKind, depth, index, weight, valueType, props,
                           ctx);
  ctx.getImpl().GenericParamTypes.InsertNode(result, insertPos);
  return result;
}

/// Plain (non-pack, non-value) type parameter at \p depth / \p index.
GenericTypeParamType *GenericTypeParamType::getType(unsigned depth,
                                                    unsigned index,
                                                    const ASTContext &ctx) {
  return GenericTypeParamType::get(GenericTypeParamKind::Type, depth, index,
                                   /*weight=*/0, /*valueType=*/Type(), ctx);
}

/// Opaque-result type parameter; distinguished from getType() by weight 1.
GenericTypeParamType *GenericTypeParamType::getOpaqueResultType(
    unsigned depth, unsigned index, const ASTContext &ctx) {
  return GenericTypeParamType::get(GenericTypeParamKind::Type, depth, index,
                                   /*weight=*/1, /*valueType=*/Type(), ctx);
}

/// Pack type parameter at \p depth / \p index.
GenericTypeParamType *GenericTypeParamType::getPack(unsigned depth,
                                                    unsigned index,
                                                    const ASTContext &ctx) {
  return GenericTypeParamType::get(GenericTypeParamKind::Pack, depth, index,
                                   /*weight=*/0, /*valueType=*/Type(), ctx);
}

/// Value generic parameter with underlying \p valueType.
GenericTypeParamType *GenericTypeParamType::getValue(unsigned depth,
                                                     unsigned index,
                                                     Type valueType,
                                                     const ASTContext &ctx) {
  return GenericTypeParamType::get(GenericTypeParamKind::Value, depth, index,
                                   /*weight=*/0, valueType, ctx);
}

ArrayRef<GenericTypeParamType *> GenericFunctionType::getGenericParams() const {
  return Signature.getGenericParams();
}

/// Retrieve the requirements of this polymorphic function type.
ArrayRef<Requirement> GenericFunctionType::getRequirements() const {
  return Signature.getRequirements();
}

/// Folding-set profile for uniquing SILFunctionTypes: every structural
/// component is folded into \p id.
void SILFunctionType::Profile(
    llvm::FoldingSetNodeID &id, GenericSignature genericParams, ExtInfo info,
    SILCoroutineKind coroutineKind, ParameterConvention calleeConvention,
    ArrayRef<SILParameterInfo> params, ArrayRef<SILYieldInfo> yields,
    ArrayRef<SILResultInfo> results, std::optional<SILResultInfo> errorResult,
    ProtocolConformanceRef conformance, SubstitutionMap patternSubs,
    SubstitutionMap invocationSubs) {
  id.AddPointer(genericParams.getPointer());
  info.Profile(id);
  id.AddInteger(unsigned(coroutineKind));
  id.AddInteger(unsigned(calleeConvention));
  id.AddInteger(params.size());
  for (auto param : params)
    param.profile(id);
  id.AddInteger(yields.size());
  for (auto yield : yields)
    yield.profile(id);
  id.AddInteger(results.size());
  for (auto result : results)
    result.profile(id);

  // Just allow the profile length to implicitly distinguish the
  // presence of an error result.
if (errorResult) errorResult->profile(id);
  patternSubs.profile(id);
  invocationSubs.profile(id);
  id.AddBoolean((bool)conformance);
  if (conformance)
    id.AddPointer(conformance.getProtocol());
}

SILFunctionType::SILFunctionType(
    GenericSignature genericSig, ExtInfo ext, SILCoroutineKind coroutineKind,
    ParameterConvention calleeConvention, ArrayRef<SILParameterInfo> params,
    ArrayRef<SILYieldInfo> yields, ArrayRef<SILResultInfo> normalResults,
    std::optional<SILResultInfo> errorResult, SubstitutionMap patternSubs,
    SubstitutionMap invocationSubs, const ASTContext &ctx,
    RecursiveTypeProperties properties,
    ProtocolConformanceRef witnessMethodConformance)
    : TypeBase(TypeKind::SILFunction, &ctx, properties),
      InvocationGenericSig(CanGenericSignature(genericSig)),
      WitnessMethodConformance(witnessMethodConformance) {
  Bits.SILFunctionType.HasErrorResult = errorResult.has_value();
  Bits.SILFunctionType.ExtInfoBits = ext.getBits();
  Bits.SILFunctionType.HasClangTypeInfo = false;
  Bits.SILFunctionType.HasPatternSubs = (bool) patternSubs;
  Bits.SILFunctionType.HasInvocationSubs = (bool) invocationSubs;
  // The use of both assert() and static_assert() below is intentional.
assert(Bits.SILFunctionType.ExtInfoBits == ext.getBits() && "Bits were dropped!"); static_assert(SILExtInfoBuilder::NumMaskBits == NumSILExtInfoBits, "ExtInfo and SILFunctionTypeBitfields must agree on bit size"); Bits.SILFunctionType.HasClangTypeInfo = !ext.getClangTypeInfo().empty(); Bits.SILFunctionType.CoroutineKind = unsigned(coroutineKind); NumParameters = params.size(); assert((coroutineKind == SILCoroutineKind::None && yields.empty()) || coroutineKind != SILCoroutineKind::None); NumAnyResults = normalResults.size(); NumAnyIndirectFormalResults = 0; NumPackResults = 0; for (auto &resultInfo : normalResults) { if (resultInfo.isFormalIndirect()) NumAnyIndirectFormalResults++; if (resultInfo.isPack()) NumPackResults++; } memcpy(getMutableResults().data(), normalResults.data(), normalResults.size() * sizeof(SILResultInfo)); if (coroutineKind != SILCoroutineKind::None) { NumAnyYieldResults = yields.size(); NumAnyIndirectFormalYieldResults = 0; NumPackResults = 0; for (auto &yieldInfo : yields) { if (yieldInfo.isFormalIndirect()) NumAnyIndirectFormalYieldResults++; if (yieldInfo.isPack()) NumPackYieldResults++; } memcpy(getMutableYields().data(), yields.data(), yields.size() * sizeof(SILYieldInfo)); } assert(!isIndirectFormalParameter(calleeConvention)); Bits.SILFunctionType.CalleeConvention = unsigned(calleeConvention); memcpy(getMutableParameters().data(), params.data(), params.size() * sizeof(SILParameterInfo)); if (errorResult) getMutableErrorResult() = *errorResult; if (patternSubs) getMutablePatternSubs() = patternSubs; if (invocationSubs) getMutableInvocationSubs() = invocationSubs; if (hasResultCache()) { getMutableFormalResultsCache() = CanType(); getMutableAllResultsCache() = CanType(); } if (!ext.getClangTypeInfo().empty()) *getTrailingObjects<ClangTypeInfo>() = ext.getClangTypeInfo(); if (!ext.getLifetimeDependencies().empty()) { NumLifetimeDependencies = ext.getLifetimeDependencies().size(); memcpy(getMutableLifetimeDependenceInfo().data(), 
ext.getLifetimeDependencies().data(),
           NumLifetimeDependencies * sizeof(LifetimeDependenceInfo));
  }

#ifndef NDEBUG
  // Debug-only invariant checks on the freshly built SILFunctionType.
  if (ext.getRepresentation() == Representation::WitnessMethod)
    assert(!WitnessMethodConformance.isInvalid() &&
           "witness_method SIL function without a conformance");
  else
    assert(WitnessMethodConformance.isInvalid() &&
           "non-witness_method SIL function with a conformance");

  // Make sure the type follows invariants.
  assert((!invocationSubs || genericSig) &&
         "can only have substitutions with a generic signature");

  if (invocationSubs) {
    assert(invocationSubs.getGenericSignature().getCanonicalSignature() ==
           genericSig.getCanonicalSignature() &&
           "substitutions must match generic signature");
  }

  if (genericSig) {
    assert(!genericSig->areAllParamsConcrete() &&
           "If all generic parameters are concrete, SILFunctionType should "
           "not have a generic signature at all");

    for (auto gparam : genericSig.getGenericParams()) {
      (void)gparam;
      assert(gparam->isCanonical() && "generic signature is not canonicalized");
    }
  }

  if (genericSig || patternSubs) {
    for (auto param : getParameters()) {
      (void)param;
      assert(!param.getInterfaceType()->hasError() &&
             "interface type of parameter should not contain error types");
      assert(!param.getInterfaceType()->hasArchetype() &&
             "interface type of parameter should not contain context archetypes");
    }
    for (auto result : getResults()) {
      (void)result;
      assert(!result.getInterfaceType()->hasError() &&
             "interface type of result should not contain error types");
      assert(!result.getInterfaceType()->hasArchetype() &&
             "interface type of result should not contain context archetypes");
    }
    for (auto yield : getYields()) {
      (void)yield;
      assert(!yield.getInterfaceType()->hasError() &&
             "interface type of yield should not contain error types");
      assert(!yield.getInterfaceType()->hasArchetype() &&
             "interface type of yield should not contain context archetypes");
    }
    if (hasErrorResult()) {
      assert(!getErrorResult().getInterfaceType()->hasError() &&
             "interface type of result should not contain error types");
      assert(!getErrorResult().getInterfaceType()->hasArchetype() &&
             "interface type of result should not contain context archetypes");
    }

    if (genericSig && patternSubs) {
      assert(!patternSubs.getRecursiveProperties().hasArchetype() &&
             "pattern substitutions should not contain context archetypes");
    }
  }

  for (auto result : getResults()) {
    assert(!isa<PackExpansionType>(result.getInterfaceType()) &&
           "Cannot have a pack expansion directly as a result");
    (void)result;
    assert((result.getConvention() == ResultConvention::Pack) ==
           (isa<SILPackType>(result.getInterfaceType())) &&
           "Packs must have pack convention");
    if (auto *FnType = result.getInterfaceType()->getAs<SILFunctionType>()) {
      assert(!FnType->isNoEscape() &&
             "Cannot return an @noescape function type");
    }
  }

  // At most one parameter may be marked 'isolated'.
  bool hasIsolatedParameter = false;
  (void) hasIsolatedParameter;
  for (auto param : getParameters()) {
    if (param.hasOption(SILParameterInfo::Isolated)) {
      assert(!hasIsolatedParameter &&
             "building SIL function type with multiple isolated parameters");
      hasIsolatedParameter = true;
    }
    assert(!isa<PackExpansionType>(param.getInterfaceType()) &&
           "Cannot have a pack expansion directly as a parameter");
    assert(param.isPack() == isa<SILPackType>(param.getInterfaceType()) &&
           "Packs must have pack convention");
  }
  for (auto yield : getYields()) {
    (void)yield;
    assert(!isa<PackExpansionType>(yield.getInterfaceType()) &&
           "Cannot have a pack expansion directly as a yield");
    assert(yield.isPack() == isa<SILPackType>(yield.getInterfaceType()) &&
           "Packs must have pack convention");
  }

  // Check that `@noDerivative` parameters and results only exist in
  // `@differentiable` function types.
if (!ext.isDifferentiable()) {
    for (auto param : getParameters()) {
      assert(!param.hasOption(SILParameterInfo::NotDifferentiable) &&
             "non-`@differentiable` function type should not have "
             "`@noDerivative` parameter");
    }
    for (auto result : getResults()) {
      assert(!result.hasOption(SILResultInfo::NotDifferentiable) &&
             "non-`@differentiable` function type should not have "
             "`@noDerivative` result");
    }
  }
#endif
}

/// Uniquing factory: wrap \p innerType in a SIL-level move-only wrapper type,
/// cached in the ASTContext.
CanSILMoveOnlyWrappedType SILMoveOnlyWrappedType::get(CanType innerType) {
  ASTContext &ctx = innerType->getASTContext();
  auto found = ctx.getImpl().SILMoveOnlyWrappedTypes.find(innerType);
  if (found != ctx.getImpl().SILMoveOnlyWrappedTypes.end())
    return CanSILMoveOnlyWrappedType(found->second);

  void *mem = ctx.Allocate(sizeof(SILMoveOnlyWrappedType),
                           alignof(SILMoveOnlyWrappedType));
  auto *storageTy = new (mem) SILMoveOnlyWrappedType(innerType);
  ctx.getImpl().SILMoveOnlyWrappedTypes.insert({innerType, storageTy});
  return CanSILMoveOnlyWrappedType(storageTy);
}

/// Uniquing factory for the block-storage type of a captured value,
/// cached in the ASTContext.
CanSILBlockStorageType SILBlockStorageType::get(CanType captureType) {
  ASTContext &ctx = captureType->getASTContext();
  auto found = ctx.getImpl().SILBlockStorageTypes.find(captureType);
  if (found != ctx.getImpl().SILBlockStorageTypes.end())
    return CanSILBlockStorageType(found->second);

  void *mem = ctx.Allocate(sizeof(SILBlockStorageType),
                           alignof(SILBlockStorageType));
  SILBlockStorageType *storageTy = new (mem) SILBlockStorageType(captureType);
  ctx.getImpl().SILBlockStorageTypes.insert({captureType, storageTy});
  return CanSILBlockStorageType(storageTy);
}

/// Uniquing factory for lowered SIL function types.
CanSILFunctionType SILFunctionType::get(
    GenericSignature genericSig, ExtInfo ext, SILCoroutineKind coroutineKind,
    ParameterConvention callee, ArrayRef<SILParameterInfo> params,
    ArrayRef<SILYieldInfo> yields, ArrayRef<SILResultInfo> normalResults,
    std::optional<SILResultInfo> errorResult, SubstitutionMap patternSubs,
    SubstitutionMap invocationSubs, const ASTContext &ctx,
    ProtocolConformanceRef witnessMethodConformance) {
  assert(coroutineKind !=
SILCoroutineKind::None || yields.empty());
  assert(!ext.isPseudogeneric() || genericSig ||
         coroutineKind != SILCoroutineKind::None);

  patternSubs = patternSubs.getCanonical();
  invocationSubs = invocationSubs.getCanonical();

  // [FIXME: Clang-type-plumbing]
  if (ctx.LangOpts.UseClangFunctionTypes) {
    if (auto error = ext.checkClangType()) {
      error.value().dump();
      llvm_unreachable("Unexpected Clang type in SILExtInfo.");
    }
  } else if (!ext.getClangTypeInfo().empty()) {
    // Unlike AnyFunctionType, SILFunctionType is always canonical. Hence,
    // conditionalizing canonical type computation based on
    // UseClangFunctionTypes like AnyFunctionType is not feasible. It is simpler
    // to drop the Clang type altogether.
    ext = ext.intoBuilder().withClangFunctionType(nullptr).build();
  }

  // Canonicalize all thin functions to be escaping (to keep compatibility
  // with generic parameters)
  if (isThinRepresentation(ext.getRepresentation()))
    ext = ext.intoBuilder().withNoEscape(false);

  llvm::FoldingSetNodeID id;
  SILFunctionType::Profile(id, genericSig, ext, coroutineKind, callee, params,
                           yields, normalResults, errorResult,
                           witnessMethodConformance, patternSubs,
                           invocationSubs);

  // Do we already have this generic function type?
  void *insertPos;
  if (auto result =
        ctx.getImpl().SILFunctionTypes.FindNodeOrInsertPos(id, insertPos))
    return CanSILFunctionType(result);

  // All SILFunctionTypes are canonical.

  // See [NOTE: SILFunctionType-layout]
  bool hasResultCache = normalResults.size() > 1;
  size_t bytes = totalSizeToAlloc<SILParameterInfo, SILResultInfo, SILYieldInfo,
                                  SubstitutionMap, CanType, ClangTypeInfo,
                                  LifetimeDependenceInfo>(
      params.size(), normalResults.size() + (errorResult ? 1 : 0),
      yields.size(), (patternSubs ? 1 : 0) + (invocationSubs ? 1 : 0),
      hasResultCache ? 2 : 0, ext.getClangTypeInfo().empty() ? 0 : 1,
      !ext.getLifetimeDependencies().empty() ?
          ext.getLifetimeDependencies().size() : 0);

  void *mem = ctx.Allocate(bytes, alignof(SILFunctionType));

  // Accumulate recursive properties from every component type.
  RecursiveTypeProperties properties;
  static_assert(RecursiveTypeProperties::BitWidth == 19,
                "revisit this if you add new recursive type properties");
  for (auto &param : params)
    properties |= param.getInterfaceType()->getRecursiveProperties();
  for (auto &yield : yields)
    properties |= yield.getInterfaceType()->getRecursiveProperties();
  for (auto &result : normalResults)
    properties |= result.getInterfaceType()->getRecursiveProperties();
  if (errorResult)
    properties |= errorResult->getInterfaceType()->getRecursiveProperties();

  // FIXME: If we ever have first-class polymorphic values, we'll need to
  // revisit this.
  if (genericSig || patternSubs) {
    properties.removeHasTypeParameter();
    properties.removeHasDependentMember();
  }

  auto outerSubs = genericSig ? invocationSubs : patternSubs;
  properties |= outerSubs.getRecursiveProperties();

  auto fnType =
      new (mem) SILFunctionType(genericSig, ext, coroutineKind, callee, params,
                                yields, normalResults, errorResult,
                                patternSubs, invocationSubs, ctx, properties,
                                witnessMethodConformance);
  assert(fnType->hasResultCache() == hasResultCache);

  ctx.getImpl().SILFunctionTypes.InsertNode(fnType, insertPos);
  return CanSILFunctionType(fnType);
}

/// Uniquing factory for the `[Element]` sugar type.
ArraySliceType *ArraySliceType::get(Type base) {
  auto properties = base->getRecursiveProperties();
  auto arena = getArena(properties);

  const ASTContext &C = base->getASTContext();

  ArraySliceType *&entry = C.getImpl().getArena(arena).ArraySliceTypes[base];
  if (entry) return entry;

  return entry = new (C, arena) ArraySliceType(C, base, properties);
}

/// Uniquing factory for the inline-array sugar type, keyed on count and
/// element types.
InlineArrayType *InlineArrayType::get(Type count, Type elt) {
  auto properties = count->getRecursiveProperties() |
                    elt->getRecursiveProperties();
  auto arena = getArena(properties);

  const ASTContext &C = count->getASTContext();

  auto *&entry = C.getImpl().getArena(arena).InlineArrayTypes[{count, elt}];
  if (entry)
    return entry;

  entry = new (C, arena)
InlineArrayType(C, count, elt, properties);
  return entry;
}

/// Uniquing factory for the variadic-sequence sugar type.
VariadicSequenceType *VariadicSequenceType::get(Type base) {
  auto properties = base->getRecursiveProperties();
  auto arena = getArena(properties);

  const ASTContext &C = base->getASTContext();

  VariadicSequenceType *&entry =
      C.getImpl().getArena(arena).VariadicSequenceTypes[base];
  if (entry) return entry;

  return entry = new (C, arena) VariadicSequenceType(C, base, properties);
}

/// Uniquing factory for the `[Key: Value]` sugar type.
DictionaryType *DictionaryType::get(Type keyType, Type valueType) {
  auto properties = keyType->getRecursiveProperties()
                  | valueType->getRecursiveProperties();
  auto arena = getArena(properties);

  const ASTContext &C = keyType->getASTContext();

  DictionaryType *&entry =
      C.getImpl().getArena(arena).DictionaryTypes[{keyType, valueType}];
  if (entry) return entry;

  return entry = new (C, arena) DictionaryType(C, keyType, valueType,
                                               properties);
}

/// Uniquing factory for the `T?` sugar type.
OptionalType *OptionalType::get(Type base) {
  auto properties = base->getRecursiveProperties();
  auto arena = getArena(properties);

  const ASTContext &C = base->getASTContext();

  OptionalType *&entry = C.getImpl().getArena(arena).OptionalTypes[base];
  if (entry) return entry;

  return entry = new (C, arena) OptionalType(C, base, properties);
}

/// Uniquing factory for a protocol type, keyed on declaration and parent type.
ProtocolType *ProtocolType::get(ProtocolDecl *D, Type Parent,
                                const ASTContext &C) {
  RecursiveTypeProperties properties;
  if (D->getExplicitSafety() == ExplicitSafety::Unsafe)
    properties |= RecursiveTypeProperties::IsUnsafe;
  properties |= getRecursivePropertiesAsParent(Parent);
  auto arena = getArena(properties);

  auto *&known = C.getImpl().getArena(arena).ProtocolTypes[{D, Parent}];
  if (!known) {
    known = new (C, arena) ProtocolType(D, Parent, C, properties);
  }

  return known;
}

ProtocolType::ProtocolType(ProtocolDecl *TheDecl, Type Parent,
                           const ASTContext &Ctx,
                           RecursiveTypeProperties properties)
  : NominalType(TypeKind::Protocol, &Ctx, TheDecl, Parent, properties) { }

/// Wrap \p constraint in an existential type, unless it already is one.
Type ExistentialType::get(Type constraint) {
  auto &C = constraint->getASTContext();
  // ExistentialMetatypeType is already an existential type.
  if (constraint->is<ExistentialMetatypeType>())
    return constraint;

  // `Any` and `AnyObject` are printed without the `any` keyword.
  bool printWithAny = true;
  if (constraint->isEqual(C.TheAnyType) || constraint->isAnyObject())
    printWithAny = false;

  auto properties = constraint->getRecursiveProperties();
  auto arena = getArena(properties);

  auto &entry = C.getImpl().getArena(arena).ExistentialTypes[constraint];
  if (entry)
    return entry;

  const ASTContext *canonicalContext = constraint->isCanonical() ? &C : nullptr;
  return entry = new (C, arena) ExistentialType(constraint, printWithAny,
                                                canonicalContext,
                                                properties);
}

BuiltinTupleType::BuiltinTupleType(BuiltinTupleDecl *TheDecl,
                                   const ASTContext &Ctx)
  : NominalType(TypeKind::BuiltinTuple, &Ctx, TheDecl, Type(),
                RecursiveTypeProperties()) { }

/// Uniquing factory for `@lvalue T`; nested lvalue/inout is rejected.
LValueType *LValueType::get(Type objectTy) {
  assert(!objectTy->is<LValueType>() && !objectTy->is<InOutType>() &&
         "cannot have 'inout' or @lvalue wrapped inside an @lvalue");

  auto properties = objectTy->getRecursiveProperties()
                    | RecursiveTypeProperties::IsLValue;
  auto arena = getArena(properties);

  auto &C = objectTy->getASTContext();
  auto &entry = C.getImpl().getArena(arena).LValueTypes[objectTy];
  if (entry)
    return entry;

  const ASTContext *canonicalContext = objectTy->isCanonical() ? &C : nullptr;
  return entry = new (C, arena) LValueType(objectTy, canonicalContext,
                                           properties);
}

/// Uniquing factory for `inout T`; nested lvalue/inout is rejected.
InOutType *InOutType::get(Type objectTy) {
  assert(!objectTy->is<LValueType>() && !objectTy->is<InOutType>() &&
         "cannot have 'inout' or @lvalue wrapped inside an 'inout'");

  auto properties = objectTy->getRecursiveProperties();
  properties &= ~RecursiveTypeProperties::IsLValue;
  auto arena = getArena(properties);

  auto &C = objectTy->getASTContext();
  auto &entry = C.getImpl().getArena(arena).InOutTypes[objectTy];
  if (entry)
    return entry;

  const ASTContext *canonicalContext = objectTy->isCanonical() ?
&C : nullptr;
  return entry = new (C, arena) InOutType(objectTy, canonicalContext,
                                          properties);
}

/// Uniquing factory for an unresolved dependent member type `Base.name`.
DependentMemberType *DependentMemberType::get(Type base, Identifier name) {
  auto properties = base->getRecursiveProperties();
  properties |= RecursiveTypeProperties::HasDependentMember;
  auto arena = getArena(properties);

  llvm::PointerUnion<Identifier, AssociatedTypeDecl *> stored(name);
  const ASTContext &ctx = base->getASTContext();
  auto *&known = ctx.getImpl().getArena(arena).DependentMemberTypes[
                                          {base, stored.getOpaqueValue()}];
  if (!known) {
    const ASTContext *canonicalCtx = base->isCanonical() ? &ctx : nullptr;
    known = new (ctx, arena) DependentMemberType(base, name, canonicalCtx,
                                                 properties);
  }
  return known;
}

/// Uniquing factory for a resolved dependent member type keyed on the
/// associated-type declaration.
DependentMemberType *DependentMemberType::get(Type base,
                                              AssociatedTypeDecl *assocType) {
  assert(assocType && "Missing associated type");
  auto properties = base->getRecursiveProperties();
  properties |= RecursiveTypeProperties::HasDependentMember;
  auto arena = getArena(properties);

  llvm::PointerUnion<Identifier, AssociatedTypeDecl *> stored(assocType);
  const ASTContext &ctx = base->getASTContext();
  auto *&known = ctx.getImpl().getArena(arena).DependentMemberTypes[
                                          {base, stored.getOpaqueValue()}];
  if (!known) {
    const ASTContext *canonicalCtx = base->isCanonical() ? &ctx : nullptr;
    known = new (ctx, arena) DependentMemberType(base, assocType,
                                                 canonicalCtx, properties);
  }
  return known;
}

/// Allocate a fresh opaque-result archetype in \p environment.
OpaqueTypeArchetypeType *OpaqueTypeArchetypeType::getNew(
    GenericEnvironment *environment, Type interfaceType,
    ArrayRef<ProtocolDecl *> conformsTo, Type superclass,
    LayoutConstraint layout) {
  auto properties = archetypeProperties(
      RecursiveTypeProperties::HasOpaqueArchetype, conformsTo, superclass,
      environment->getOuterSubstitutions());
  auto arena = getArena(properties);
  auto size = OpaqueTypeArchetypeType::totalSizeToAlloc<
      ProtocolDecl *, Type, LayoutConstraint>(
      conformsTo.size(), superclass ? 1 : 0, layout ?
1 : 0);

  ASTContext &ctx = interfaceType->getASTContext();
  auto mem = ctx.Allocate(size, alignof(OpaqueTypeArchetypeType), arena);
  return ::new (mem)
      OpaqueTypeArchetypeType(environment->isCanonical() ? &ctx : nullptr,
                              environment, properties, interfaceType,
                              conformsTo, superclass, layout);
}

/// Map an opaque interface type into the archetype environment derived from
/// its declaration and substitutions.
Type OpaqueTypeArchetypeType::get(
    OpaqueTypeDecl *Decl, Type interfaceType, SubstitutionMap Substitutions) {
  auto *env = GenericEnvironment::forOpaqueType(Decl, Substitutions);
  return env->getOrCreateArchetypeFromInterfaceType(interfaceType);
}

/// Allocate a fresh opened-existential archetype in \p environment.
CanTypeWrapper<ExistentialArchetypeType> ExistentialArchetypeType::getNew(
    GenericEnvironment *environment, Type interfaceType,
    ArrayRef<ProtocolDecl *> conformsTo, Type superclass,
    LayoutConstraint layout) {
  auto properties = archetypeProperties(
      RecursiveTypeProperties::HasOpenedExistential, conformsTo, superclass,
      environment->getOuterSubstitutions());
  auto arena = getArena(properties);
  auto size = ExistentialArchetypeType::totalSizeToAlloc<
      ProtocolDecl *, Type, LayoutConstraint>(
      conformsTo.size(), superclass ? 1 : 0, layout ? 1 : 0);

  ASTContext &ctx = interfaceType->getASTContext();
  void *mem = ctx.Allocate(size, alignof(ExistentialArchetypeType), arena);
  return CanExistentialArchetypeType(::new (mem) ExistentialArchetypeType(
      environment->isCanonical() ? &ctx : nullptr,
      environment, interfaceType, conformsTo, superclass, layout, properties));
}

/// Open \p existential into an archetype inside a freshly created
/// opened-existential environment (uniqued by a new UUID).
CanExistentialArchetypeType ExistentialArchetypeType::get(CanType existential) {
  auto &ctx = existential->getASTContext();
  auto existentialSig = ctx.getOpenedExistentialSignature(existential);

  auto *genericEnv = GenericEnvironment::forOpenedExistential(
      existentialSig.OpenedSig, existentialSig.Shape,
      existentialSig.Generalization, UUID::fromTime());

  return cast<ExistentialArchetypeType>(
      genericEnv->mapTypeIntoEnvironment(existentialSig.SelfType)
          ->getCanonicalType());
}

/// Open any existential type, looking through existential metatypes and
/// preserving their representation.
Type ExistentialArchetypeType::getAny(Type existential) {
  assert(existential->isAnyExistentialType());

  if (auto metatypeTy = existential->getAs<ExistentialMetatypeType>()) {
    auto instanceTy = metatypeTy->getExistentialInstanceType();
    auto openedInstanceTy = ExistentialArchetypeType::getAny(instanceTy);
    if (metatypeTy->hasRepresentation()) {
      return MetatypeType::get(openedInstanceTy,
                               metatypeTy->getRepresentation());
    }
    return MetatypeType::get(openedInstanceTy);
  }

  return ExistentialArchetypeType::get(existential->getCanonicalType());
}

/// Folding-set profile for uniquing substitution-map storage.
void SubstitutionMap::Storage::Profile(
    llvm::FoldingSetNodeID &id, GenericSignature genericSig,
    ArrayRef<Type> replacementTypes,
    ArrayRef<ProtocolConformanceRef> conformances) {
  id.AddPointer(genericSig.getPointer());
  if (!genericSig) return;

  // Replacement types.
  for (auto replacementType : replacementTypes)
    id.AddPointer(replacementType.getPointer());

  // Conformances.
  for (auto conformance : conformances)
    id.AddPointer(conformance.getOpaqueValue());
}

/// Uniquing factory for substitution-map storage.
SubstitutionMap::Storage *SubstitutionMap::Storage::get(
    GenericSignature genericSig, ArrayRef<Type> replacementTypes,
    ArrayRef<ProtocolConformanceRef> conformances) {
  // If there is no generic signature, we need no storage.
  if (!genericSig) {
    assert(replacementTypes.empty());
    assert(conformances.empty());
    return nullptr;
  }

  // Figure out which arena this should go in.
RecursiveTypeProperties properties; for (auto type : replacementTypes) { if (type) properties |= type->getRecursiveProperties(); } // Profile the substitution map. llvm::FoldingSetNodeID id; SubstitutionMap::Storage::Profile(id, genericSig, replacementTypes, conformances); auto arena = getArena(properties); // Did we already record this substitution map? auto &ctx = genericSig->getASTContext(); void *insertPos; auto &substitutionMaps = ctx.getImpl().getArena(arena).SubstitutionMaps; if (auto result = substitutionMaps.FindNodeOrInsertPos(id, insertPos)) return result; // Allocate the appropriate amount of storage for the signature and its // replacement types and conformances. auto size = Storage::totalSizeToAlloc<Type, ProtocolConformanceRef>( replacementTypes.size(), conformances.size()); auto mem = ctx.Allocate(size, alignof(Storage), arena); auto result = new (mem) Storage(genericSig, replacementTypes, conformances); substitutionMaps.InsertNode(result, insertPos); return result; } ProtocolConformanceRef ProtocolConformanceRef::forAbstract( Type conformingType, ProtocolDecl *proto) { ASTContext &ctx = proto->getASTContext(); auto kind = conformingType->getDesugaredType()->getKind(); switch (kind) { case TypeKind::GenericTypeParam: case TypeKind::TypeVariable: case TypeKind::DependentMember: case TypeKind::Error: case TypeKind::Placeholder: case TypeKind::PrimaryArchetype: case TypeKind::PackArchetype: case TypeKind::OpaqueTypeArchetype: case TypeKind::ExistentialArchetype: case TypeKind::ElementArchetype: break; default: ABORT([&](auto &out) { out << "Abstract conformance with bad subject type:\n"; conformingType->dump(out); }); } // Figure out which arena this should go in. auto properties = conformingType->getRecursiveProperties(); auto arena = getArena(properties); // Form the folding set key. llvm::FoldingSetNodeID id; AbstractConformance::Profile(id, conformingType, proto); // Did we already record this abstract conformance? 
  // Probe the per-arena folding set for an existing abstract conformance.
  void *insertPos;
  auto &abstractConformances =
      ctx.getImpl().getArena(arena).AbstractConformances;
  if (auto result = abstractConformances.FindNodeOrInsertPos(id, insertPos))
    return ProtocolConformanceRef(result);

  // Allocate and record this abstract conformance.
  auto mem = ctx.Allocate(sizeof(AbstractConformance),
                          alignof(AbstractConformance), arena);
  auto result = new (mem) AbstractConformance(conformingType, proto);
  abstractConformances.InsertNode(result, insertPos);
  return ProtocolConformanceRef(result);
}

/// Return the unique, context-allocated availability-context storage node
/// for the given (platform range, deprecation flag, domain infos) triple.
///
/// Results are uniqued in the ASTContext's AvailabilityContexts folding
/// set, so pointer identity implies equality of the underlying data.
const AvailabilityContext::Storage *AvailabilityContext::Storage::get(
    const AvailabilityRange &platformRange, bool isDeprecated,
    llvm::ArrayRef<DomainInfo> domainInfos, const ASTContext &ctx) {
  llvm::FoldingSetNodeID id;
  AvailabilityContext::Storage::Profile(id, platformRange, isDeprecated,
                                        domainInfos);

  auto &foldingSet = ctx.getImpl().AvailabilityContexts;
  void *insertPos;
  auto *existing = foldingSet.FindNodeOrInsertPos(id, insertPos);
  if (existing)
    return existing;

  // Not cached yet: allocate storage with trailing space for the domain
  // infos and copy them into place.
  size_t storageToAlloc = AvailabilityContext::Storage::totalSizeToAlloc<
      AvailabilityContext::DomainInfo>(domainInfos.size());
  void *mem = ctx.Allocate(storageToAlloc,
                           alignof(AvailabilityContext::Storage));
  auto *newNode = ::new (mem) AvailabilityContext::Storage(
      platformRange, isDeprecated, domainInfos.size());
  std::uninitialized_copy(domainInfos.begin(), domainInfos.end(),
                          newNode->getTrailingObjects());
  foldingSet.InsertNode(newNode, insertPos);

  return newNode;
}

/// Return the unique custom availability domain for the given name and
/// module, creating and caching it on first use.
///
/// NOTE(review): only (name, module) participate in the folding-set
/// profile; 'kind', 'decl', and 'predicateFunc' are presumably uniquely
/// determined by that pair — confirm with callers.
const CustomAvailabilityDomain *
CustomAvailabilityDomain::get(StringRef name, Kind kind, ModuleDecl *mod,
                              ValueDecl *decl, FuncDecl *predicateFunc,
                              const ASTContext &ctx) {
  auto identifier = ctx.getIdentifier(name);

  llvm::FoldingSetNodeID id;
  CustomAvailabilityDomain::Profile(id, identifier, mod);

  auto &foldingSet = ctx.getImpl().CustomAvailabilityDomains;
  void *insertPos;
  auto *existing = foldingSet.FindNodeOrInsertPos(id, insertPos);
  if (existing)
    return existing;

  void *mem = ctx.Allocate(sizeof(CustomAvailabilityDomain),
alignof(CustomAvailabilityDomain)); auto *newNode = ::new (mem) CustomAvailabilityDomain(identifier, kind, mod, decl, predicateFunc); foldingSet.InsertNode(newNode, insertPos); return newNode; } void GenericSignatureImpl::Profile(llvm::FoldingSetNodeID &ID, ArrayRef<GenericTypeParamType *> genericParams, ArrayRef<Requirement> requirements) { for (auto p : genericParams) ID.AddPointer(p); for (auto &reqt : requirements) { ID.AddPointer(reqt.getFirstType().getPointer()); if (reqt.getKind() != RequirementKind::Layout) ID.AddPointer(reqt.getSecondType().getPointer()); else ID.AddPointer(reqt.getLayoutConstraint().getPointer()); ID.AddInteger(unsigned(reqt.getKind())); } } GenericSignature GenericSignature::get(ArrayRef<GenericTypeParamType *> params, ArrayRef<Requirement> requirements, bool isKnownCanonical) { assert(!params.empty()); #ifndef NDEBUG for (auto req : requirements) { assert(req.getFirstType()->isTypeParameter()); assert(!req.getFirstType()->hasTypeVariable()); assert(req.getKind() == RequirementKind::Layout || !req.getSecondType()->hasTypeVariable()); } #endif // Check for an existing generic signature. llvm::FoldingSetNodeID ID; GenericSignatureImpl::Profile(ID, params, requirements); auto &ctx = getASTContext(params, requirements); void *insertPos; auto &sigs = ctx.getImpl().GenericSignatures; if (auto *sig = sigs.FindNodeOrInsertPos(ID, insertPos)) { if (isKnownCanonical) sig->CanonicalSignatureOrASTContext = &ctx; return sig; } // Allocate and construct the new signature. 
size_t bytes = GenericSignatureImpl::template totalSizeToAlloc< GenericTypeParamType *, Requirement>( params.size(), requirements.size()); void *mem = ctx.Allocate(bytes, alignof(GenericSignatureImpl)); auto *newSig = new (mem) GenericSignatureImpl(params, requirements, isKnownCanonical); ctx.getImpl().GenericSignatures.InsertNode(newSig, insertPos); return newSig; } GenericSignature GenericSignature::forInvalid(ArrayRef<GenericTypeParamType *> params) { ASSERT(!params.empty()); auto &ctx = params.front()->getASTContext(); // Add same type requirements that make each of the generic parameters // concrete error types. This helps avoid downstream diagnostics and is // handled the same as if the user wrote e.g `<T where T == Undefined>`. SmallVector<Requirement, 2> requirements; for (auto *param : params) { if (param->isValue()) continue; requirements.emplace_back(RequirementKind::SameType, param, ErrorType::get(ctx)); } return GenericSignature::get(params, requirements); } GenericEnvironment *GenericEnvironment::forPrimary(GenericSignature signature) { auto &ctx = signature->getASTContext(); // Allocate and construct the new environment. unsigned numGenericParams = signature.getGenericParams().size(); size_t bytes = totalSizeToAlloc<SubstitutionMap, OpaqueEnvironmentData, ExistentialEnvironmentData, ElementEnvironmentData, Type>( 0, 0, 0, 0, numGenericParams); void *mem = ctx.Allocate(bytes, alignof(GenericEnvironment)); return new (mem) GenericEnvironment(signature); } /// Create a new generic environment for an opaque type with the given set of /// outer substitutions. GenericEnvironment *GenericEnvironment::forOpaqueType( OpaqueTypeDecl *opaque, SubstitutionMap subs) { // Don't preserve sugar if we have type variables, because this leads to // excessive solver arena memory usage. 
if (subs.getRecursiveProperties().hasTypeVariable()) subs = subs.getCanonical(); auto &ctx = opaque->getASTContext(); auto properties = ArchetypeType::archetypeProperties( RecursiveTypeProperties::HasOpaqueArchetype, { }, Type(), subs); auto arena = getArena(properties); auto &environments = ctx.getImpl().getArena(arena).OpaqueArchetypeEnvironments; GenericEnvironment *env = environments[{opaque, subs}]; if (!env) { // Allocate and construct the new environment. auto signature = opaque->getOpaqueInterfaceGenericSignature(); unsigned numGenericParams = signature.getGenericParams().size(); size_t bytes = totalSizeToAlloc<SubstitutionMap, OpaqueEnvironmentData, ExistentialEnvironmentData, ElementEnvironmentData, Type>( 1, 1, 0, 0, numGenericParams); void *mem = ctx.Allocate(bytes, alignof(GenericEnvironment), arena); env = new (mem) GenericEnvironment(signature, opaque, subs); environments[{opaque, subs}] = env; } return env; } /// Create a new generic environment for an opened archetype. GenericEnvironment * GenericEnvironment::forOpenedExistential(Type existential, UUID uuid) { auto &ctx = existential->getASTContext(); auto existentialSig = ctx.getOpenedExistentialSignature(existential); return forOpenedExistential(existentialSig.OpenedSig, existentialSig.Shape, existentialSig.Generalization, uuid); } /// Create a new generic environment for an opened archetype. GenericEnvironment * GenericEnvironment::forOpenedExistential( GenericSignature signature, Type existential, SubstitutionMap subs, UUID uuid) { assert(existential->isExistentialType()); // TODO: We could attempt to preserve type sugar in the substitution map. // Currently archetypes are assumed to be always canonical in many places, // though, so doing so would require fixing those places. 
subs = subs.getCanonical(); auto &ctx = existential->getASTContext(); auto layout = existential->getExistentialLayout(); auto properties = ArchetypeType::archetypeProperties( RecursiveTypeProperties::HasOpenedExistential, layout.getProtocols(), layout.getSuperclass(), subs); auto arena = getArena(properties); auto key = std::make_pair(subs, uuid); auto &environments = ctx.getImpl().getArena(arena).OpenedExistentialEnvironments; auto found = environments.find(key); if (found != environments.end()) { auto *existingEnv = found->second; assert(existingEnv->getOpenedExistentialType()->isEqual(existential)); assert(existingEnv->getGenericSignature().getPointer() == signature.getPointer()); assert(existingEnv->getOuterSubstitutions() == subs); assert(existingEnv->getOpenedExistentialUUID() == uuid); return existingEnv; } // Allocate and construct the new environment. unsigned numGenericParams = signature.getGenericParams().size(); size_t bytes = totalSizeToAlloc<SubstitutionMap, OpaqueEnvironmentData, ExistentialEnvironmentData, ElementEnvironmentData, Type>( 1, 0, 1, 0, numGenericParams); void *mem = ctx.Allocate(bytes, alignof(GenericEnvironment)); auto *genericEnv = new (mem) GenericEnvironment(signature, existential, subs, uuid); environments[key] = genericEnv; return genericEnv; } /// Create a new generic environment for an element archetype. 
GenericEnvironment * GenericEnvironment::forOpenedElement(GenericSignature signature, UUID uuid, CanGenericTypeParamType shapeClass, SubstitutionMap outerSubs) { auto &ctx = signature->getASTContext(); auto &openedElementEnvironments = ctx.getImpl().OpenedElementEnvironments; auto found = openedElementEnvironments.find(uuid); if (found != openedElementEnvironments.end()) { auto *existingEnv = found->second; assert(existingEnv->getGenericSignature().getPointer() == signature.getPointer()); assert(existingEnv->getOpenedElementShapeClass()->isEqual(shapeClass)); assert(existingEnv->getOpenedElementUUID() == uuid); return existingEnv; } // Allocate and construct the new environment. unsigned numGenericParams = signature.getGenericParams().size(); unsigned numOpenedParams = signature.getInnermostGenericParams().size(); size_t bytes = totalSizeToAlloc<SubstitutionMap, OpaqueEnvironmentData, ExistentialEnvironmentData, ElementEnvironmentData, Type>( 1, 0, 0, 1, numGenericParams + numOpenedParams); void *mem = ctx.Allocate(bytes, alignof(GenericEnvironment)); auto *genericEnv = new (mem) GenericEnvironment(signature, uuid, shapeClass, outerSubs); openedElementEnvironments[uuid] = genericEnv; return genericEnv; } void DeclName::CompoundDeclName::Profile(llvm::FoldingSetNodeID &id, DeclBaseName baseName, ArrayRef<Identifier> argumentNames) { id.AddPointer(baseName.getAsOpaquePointer()); id.AddInteger(argumentNames.size()); for (auto arg : argumentNames) id.AddPointer(arg.get()); } void DeclName::initialize(ASTContext &C, DeclBaseName baseName, ArrayRef<Identifier> argumentNames) { llvm::FoldingSetNodeID id; CompoundDeclName::Profile(id, baseName, argumentNames); void *insert = nullptr; if (CompoundDeclName *compoundName = C.getImpl().CompoundNames.FindNodeOrInsertPos(id, insert)) { BaseNameOrCompound = compoundName; return; } size_t size = CompoundDeclName::totalSizeToAlloc<Identifier>(argumentNames.size()); auto buf = C.Allocate(size, alignof(CompoundDeclName)); auto 
compoundName = new (buf) CompoundDeclName(baseName,argumentNames.size()); std::uninitialized_copy(argumentNames.begin(), argumentNames.end(), compoundName->getArgumentNames().begin()); BaseNameOrCompound = compoundName; C.getImpl().CompoundNames.InsertNode(compoundName, insert); } /// Build a compound value name given a base name and a set of argument names /// extracted from a parameter list. DeclName::DeclName(ASTContext &C, DeclBaseName baseName, ParameterList *paramList) { SmallVector<Identifier, 4> names; for (auto P : *paramList) names.push_back(P->getArgumentName()); initialize(C, baseName, names); } void DeclNameRef::SelectiveDeclNameRef::Profile(llvm::FoldingSetNodeID &id, Identifier moduleSelector, DeclName fullName) { ASSERT(!moduleSelector.empty() && "Looking up SelectiveDeclNameRef with empty module?"); id.AddPointer(moduleSelector.getAsOpaquePointer()); id.AddPointer(fullName.getOpaqueValue()); } void DeclNameRef::initialize(ASTContext &C, Identifier moduleSelector, DeclName fullName) { if (moduleSelector.empty()) { storage = fullName; return; } llvm::FoldingSetNodeID id; SelectiveDeclNameRef::Profile(id, moduleSelector, fullName); void *insert = nullptr; if (SelectiveDeclNameRef *selectiveRef = C.getImpl().SelectiveNameRefs.FindNodeOrInsertPos(id, insert)) { storage = selectiveRef; return; } auto buf = C.Allocate(sizeof(SelectiveDeclNameRef), alignof(SelectiveDeclNameRef)); auto selectiveRef = new (buf) SelectiveDeclNameRef(moduleSelector, fullName); storage = selectiveRef; C.getImpl().SelectiveNameRefs.InsertNode(selectiveRef, insert); } /// Find the implementation of the named type in the given module. static NominalTypeDecl *findUnderlyingTypeInModule(ASTContext &ctx, Identifier name, ModuleDecl *module) { // Find all of the declarations with this name in the Swift module. 
SmallVector<ValueDecl *, 1> results; module->lookupValue(name, NLKind::UnqualifiedLookup, results); for (auto result : results) { if (auto nominal = dyn_cast<NominalTypeDecl>(result)) return nominal; // Look through typealiases. if (auto typealias = dyn_cast<TypeAliasDecl>(result)) { return typealias->getDeclaredInterfaceType()->getAnyNominal(); } } return nullptr; } bool ForeignRepresentationInfo::isRepresentableAsOptional() const { switch (getKind()) { case ForeignRepresentableKind::None: llvm_unreachable("this type is not representable"); case ForeignRepresentableKind::Trivial: return Storage.getPointer() != 0; case ForeignRepresentableKind::Bridged: { auto KPK_ObjectiveCBridgeable = KnownProtocolKind::ObjectiveCBridgeable; ProtocolDecl *proto = getConformance()->getProtocol(); assert(proto->isSpecificProtocol(KPK_ObjectiveCBridgeable) && "unknown protocol; does it support optional?"); (void)proto; (void)KPK_ObjectiveCBridgeable; return true; } case ForeignRepresentableKind::BridgedError: return true; case ForeignRepresentableKind::Object: case ForeignRepresentableKind::StaticBridged: llvm_unreachable("unexpected kind in ForeignRepresentableCacheEntry"); } llvm_unreachable("Unhandled ForeignRepresentableKind in switch."); } ForeignRepresentationInfo ASTContext::getForeignRepresentationInfo(NominalTypeDecl *nominal, ForeignLanguage language, const DeclContext *dc) { // Local function to add a type with the given name and module as // trivially-representable. auto addTrivial = [&](Identifier name, ModuleDecl *module, bool allowOptional = false) { if (auto type = findUnderlyingTypeInModule(*this, name, module)) { auto info = ForeignRepresentationInfo::forTrivial(); if (allowOptional) info = ForeignRepresentationInfo::forTrivialWithOptional(); getImpl().ForeignRepresentableCache.insert({type, info}); } }; if (getImpl().ForeignRepresentableCache.empty()) { // Pre-populate the foreign-representable cache with known types. 
if (auto stdlib = getStdlibModule()) { addTrivial(getIdentifier("OpaquePointer"), stdlib, true); // Builtin types // FIXME: Layering violation to use the ClangImporter's define. #define MAP_BUILTIN_TYPE(CLANG_BUILTIN_KIND, SWIFT_TYPE_NAME) \ addTrivial(getIdentifier(#SWIFT_TYPE_NAME), stdlib); #include "swift/ClangImporter/BuiltinMappedTypes.def" // Even though we may never import types directly as Int or UInt // (e.g. on 64-bit Windows, where CLong maps to Int32 and // CLongLong to Int64), it's always possible to convert an Int // or UInt to a C type. addTrivial(getIdentifier("Int"), stdlib); addTrivial(getIdentifier("UInt"), stdlib); } if (auto darwin = getLoadedModule(Id_Darwin)) { // Note: DarwinBoolean is odd because it's bridged to Bool in APIs, // but can also be trivially bridged. addTrivial(getIdentifier("DarwinBoolean"), darwin); } if (auto winsdk = getLoadedModule(Id_WinSDK)) { // NOTE: WindowsBool is odd because it is bridged to Bool in APIs, but can // also be trivially bridged. addTrivial(getIdentifier("WindowsBool"), winsdk); } if (auto objectiveC = getLoadedModule(Id_ObjectiveC)) { addTrivial(Id_Selector, objectiveC, true); // Note: ObjCBool is odd because it's bridged to Bool in APIs, // but can also be trivially bridged. addTrivial(getIdentifier("ObjCBool"), objectiveC); addTrivial(getSwiftId(KnownFoundationEntity::NSZone), objectiveC, true); } if (auto coreGraphics = getLoadedModule(getIdentifier("CoreGraphics"))) { addTrivial(Id_CGFloat, coreGraphics); } if (auto coreFoundation = getLoadedModule(getIdentifier("CoreFoundation"))) { addTrivial(Id_CGFloat, coreFoundation); } // Pull SIMD types of size 2...4 from the SIMD module, if it exists. 
    // Pre-register the imported SIMD vector types (element counts 2 through
    // SWIFT_MAX_IMPORTED_SIMD_ELEMENTS) as trivially representable, if the
    // simd module has been loaded.
    if (auto simd = getLoadedModule(Id_simd)) {
#define MAP_SIMD_TYPE(BASENAME, _, __)                                         \
      {                                                                        \
        char name[] = #BASENAME "0";                                           \
        for (unsigned i = 2; i <= SWIFT_MAX_IMPORTED_SIMD_ELEMENTS; ++i) {     \
          *(std::end(name) - 2) = '0' + i;                                     \
          addTrivial(getIdentifier(name), simd);                               \
        }                                                                      \
      }
#include "swift/ClangImporter/SIMDMappedTypes.def"
    }
  }

  // Determine whether we know anything about this nominal type
  // yet. If we've never seen this nominal type before, or if we have
  // an out-of-date negative cached value, we'll have to go looking.
  auto known = getImpl().ForeignRepresentableCache.find(nominal);
  bool wasNotFoundInCache = known == getImpl().ForeignRepresentableCache.end();

  // For the REPL. We might have initialized the cache above before
  // CoreGraphics was loaded, e.g.:
  //   let s = ""       // Here we initialize the ForeignRepresentableCache.
  //   import Foundation
  //   let pt = CGPoint(x: 1.0, y: 2.0)   // Here we query for CGFloat.
  // Add CGFloat as trivial if we encounter it later.
  // If the type was not found, check if it would be found after having
  // recently loaded the module. Similar for types of other non-stdlib
  // modules.
auto conditionallyAddTrivial = [&](NominalTypeDecl *nominalDecl, Identifier typeName, Identifier moduleName, bool allowOptional = false) { if (nominal->getName() == typeName && wasNotFoundInCache) { if (auto module = getLoadedModule(moduleName)) { addTrivial(typeName, module, allowOptional); known = getImpl().ForeignRepresentableCache.find(nominal); wasNotFoundInCache = known == getImpl().ForeignRepresentableCache.end(); } } }; conditionallyAddTrivial(nominal, getIdentifier("DarwinBoolean") , Id_Darwin); conditionallyAddTrivial(nominal, getIdentifier("WindowsBool"), Id_WinSDK); conditionallyAddTrivial(nominal, Id_Selector, Id_ObjectiveC, true); conditionallyAddTrivial(nominal, getIdentifier("ObjCBool"), Id_ObjectiveC); conditionallyAddTrivial(nominal, getSwiftId(KnownFoundationEntity::NSZone), Id_ObjectiveC, true); conditionallyAddTrivial(nominal, Id_CGFloat, getIdentifier("CoreGraphics")); conditionallyAddTrivial(nominal, Id_CGFloat, getIdentifier("CoreFoundation")); #define MAP_SIMD_TYPE(BASENAME, _, __) \ { \ char name[] = #BASENAME "0"; \ for (unsigned i = 2; i <= SWIFT_MAX_IMPORTED_SIMD_ELEMENTS; ++i) { \ *(std::end(name) - 2) = '0' + i; \ conditionallyAddTrivial(nominal, getIdentifier(name), Id_simd); \ } \ } #include "swift/ClangImporter/SIMDMappedTypes.def" if (wasNotFoundInCache || (known->second.getKind() == ForeignRepresentableKind::None && known->second.getGeneration() < CurrentGeneration)) { std::optional<ForeignRepresentationInfo> result; // Look for a conformance to _ObjectiveCBridgeable (other than Optional's-- // we don't want to allow exposing APIs with double-optional types like // NSObject??, even though Optional is bridged to its underlying type). // // FIXME: We're implicitly depending on the fact that lookupConformance // is global, ignoring the module we provide for it. 
    // Optional itself is excluded: bridging it here would permit
    // double-optional types like NSObject??.
    if (nominal != dc->getASTContext().getOptionalDecl()) {
      if (auto objcBridgeable =
              getProtocol(KnownProtocolKind::ObjectiveCBridgeable)) {
        auto conformance = lookupConformance(
            nominal->getDeclaredInterfaceType(), objcBridgeable);
        if (conformance) {
          result =
              ForeignRepresentationInfo::forBridged(conformance.getConcrete());
        }
      }
    }

    // Error is bridged to NSError, when it's available.
    if (nominal == getErrorDecl() && getNSErrorDecl())
      result = ForeignRepresentationInfo::forBridgedError();

    // If we didn't find anything, mark the result as "None".
    if (!result)
      result = ForeignRepresentationInfo::forNone(CurrentGeneration);

    // Cache the result.
    known = getImpl().ForeignRepresentableCache.insert({ nominal, *result })
        .first;
  }

  // Map a cache entry to a result for this specific request.
  auto entry = known->second;
  if (entry.getKind() == ForeignRepresentableKind::None)
    return entry;

  // Extract the protocol conformance.
  auto conformance = entry.getConformance();

  // If the conformance is not visible, fail.
  if (conformance && !conformance->isVisibleFrom(dc))
    return ForeignRepresentationInfo::forNone();

  // Language-specific filtering.
  switch (language) {
  case ForeignLanguage::C:
    // Ignore _ObjectiveCBridgeable conformances in C.
    if (conformance &&
        conformance->getProtocol()->isSpecificProtocol(
            KnownProtocolKind::ObjectiveCBridgeable))
      return ForeignRepresentationInfo::forNone();

    // Ignore error bridging in C.
if (entry.getKind() == ForeignRepresentableKind::BridgedError) return ForeignRepresentationInfo::forNone(); LLVM_FALLTHROUGH; case ForeignLanguage::ObjectiveC: return entry; } llvm_unreachable("Unhandled ForeignLanguage in switch."); } bool ASTContext::isTypeBridgedInExternalModule( NominalTypeDecl *nominal) const { return (nominal == getBoolDecl() || nominal == getIntDecl() || nominal == getInt64Decl() || nominal == getInt32Decl() || nominal == getInt16Decl() || nominal == getInt8Decl() || nominal == getUIntDecl() || nominal == getUInt64Decl() || nominal == getUInt32Decl() || nominal == getUInt16Decl() || nominal == getUInt8Decl() || nominal == getFloatDecl() || nominal == getDoubleDecl() || nominal == getArrayDecl() || nominal == getCollectionDifferenceDecl() || (nominal->getDeclContext()->getAsDecl() == getCollectionDifferenceDecl() && nominal->getBaseName() == Id_Change) || nominal == getDictionaryDecl() || nominal == getSetDecl() || nominal == getStringDecl() || nominal == getSubstringDecl() || nominal == getErrorDecl() || nominal == getAnyHashableDecl() || // Foundation's overlay depends on the CoreGraphics overlay, but // CoreGraphics value types bridge to Foundation objects such as // NSValue and NSNumber, so to avoid circular dependencies, the // bridging implementations of CG types appear in the Foundation // module. nominal->getParentModule()->getName() == Id_CoreGraphics || nominal->getParentModule()->getName() == Id_CoreFoundation || // CoreMedia is a dependency of AVFoundation, but the bridged // NSValue implementations for CMTime, CMTimeRange, and // CMTimeMapping are provided by AVFoundation, and AVFoundation // gets upset if you don't use the NSValue subclasses its factory // methods instantiate. 
nominal->getParentModule()->getName() == Id_CoreMedia); } bool ASTContext::isObjCClassWithMultipleSwiftBridgedTypes(Type t) { auto clazz = t->getClassOrBoundGenericClass(); if (!clazz) return false; if (clazz == getNSErrorDecl()) return true; if (clazz == getNSNumberDecl()) return true; if (clazz == getNSValueDecl()) return true; return false; } Type ASTContext::getBridgedToObjC(const DeclContext *dc, Type type, Type *bridgedValueType) const { if (type->isBridgeableObjectType()) { if (bridgedValueType) *bridgedValueType = type; return type; } if (auto metaTy = type->getAs<MetatypeType>()) if (metaTy->getInstanceType()->mayHaveSuperclass()) return type; if (auto existentialMetaTy = type->getAs<ExistentialMetatypeType>()) if (existentialMetaTy->getInstanceType()->isObjCExistentialType()) return type; // Check whether the type is an existential that contains // Error. If so, it's bridged to NSError. if (type->isExistentialWithError()) { if (auto nsErrorTy = getNSErrorType()) { // The corresponding value type is Error. if (bridgedValueType) *bridgedValueType = getErrorExistentialType(); return nsErrorTy; } } // Try to find a conformance that will enable bridging. auto findConformance = [&](KnownProtocolKind known) -> ProtocolConformanceRef { // Don't ascribe any behavior to Optional other than what we explicitly // give it. We don't want things like AnyObject?? to work. if (type->isOptional()) return ProtocolConformanceRef::forInvalid(); // Find the protocol. auto proto = getProtocol(known); if (!proto) return ProtocolConformanceRef::forInvalid(); return lookupConformance(type, proto); }; // Do we conform to _ObjectiveCBridgeable? if (auto conformance = findConformance(KnownProtocolKind::ObjectiveCBridgeable)) { // The corresponding value type is... the type. if (bridgedValueType) *bridgedValueType = type; // Find the Objective-C class type we bridge to. 
Type witnessTy = conformance.getTypeWitnessByName(Id_ObjectiveCType); // If Objective-C import is broken, witness type would be a dependent member // with `<<error type>>` base. return (witnessTy && !witnessTy->hasError()) ? witnessTy : Type(); } // Do we conform to Error? if (findConformance(KnownProtocolKind::Error)) { // The corresponding value type is Error. if (bridgedValueType) *bridgedValueType = getErrorExistentialType(); // Bridge to NSError. if (auto nsErrorTy = getNSErrorType()) return nsErrorTy; } // No special bridging to Objective-C, but this can become an 'Any'. return Type(); } ClangTypeConverter &ASTContext::getClangTypeConverter() { auto &impl = getImpl(); if (!impl.Converter) { auto *cml = getClangModuleLoader(); impl.Converter.emplace(*this, cml->getClangASTContext(), LangOpts.Target); } return impl.Converter.value(); } const clang::Type * ASTContext::getClangFunctionType(ArrayRef<AnyFunctionType::Param> params, Type resultTy, FunctionTypeRepresentation trueRep) { return getClangTypeConverter().getFunctionType</*templateArgument=*/false>( params, resultTy, trueRep); } const clang::Type *ASTContext::getCanonicalClangFunctionType( ArrayRef<SILParameterInfo> params, std::optional<SILResultInfo> result, SILFunctionType::Representation trueRep) { auto *ty = getClangTypeConverter().getFunctionType</*templateArgument=*/false>( params, result, trueRep); return ty ? 
              ty->getCanonicalTypeInternal().getTypePtr() : nullptr;
}

/// Convert a list of Swift generic arguments into Clang template arguments
/// via the lazily-created ClangTypeConverter.
///
/// \returns null on success; otherwise an error describing why the
/// template instantiation failed.
std::unique_ptr<TemplateInstantiationError>
ASTContext::getClangTemplateArguments(
    const clang::TemplateParameterList *templateParams,
    ArrayRef<Type> genericArgs,
    SmallVectorImpl<clang::TemplateArgument> &templateArgs) {
  auto &impl = getImpl();
  if (!impl.Converter) {
    // Lazily construct the converter the first time it is needed.
    auto *cml = getClangModuleLoader();
    impl.Converter.emplace(*this, cml->getClangASTContext(), LangOpts.Target);
  }

  return impl.Converter->getClangTemplateArguments(templateParams, genericArgs,
                                                   templateArgs);
}

/// Find the Swift declaration corresponding to a Clang declaration that was
/// previously exported through the type converter, if any.
const Decl *
ASTContext::getSwiftDeclForExportedClangDecl(const clang::Decl *decl) {
  auto &impl = getImpl();

  // If we haven't exported anything yet, this must not be how we found
  // this declaration.
  if (!impl.Converter)
    return nullptr;

  return impl.Converter->getSwiftDeclForExportedClangDecl(decl);
}

/// Return the Clang type corresponding to \p ty for IR generation, or null
/// if the conversion produced no Clang type.
const clang::Type *
ASTContext::getClangTypeForIRGen(Type ty) {
  return getClangTypeConverter().convert(ty).getTypePtrOrNull();
}

/// Return the cached single-parameter 'Self' generic parameter list,
/// creating it on first use (see the note below: this exists for SIL mode).
GenericParamList *ASTContext::getSelfGenericParamList(DeclContext *dc) const {
  auto *theParamList = getImpl().SelfGenericParamList;
  if (theParamList)
    return theParamList;

  // Note: we always return a GenericParamList rooted at the first
  // DeclContext this was called with. Since this is just a giant
  // hack for SIL mode, that should be OK.
auto *selfParam = GenericTypeParamDecl::createImplicit( dc, Id_Self, /*depth*/ 0, /*index*/ 0, GenericTypeParamKind::Type); theParamList = GenericParamList::create( const_cast<ASTContext &>(*this), SourceLoc(), {selfParam}, SourceLoc()); getImpl().SelfGenericParamList = theParamList; return theParamList; } CanGenericSignature ASTContext::getSingleGenericParameterSignature() const { if (auto theSig = getImpl().SingleGenericParameterSignature) return theSig; auto sig = GenericSignature::get({TheSelfType}, { }); auto canonicalSig = CanGenericSignature(sig); getImpl().SingleGenericParameterSignature = canonicalSig; return canonicalSig; } OpenedExistentialSignature ASTContext::getOpenedExistentialSignature(Type type) { assert(type->isExistentialType()); auto canType = type->getCanonicalType(); // The constraint type might contain type variables. auto properties = canType->getRecursiveProperties(); auto arena = getArena(properties); // Check the cache. const auto &sigs = getImpl().getArena(arena).ExistentialSignatures; auto found = sigs.find(canType); if (found != sigs.end()) return found->second; OpenedExistentialSignature existentialSig; // Generalize the existential type, to move type variables and primary // archetypes into the substitution map. auto gen = ExistentialTypeGeneralization::get(canType); existentialSig.Shape = gen.Shape->getCanonicalType(); existentialSig.Generalization = gen.Generalization; // Now, we have an existential type written with type parameters only. // Open the generalization signature by adding a new generic parameter // for `Self`. 
auto parentSig = gen.Generalization.getGenericSignature(); auto canParentSig = parentSig.getCanonicalSignature(); LocalArchetypeRequirementCollector collector(*this, canParentSig); collector.addOpenedExistential(gen.Shape); existentialSig.OpenedSig = buildGenericSignature( *this, collector.OuterSig, collector.Params, collector.Requirements, /*allowInverses=*/true).getCanonicalSignature(); // Stash the `Self` type. existentialSig.SelfType = existentialSig.OpenedSig.getGenericParams().back() ->getCanonicalType(); // Cache the result. auto result = getImpl().getArena(arena).ExistentialSignatures.insert( std::make_pair(canType, existentialSig)); ASSERT(result.second); return existentialSig; } CanGenericSignature ASTContext::getOpenedElementSignature(CanGenericSignature baseGenericSig, CanGenericTypeParamType shapeClass) { auto &sigs = getImpl().ElementSignatures; auto key = std::make_pair(shapeClass, baseGenericSig.getPointer()); auto found = sigs.find(key); if (found != sigs.end()) return found->second; LocalArchetypeRequirementCollector collector(*this, baseGenericSig); collector.addOpenedElement(shapeClass); auto elementSig = buildGenericSignature( *this, collector.OuterSig, collector.Params, collector.Requirements, /*allowInverses=*/false).getCanonicalSignature(); sigs[key] = elementSig; return elementSig; } GenericSignature ASTContext::getOverrideGenericSignature(const ValueDecl *base, const ValueDecl *derived) { assert(isa<AbstractFunctionDecl>(base) || isa<SubscriptDecl>(base)); assert(isa<AbstractFunctionDecl>(derived) || isa<SubscriptDecl>(derived)); const auto baseNominal = base->getDeclContext()->getSelfNominalTypeDecl(); const auto derivedNominal = derived->getDeclContext()->getSelfNominalTypeDecl(); assert(baseNominal != nullptr); assert(derivedNominal != nullptr); const auto baseGenericSig = base->getAsGenericContext()->getGenericSignature(); const auto *derivedParams = derived->getAsGenericContext()->getGenericParams(); return 
getOverrideGenericSignature(baseNominal, derivedNominal, baseGenericSig, derivedParams); } GenericSignature ASTContext::getOverrideGenericSignature(const NominalTypeDecl *baseNominal, const NominalTypeDecl *derivedNominal, GenericSignature baseGenericSig, const GenericParamList *derivedParams) { if (baseNominal == derivedNominal) return baseGenericSig; const auto derivedNominalSig = derivedNominal->getGenericSignature(); if (derivedNominalSig.isNull() && derivedParams == nullptr) return nullptr; if (baseGenericSig.isNull()) return derivedNominalSig; auto key = OverrideSignatureKey(baseGenericSig, baseNominal, derivedNominal, derivedParams); if (getImpl().overrideSigCache.find(key) != getImpl().overrideSigCache.end()) { return getImpl().overrideSigCache.lookup(key); } SmallVector<GenericTypeParamType *, 2> addedGenericParams; if (derivedParams) { for (auto gp : *derivedParams) { addedGenericParams.push_back( gp->getDeclaredInterfaceType()->castTo<GenericTypeParamType>()); } } SmallVector<Requirement, 2> addedRequirements; OverrideSubsInfo info(baseNominal, derivedNominal, baseGenericSig, derivedParams); for (auto reqt : baseGenericSig.getRequirements()) { auto substReqt = reqt.subst(QueryOverrideSubs(info), LookUpConformanceInOverrideSubs(info)); addedRequirements.push_back(substReqt); } auto genericSig = buildGenericSignature(*this, derivedNominalSig, std::move(addedGenericParams), std::move(addedRequirements), /*allowInverses=*/false); getImpl().overrideSigCache.insert(std::make_pair(key, genericSig)); return genericSig; } bool ASTContext::overrideGenericSignatureReqsSatisfied( const ValueDecl *base, const ValueDecl *derived, const OverrideGenericSignatureReqCheck direction) { auto *baseCtx = base->getAsGenericContext(); auto *derivedCtx = derived->getAsGenericContext(); if (baseCtx->hasGenericParamList() != derivedCtx->hasGenericParamList()) return false; if (baseCtx->hasGenericParamList() && (baseCtx->getGenericParams()->size() != 
derivedCtx->getGenericParams()->size())) return false; auto sig = getOverrideGenericSignature(base, derived); if (!sig) return true; auto derivedSig = derivedCtx->getGenericSignature(); switch (direction) { case OverrideGenericSignatureReqCheck::BaseReqSatisfiedByDerived: return sig.requirementsNotSatisfiedBy(derivedSig).empty(); case OverrideGenericSignatureReqCheck::DerivedReqSatisfiedByBase: return derivedSig.requirementsNotSatisfiedBy(sig).empty(); } llvm_unreachable("Unhandled OverrideGenericSignatureReqCheck in switch"); } void ASTContext::registerIRGenSILTransforms(SILTransformCtors ctors) { assert(getImpl().IRGenSILPasses.empty() && "Already registered"); getImpl().IRGenSILPasses = ctors; } ASTContext::SILTransformCtors ASTContext::getIRGenSILTransforms() const { auto passes = getImpl().IRGenSILPasses; assert(!passes.empty() && "Didn't register the necessary passes"); return passes; } std::string ASTContext::getEntryPointFunctionName() const { // Set default entry point name // // Usually the main entrypoint is "main" but WebAssembly's C ABI uses // "__main_argc_argv" for `int (int, char **)` signature and Swift's // main entrypoint always takes argc/argv. // See https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md std::string defaultName = LangOpts.Target.isWasm() ? "__main_argc_argv" : "main"; return LangOpts.entryPointFunctionName.value_or(defaultName); } SILLayout *SILLayout::get(ASTContext &C, CanGenericSignature Generics, ArrayRef<SILField> Fields, bool CapturesGenericEnvironment) { // The "captures generic environment" flag is meaningless if there are // no generic arguments to capture. if (!Generics || Generics->areAllParamsConcrete()) { CapturesGenericEnvironment = false; } // Profile the layout parameters. llvm::FoldingSetNodeID id; Profile(id, Generics, Fields, CapturesGenericEnvironment); // Return an existing layout if there is one. 
void *insertPos; auto &Layouts = C.getImpl().SILLayouts; if (auto existing = Layouts.FindNodeOrInsertPos(id, insertPos)) return existing; // Allocate a new layout. void *memory = C.Allocate(totalSizeToAlloc<SILField>(Fields.size()), alignof(SILLayout)); auto newLayout = ::new (memory) SILLayout(Generics, Fields, CapturesGenericEnvironment); Layouts.InsertNode(newLayout, insertPos); return newLayout; } SILLayout * SILLayout::withMutable(ASTContext &ctx, std::initializer_list<std::pair<unsigned, bool>> fieldIndexMutabilityUpdatePairs) const { // Copy the fields, setting the mutable field to newMutable. SmallVector<SILField, 8> newFields; llvm::copy(getFields(), std::back_inserter(newFields)); for (auto p : fieldIndexMutabilityUpdatePairs) { newFields[p.first].setIsMutable(p.second); } return SILLayout::get(ctx, getGenericSignature(), newFields, capturesGenericEnvironment()); } CanSILBoxType SILBoxType::get(ASTContext &C, SILLayout *Layout, SubstitutionMap Substitutions) { // Canonicalize substitutions. Substitutions = Substitutions.getCanonical(); // Return an existing layout if there is one. void *insertPos; auto &SILBoxTypes = C.getImpl().SILBoxTypes; llvm::FoldingSetNodeID id; Profile(id, Layout, Substitutions); if (auto existing = SILBoxTypes.FindNodeOrInsertPos(id, insertPos)) return CanSILBoxType(existing); auto newBox = new (C, AllocationArena::Permanent) SILBoxType(C, Layout, Substitutions); SILBoxTypes.InsertNode(newBox, insertPos); return CanSILBoxType(newBox); } /// TODO: Transitional factory to present the single-type SILBoxType::get /// interface. 
CanSILBoxType SILBoxType::get(CanType boxedType) { auto &ctx = boxedType->getASTContext(); auto singleGenericParamSignature = ctx.getSingleGenericParameterSignature(); auto genericParam = singleGenericParamSignature.getGenericParams()[0]; auto layout = SILLayout::get(ctx, singleGenericParamSignature, SILField(CanType(genericParam), /*mutable*/ true), /*captures generic env*/ false); auto subMap = SubstitutionMap::get(singleGenericParamSignature, boxedType, LookUpConformanceInModule()); return get(boxedType->getASTContext(), layout, subMap); } CanSILBoxType SILBoxType::withMutable(ASTContext &ctx, std::initializer_list<std::pair<unsigned, bool>> fieldIndexMutabilityUpdatePairs) const { return SILBoxType::get( ctx, getLayout()->withMutable(ctx, fieldIndexMutabilityUpdatePairs), getSubstitutions()); } ArrayRef<SILField> SILBoxType::getFields() const { return getLayout()->getFields(); } bool SILBoxType::isFieldMutable(unsigned index) const { return getFields()[index].isMutable(); } LayoutConstraint LayoutConstraint::getLayoutConstraint(LayoutConstraintKind Kind, ASTContext &C) { return getLayoutConstraint(Kind, 0, 0, C); } LayoutConstraint LayoutConstraint::getLayoutConstraint(LayoutConstraintKind Kind, unsigned SizeInBits, unsigned Alignment, ASTContext &C) { if (!LayoutConstraintInfo::isKnownSizeTrivial(Kind) && !LayoutConstraintInfo::isTrivialStride(Kind)) { assert(SizeInBits == 0); assert(Alignment == 0); return getLayoutConstraint(Kind); } // Check to see if we've already seen this tuple before. llvm::FoldingSetNodeID ID; LayoutConstraintInfo::Profile(ID, Kind, SizeInBits, Alignment); void *InsertPos = nullptr; if (LayoutConstraintInfo *Layout = C.getImpl().getArena(AllocationArena::Permanent) .LayoutConstraints.FindNodeOrInsertPos(ID, InsertPos)) return LayoutConstraint(Layout); LayoutConstraintInfo *New = LayoutConstraintInfo::isTrivial(Kind) ? 
new (C, AllocationArena::Permanent) LayoutConstraintInfo(Kind, SizeInBits, Alignment) : new (C, AllocationArena::Permanent) LayoutConstraintInfo(Kind); C.getImpl().getArena(AllocationArena::Permanent) .LayoutConstraints.InsertNode(New, InsertPos); return LayoutConstraint(New); } Type &ASTContext::getDefaultTypeRequestCache(SourceFile *SF, KnownProtocolKind kind) { return getImpl().DefaultTypeRequestCaches[SF][size_t(kind)]; } Type ASTContext::getSideCachedPropertyWrapperBackingPropertyType( VarDecl *var) const { return getImpl().PropertyWrapperBackingVarTypes[var]; } void ASTContext::setSideCachedPropertyWrapperBackingPropertyType( VarDecl *var, Type type) { assert(!getImpl().PropertyWrapperBackingVarTypes[var] || getImpl().PropertyWrapperBackingVarTypes[var]->isEqual(type)); getImpl().PropertyWrapperBackingVarTypes[var] = type; } VarDecl *VarDecl::getOriginalWrappedProperty( std::optional<PropertyWrapperSynthesizedPropertyKind> kind) const { if (!Bits.VarDecl.IsPropertyWrapperBackingProperty) return nullptr; auto *original = getOriginalVarForBackingStorage(); ASSERT(original); if (!kind) return original; auto wrapperInfo = original->getPropertyWrapperAuxiliaryVariables(); switch (*kind) { case PropertyWrapperSynthesizedPropertyKind::Backing: return this == wrapperInfo.backingVar ? original : nullptr; case PropertyWrapperSynthesizedPropertyKind::Projection: return this == wrapperInfo.projectionVar ? 
original : nullptr; } llvm_unreachable("covered switch"); } void VarDecl::setOriginalWrappedProperty(VarDecl *originalProperty) { Bits.VarDecl.IsPropertyWrapperBackingProperty = true; ASTContext &ctx = getASTContext(); assert(ctx.getImpl().OriginalVarsForBackingStorage.count(this) == 0); ctx.getImpl().OriginalVarsForBackingStorage[this] = originalProperty; } void VarDecl::setLazyStorageFor(VarDecl *VD) { Bits.VarDecl.IsLazyStorageProperty = true; ASTContext &ctx = getASTContext(); ASSERT(ctx.getImpl().OriginalVarsForBackingStorage.count(this) == 0); ctx.getImpl().OriginalVarsForBackingStorage[this] = VD; } VarDecl *VarDecl::getOriginalVarForBackingStorage() const { const auto &map = getASTContext().getImpl().OriginalVarsForBackingStorage; auto iter = map.find(this); if (iter == map.end()) return nullptr; return iter->second; } #ifndef NDEBUG static bool isSourceLocInOrignalBuffer(const Decl *D, SourceLoc Loc) { assert(Loc.isValid()); auto bufferID = D->getDeclContext()->getParentSourceFile()->getBufferID(); auto &SM = D->getASTContext().SourceMgr; return SM.getRangeForBuffer(bufferID).contains(Loc); } #endif void AbstractFunctionDecl::keepOriginalBodySourceRange() { auto &impl = getASTContext().getImpl(); auto result = impl.OriginalBodySourceRanges.insert({this, getBodySourceRange()}); assert((!result.second || result.first->getSecond().isInvalid() || isSourceLocInOrignalBuffer(this, result.first->getSecond().Start)) && "This function must be called before setting new body range"); (void)result; } SourceRange AbstractFunctionDecl::getOriginalBodySourceRange() const { auto &impl = getASTContext().getImpl(); auto found = impl.OriginalBodySourceRanges.find(this); if (found != impl.OriginalBodySourceRanges.end()) { return found->getSecond(); } else { return getBodySourceRange(); } } IndexSubset * IndexSubset::get(ASTContext &ctx, const SmallBitVector &indices) { auto &foldingSet = ctx.getImpl().IndexSubsets; llvm::FoldingSetNodeID id; unsigned capacity = 
indices.size(); id.AddInteger(capacity); for (unsigned index : indices.set_bits()) id.AddInteger(index); void *insertPos = nullptr; auto *existing = foldingSet.FindNodeOrInsertPos(id, insertPos); if (existing) return existing; auto sizeToAlloc = sizeof(IndexSubset) + getNumBytesNeededForCapacity(capacity); auto *buf = reinterpret_cast<IndexSubset *>( ctx.Allocate(sizeToAlloc, alignof(IndexSubset))); auto *newNode = new (buf) IndexSubset(indices); foldingSet.InsertNode(newNode, insertPos); return newNode; } AutoDiffDerivativeFunctionIdentifier *AutoDiffDerivativeFunctionIdentifier::get( AutoDiffDerivativeFunctionKind kind, IndexSubset *parameterIndices, GenericSignature derivativeGenericSignature, ASTContext &C) { assert(parameterIndices); auto &foldingSet = C.getImpl().AutoDiffDerivativeFunctionIdentifiers; llvm::FoldingSetNodeID id; id.AddInteger((unsigned)kind); id.AddPointer(parameterIndices); auto derivativeCanGenSig = derivativeGenericSignature.getCanonicalSignature(); id.AddPointer(derivativeCanGenSig.getPointer()); void *insertPos; auto *existing = foldingSet.FindNodeOrInsertPos(id, insertPos); if (existing) return existing; void *mem = C.Allocate(sizeof(AutoDiffDerivativeFunctionIdentifier), alignof(AutoDiffDerivativeFunctionIdentifier)); auto *newNode = ::new (mem) AutoDiffDerivativeFunctionIdentifier( kind, parameterIndices, derivativeGenericSignature); foldingSet.InsertNode(newNode, insertPos); return newNode; } llvm::LLVMContext &ASTContext::getIntrinsicScratchContext() const { return *getImpl().IntrinsicScratchContext.get(); } bool ASTContext::isASCIIString(StringRef s) const { for (unsigned char c : s) { if (c > 127) { return false; } } return true; } clang::DarwinSDKInfo *ASTContext::getDarwinSDKInfo() const { if (!getImpl().SDKInfo) { auto SDKInfoOrErr = clang::parseDarwinSDKInfo(*SourceMgr.getFileSystem(), SearchPathOpts.SDKPath); if (!SDKInfoOrErr) { llvm::handleAllErrors(SDKInfoOrErr.takeError(), [](const llvm::ErrorInfoBase &) { // Ignore the 
error for now.. }); getImpl().SDKInfo.emplace(); } else if (!*SDKInfoOrErr) { getImpl().SDKInfo.emplace(); } else { getImpl().SDKInfo.emplace( std::make_unique<clang::DarwinSDKInfo>(**SDKInfoOrErr)); } } return getImpl().SDKInfo->get(); } /// The special Builtin.TheTupleType, which parents tuple extensions and /// conformances. BuiltinTupleDecl *ASTContext::getBuiltinTupleDecl() { auto &result = getImpl().TheTupleTypeDecl; if (result) return result; auto *dc = &TheBuiltinModule->getMainFile(FileUnitKind::Builtin); result = new (*this) BuiltinTupleDecl(Id_TheTupleType, dc); result->setAccess(AccessLevel::Public); // Avoid going through InferredGenericSignatureRequest and directly set the // generic signature to <each Element> { GenericParamList *list = result->getGenericParams(); assert(list->size() == 1); auto paramTy = (*list->begin())->getDeclaredInterfaceType() ->castTo<GenericTypeParamType>(); auto baseSig = GenericSignature::get({paramTy}, {}); result->setGenericSignature(baseSig); } // Cook up conditional conformances to Sendable and Copyable. auto buildFakeExtension = [&](ProtocolDecl *proto) { auto protoTy = proto->getDeclaredInterfaceType(); // extension Builtin.TheTupleType: P { ... } SmallVector<InheritedEntry, 1> inherited; inherited.emplace_back(TypeLoc::withoutLoc(protoTy)); auto *ext = ExtensionDecl::create(*this, SourceLoc(), nullptr, AllocateCopy(inherited), dc, nullptr); // <each T where repeat each T: P> auto genericSig = result->getGenericSignature(); auto params = genericSig.getGenericParams(); assert(params.size() == 1); Requirement req(RequirementKind::Conformance, params[0], protoTy); genericSig = GenericSignature::get(params, req); ext->setGenericSignature(genericSig); // Bind the extension. 
evaluator.cacheOutput(ExtendedTypeRequest{ext}, result->getDeclaredInterfaceType()); ext->setExtendedNominal(result); result->addExtension(ext); }; if (auto *proto = getProtocol(KnownProtocolKind::Sendable)) buildFakeExtension(proto); if (auto *proto = getProtocol(KnownProtocolKind::Copyable)) buildFakeExtension(proto); if (auto *proto = getProtocol(KnownProtocolKind::Escapable)) buildFakeExtension(proto); if (auto *proto = getProtocol(KnownProtocolKind::BitwiseCopyable)) buildFakeExtension(proto); return result; } /// The declared interface type of Builtin.TheTupleType. BuiltinTupleType *ASTContext::getBuiltinTupleType() { auto &result = getImpl().TheTupleType; if (result) return result; result = new (*this) BuiltinTupleType(getBuiltinTupleDecl(), *this); return result; } void ASTContext::setPluginLoader(std::unique_ptr<PluginLoader> loader) { getImpl().Plugins = std::move(loader); } PluginLoader &ASTContext::getPluginLoader() { assert(getImpl().Plugins && "PluginLoader must be setup before using"); return *getImpl().Plugins; } Type ASTContext::getNamedSwiftType(ModuleDecl *module, StringRef name) { if (!module) return Type(); // Look for the type. Identifier identifier = getIdentifier(name); SmallVector<ValueDecl *, 2> results; // Check if the lookup we're about to perform a lookup within is // a Clang module. for (auto *file : module->getFiles()) { if (auto clangUnit = dyn_cast<ClangModuleUnit>(file)) { // If we have an overlay, look in the overlay. Otherwise, skip // the lookup to avoid infinite recursion. 
if (auto module = clangUnit->getOverlayModule()) module->lookupValue(identifier, NLKind::UnqualifiedLookup, results); } else { file->lookupValue(identifier, NLKind::UnqualifiedLookup, { }, results); } } if (results.size() != 1) return Type(); auto decl = dyn_cast<TypeDecl>(results.front()); if (!decl) return Type(); assert(!decl->hasClangNode() && "picked up the original type?"); if (auto *nominalDecl = dyn_cast<NominalTypeDecl>(decl)) return nominalDecl->getDeclaredType(); return decl->getDeclaredInterfaceType(); } /// Map a `ValueOwnership` to the corresponding ABI-stable constant used by /// runtime metadata. ParameterOwnership swift::asParameterOwnership(ValueOwnership o) { switch (o) { case ValueOwnership::Default: return ParameterOwnership::Default; case ValueOwnership::Shared: return ParameterOwnership::Shared; case ValueOwnership::InOut: return ParameterOwnership::InOut; case ValueOwnership::Owned: return ParameterOwnership::Owned; } llvm_unreachable("exhaustive switch"); } ValueOwnership swift::asValueOwnership(ParameterOwnership o) { switch (o) { case ParameterOwnership::Default: return ValueOwnership::Default; case ParameterOwnership::Shared: return ValueOwnership::Shared; case ParameterOwnership::InOut: return ValueOwnership::InOut; case ParameterOwnership::Owned: return ValueOwnership::Owned; } llvm_unreachable("exhaustive switch"); } AvailabilityDomain ASTContext::getTargetAvailabilityDomain() const { auto platform = swift::targetPlatform(LangOpts); if (platform != PlatformKind::none) return AvailabilityDomain::forPlatform(platform); // Fall back to the universal domain for triples without a platform. 
return AvailabilityDomain::forUniversal(); } GenericSignature & ASTContext::getCachedBuiltinGenericTypeSignature(TypeKind kind) { ASSERT(kind >= TypeKind::First_BuiltinGenericType && kind <= TypeKind::Last_BuiltinGenericType && "not a builtin generic type kind"); return getImpl().BuiltinGenericTypeSignatures [unsigned(kind) - unsigned(TypeKind::First_BuiltinGenericType)]; }
cpp
github
https://github.com/apple/swift
lib/AST/ASTContext.cpp
# /* Copyright (C) 2001 # * Housemarque Oy # * http://www.housemarque.com # * # * Distributed under the Boost Software License, Version 1.0. (See # * accompanying file LICENSE_1_0.txt or copy at # * http://www.boost.org/LICENSE_1_0.txt) # */ # # /* Revised by Paul Mensonides (2002) */ # # /* See http://www.boost.org/libs/preprocessor for documentation. */ # # ifndef BOOST_PREPROCESSOR_HPP # define BOOST_PREPROCESSOR_HPP # # include <boost/preprocessor/library.hpp> # # endif
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/preprocessor.hpp
apiVersion: v1 kind: Service metadata: name: prune-svc labels: prune-group: "true" spec: selector: prune-group-nomatch: "true" ports: - port: 80 protocol: TCP
unknown
github
https://github.com/kubernetes/kubernetes
hack/testdata/prune/svc.yaml
# Copyright (C) 2013 Project Hatohol # # This file is part of Hatohol. # # Hatohol is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License, version 3 # as published by the Free Software Foundation. # # Hatohol is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with Hatohol. If not, see # <http://www.gnu.org/licenses/>. # Django settings for hatohol project. import os import logging PROJECT_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'hatohol_client', 'USER': 'hatohol', 'PASSWORD': 'hatohol', 'HOST': '', # Set to empty string for localhost. 'PORT': '', # Set to empty string for default. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en//ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. 
USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_HOME, 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '9xkobcf%(rj(u54^zf7+-c^@+59c9dqg&amp;he2ue65v88z&amp;dyy_k' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'hatohol.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'hatohol.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or # "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. PROJECT_HOME, ) INSTALLED_APPS = ( # 'django.contrib.auth', # 'django.contrib.contenttypes', # 'django.contrib.sessions', # 'django.contrib.sites', # 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'hatohol', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'WARNING', 'class': 'logging.StreamHandler', }, 'syslog': { 'level': 'INFO', 'class': 'logging.handlers.SysLogHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'hatohol': { 'handlers': ['syslog', 'console'], 'level': 'INFO', 'propagate': True, }, 'viewer': { 'handlers': ['syslog', 'console'], 'level': 'INFO', 'propagate': True, } } } LOCALE_PATHS = ( os.path.join(PROJECT_HOME, "conf", "locale"), )
unknown
codeparrot/codeparrot-clean
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Nova Auth Middleware. """ import webob.dec import webob.exc from nova import context from nova import flags from nova import wsgi FLAGS = flags.FLAGS flags.DECLARE('use_forwarded_for', 'nova.api.auth') class NovaKeystoneContext(wsgi.Middleware): """Make a request context from keystone headers""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: user_id = req.headers['X_USER'] except KeyError: return webob.exc.HTTPUnauthorized() # get the roles roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] project_id = req.headers['X_TENANT'] # Get the auth token auth_token = req.headers.get('X_AUTH_TOKEN', req.headers.get('X_STORAGE_TOKEN')) # Build a context, including the auth_token... remote_address = getattr(req, 'remote_address', '127.0.0.1') remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, roles=roles, auth_token=auth_token, strategy='keystone', remote_address=remote_address) req.environ['nova.context'] = ctx return self.application
unknown
codeparrot/codeparrot-clean
#from flam import * from aspects import * from features import * from elixir import * class Page(Entity): name = Field(Unicode(128), primary_key=True) text = Field(Text, required=True) author = ManyToOne('Author', required=True) attachments = OneToMany('Attachment') class Attachment(Entity): filename = Field(Unicode(255), primary_key=True) data = Field(Binary) page = ManyToOne('Page', primary_key=True) class Author(Entity): username = Field(Unicode(128), primary_key=True) password = Field(Password(128)) pages = OneToMany('Page') # EntityLogic abstracts "public" operations on a base Entity. This allows # the business logic to be separated from the controller/veiw. EntityLogic # itself provides the basic CRUD operations. class AuthorLogic(EntityLogic): entity = Author def authenticate(self, username, password): return self.read(username).password == password # Provide a controller on top of Logic. class AuthorHTMLController(LogicController): logic = AuthorLogic class LoginForm(Form): # Ensures that "username" is a key for Author user = Validate(AuthorLogic) password = Validate(Password(128)) @route('/login', endpoint='login') @form(LoginForm) def login(self, req, form, user=None, password=None): if user.password == password: req.session['authenticated'] = 1 return RedirectResponse(endpoint='root') return TemplateResponse('login.html', **form.data) @route('/logout', endpoint='logout') def logout(req): req.session['authenticated'] = 0 return RedirectResponse(endpoint='root') def setup(app): # Setup JSON handlers under /index.json and /wiki/<page>.json. app.expose(JSONController(Page, '/index.json', '/<name>')) # Setup HTML views and forms under /wiki/<page>. If page_<op>.html templates # are available they will be used, otherwise default views will be rendered. app.expose(HTMLController(Page, '[<name>]')) # SA filters are built up from the free variables in the route, parsed from # right to left. If a name is a column on the model it is filtered on. 
If # the name is a relationship, the relationships primary key is looked up # and the related object becomes the focus model. These two steps are # repeated for all free variables. app.expose(JSONController(Attachment, '<page>/attachment/[<filename>]')) app.expose(AuthorHTMLController()) app.expose(logout)
unknown
codeparrot/codeparrot-clean
"""Common utilities for Numba operations""" from __future__ import annotations import inspect import types from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError if TYPE_CHECKING: from collections.abc import Callable GLOBAL_USE_NUMBA: bool = False def maybe_use_numba(engine: str | None) -> bool: """Signal whether to use numba routines.""" return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA) def set_use_numba(enable: bool = False) -> None: global GLOBAL_USE_NUMBA if enable: import_optional_dependency("numba") GLOBAL_USE_NUMBA = enable def get_jit_arguments(engine_kwargs: dict[str, bool] | None = None) -> dict[str, bool]: """ Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. Parameters ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT Returns ------- dict[str, bool] nopython, nogil, parallel Raises ------ NumbaUtilError """ if engine_kwargs is None: engine_kwargs = {} nopython = engine_kwargs.get("nopython", True) nogil = engine_kwargs.get("nogil", False) parallel = engine_kwargs.get("parallel", False) return {"nopython": nopython, "nogil": nogil, "parallel": parallel} def jit_user_function(func: Callable) -> Callable: """ If user function is not jitted already, mark the user's function as jitable. 
Parameters ---------- func : function user defined function Returns ------- function Numba JITed function, or function marked as JITable by numba """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func elif getattr(np, func.__name__, False) is func or isinstance( func, types.BuiltinFunctionType ): # Not necessary to jit builtins or np functions # This will mess up register_jitable numba_func = func else: numba_func = numba.extending.register_jitable(func) return numba_func _sentinel = object() def prepare_function_arguments( func: Callable, args: tuple, kwargs: dict, *, num_required_args: int ) -> tuple[tuple, dict]: """ Prepare arguments for jitted function. As numba functions do not support kwargs, we try to move kwargs into args if possible. Parameters ---------- func : function User defined function args : tuple User input positional arguments kwargs : dict User input keyword arguments num_required_args : int The number of leading positional arguments we will pass to udf. These are not supplied by the user. e.g. for groupby we require "values", "index" as the first two arguments: `numba_func(group, group_index, *args)`, in this case num_required_args=2. See :func:`pandas.core.groupby.numba_.generate_numba_agg_func` Returns ------- tuple[tuple, dict] args, kwargs """ if not kwargs: return args, kwargs # the udf should have this pattern: def udf(arg1, arg2, ..., *args, **kwargs):... signature = inspect.signature(func) arguments = signature.bind(*[_sentinel] * num_required_args, *args, **kwargs) arguments.apply_defaults() # Ref: https://peps.python.org/pep-0362/ # Arguments which could be passed as part of either *args or **kwargs # will be included only in the BoundArguments.args attribute. 
args = arguments.args kwargs = arguments.kwargs if kwargs: # Note: in case numba supports keyword-only arguments in # a future version, we should remove this check. But this # seems unlikely to happen soon. raise NumbaUtilError( "numba does not support keyword-only arguments" "https://github.com/numba/numba/issues/2916, " "https://github.com/numba/numba/issues/6846" ) args = args[num_required_args:] return args, kwargs
python
github
https://github.com/pandas-dev/pandas
pandas/core/util/numba_.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools import uuid from django import http from mox import IsA # noqa from novaclient.v1_1 import floating_ip_pools from openstack_dashboard import api from openstack_dashboard.test import helpers as test class NetworkClientTestCase(test.APITestCase): def test_networkclient_no_neutron(self): self.mox.StubOutWithMock(api.base, 'is_service_enabled') api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \ .AndReturn(False) self.mox.ReplayAll() nc = api.network.NetworkClient(self.request) self.assertIsInstance(nc.floating_ips, api.nova.FloatingIpManager) self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager) def test_networkclient_neutron(self): self.mox.StubOutWithMock(api.base, 'is_service_enabled') api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \ .AndReturn(True) self.neutronclient = self.stub_neutronclient() self.neutronclient.list_extensions() \ .AndReturn({'extensions': self.api_extensions.list()}) self.mox.ReplayAll() nc = api.network.NetworkClient(self.request) self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager) self.assertIsInstance(nc.secgroups, api.neutron.SecurityGroupManager) def test_networkclient_neutron_with_nova_security_group(self): self.mox.StubOutWithMock(api.base, 'is_service_enabled') api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \ .AndReturn(True) 
self.neutronclient = self.stub_neutronclient() self.neutronclient.list_extensions().AndReturn({'extensions': []}) self.mox.ReplayAll() nc = api.network.NetworkClient(self.request) self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager) self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager) class NetworkApiNovaTestBase(test.APITestCase): def setUp(self): super(NetworkApiNovaTestBase, self).setUp() self.mox.StubOutWithMock(api.base, 'is_service_enabled') api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \ .AndReturn(False) class NetworkApiNovaSecurityGroupTests(NetworkApiNovaTestBase): def test_server_update_security_groups(self): all_secgroups = self.security_groups.list() added_secgroup = all_secgroups[2] rm_secgroup = all_secgroups[0] cur_secgroups_raw = [{'id': sg.id, 'name': sg.name, 'rules': []} for sg in all_secgroups[0:2]] cur_secgroups_ret = {'security_groups': cur_secgroups_raw} new_sg_ids = [sg.id for sg in all_secgroups[1:3]] instance_id = self.servers.first().id novaclient = self.stub_novaclient() novaclient.security_groups = self.mox.CreateMockAnything() novaclient.servers = self.mox.CreateMockAnything() novaclient.client = self.mox.CreateMockAnything() novaclient.security_groups.list().AndReturn(all_secgroups) url = '/servers/%s/os-security-groups' % instance_id novaclient.client.get(url).AndReturn((200, cur_secgroups_ret)) novaclient.servers.add_security_group(instance_id, added_secgroup.name) novaclient.servers.remove_security_group(instance_id, rm_secgroup.name) self.mox.ReplayAll() api.network.server_update_security_groups( self.request, instance_id, new_sg_ids) class NetworkApiNovaFloatingIpTests(NetworkApiNovaTestBase): def test_floating_ip_pools_list(self): pool_names = ['pool1', 'pool2'] pools = [floating_ip_pools.FloatingIPPool( None, {'name': pool}) for pool in pool_names] novaclient = self.stub_novaclient() novaclient.floating_ip_pools = self.mox.CreateMockAnything() 
novaclient.floating_ip_pools.list().AndReturn(pools) self.mox.ReplayAll() ret = api.network.floating_ip_pools_list(self.request) self.assertEqual([p.name for p in ret], pool_names) def test_floating_ip_list(self): fips = self.api_floating_ips.list() novaclient = self.stub_novaclient() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.floating_ips.list().AndReturn(fips) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_list(self.request) for r, e in zip(ret, fips): for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']: self.assertEqual(r.__getattr__(attr), e.__getattr__(attr)) self.assertEqual(r.port_id, e.instance_id) def test_floating_ip_get(self): fip = self.api_floating_ips.first() novaclient = self.stub_novaclient() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.floating_ips.get(fip.id).AndReturn(fip) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_get(self.request, fip.id) for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']: self.assertEqual(ret.__getattr__(attr), fip.__getattr__(attr)) self.assertEqual(ret.port_id, fip.instance_id) def test_floating_ip_allocate(self): pool_name = 'fip_pool' fip = self.api_floating_ips.first() novaclient = self.stub_novaclient() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.floating_ips.create(pool=pool_name).AndReturn(fip) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_allocate(self.request, pool_name) for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']: self.assertEqual(ret.__getattr__(attr), fip.__getattr__(attr)) self.assertEqual(ret.port_id, fip.instance_id) def test_floating_ip_release(self): fip = self.api_floating_ips.first() novaclient = self.stub_novaclient() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.floating_ips.delete(fip.id) self.mox.ReplayAll() api.network.tenant_floating_ip_release(self.request, fip.id) def test_floating_ip_associate(self): server = 
api.nova.Server(self.servers.first(), self.request) floating_ip = self.floating_ips.first() novaclient = self.stub_novaclient() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.servers = self.mox.CreateMockAnything() novaclient.servers.get(server.id).AndReturn(server) novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip) novaclient.servers.add_floating_ip(server.id, floating_ip.ip) \ .AndReturn(server) self.mox.ReplayAll() api.network.floating_ip_associate(self.request, floating_ip.id, server.id) def test_floating_ip_disassociate(self): server = api.nova.Server(self.servers.first(), self.request) floating_ip = self.api_floating_ips.first() novaclient = self.stub_novaclient() novaclient.servers = self.mox.CreateMockAnything() novaclient.floating_ips = self.mox.CreateMockAnything() novaclient.servers.get(server.id).AndReturn(server) novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip) novaclient.servers.remove_floating_ip(server.id, floating_ip.ip) \ .AndReturn(server) self.mox.ReplayAll() api.network.floating_ip_disassociate(self.request, floating_ip.id, server.id) def test_floating_ip_target_list(self): servers = self.servers.list() novaclient = self.stub_novaclient() novaclient.servers = self.mox.CreateMockAnything() novaclient.servers.list().AndReturn(servers) self.mox.ReplayAll() targets = api.network.floating_ip_target_list(self.request) for target, server in zip(targets, servers): self.assertEqual(target.id, server.id) self.assertEqual(target.name, '%s (%s)' % (server.name, server.id)) def test_floating_ip_target_get_by_instance(self): self.mox.ReplayAll() instance_id = self.servers.first().id ret = api.network.floating_ip_target_get_by_instance(self.request, instance_id) self.assertEqual(instance_id, ret) class NetworkApiNeutronTestBase(test.APITestCase): def setUp(self): super(NetworkApiNeutronTestBase, self).setUp() self.mox.StubOutWithMock(api.base, 'is_service_enabled') 
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \ .AndReturn(True) self.qclient = self.stub_neutronclient() self.qclient.list_extensions() \ .AndReturn({'extensions': self.api_extensions.list()}) class NetworkApiNeutronSecurityGroupTests(NetworkApiNeutronTestBase): def setUp(self): super(NetworkApiNeutronSecurityGroupTests, self).setUp() self.sg_dict = dict([(sg['id'], sg['name']) for sg in self.api_q_secgroups.list()]) def _cmp_sg_rule(self, exprule, retrule): self.assertEqual(exprule['id'], retrule.id) self.assertEqual(exprule['security_group_id'], retrule.parent_group_id) self.assertEqual(exprule['direction'], retrule.direction) self.assertEqual(exprule['ethertype'], retrule.ethertype) self.assertEqual(exprule['port_range_min'], retrule.from_port) self.assertEqual(exprule['port_range_max'], retrule.to_port) if (exprule['remote_ip_prefix'] is None and exprule['remote_group_id'] is None): expcidr = ('::/0' if exprule['ethertype'] == 'IPv6' else '0.0.0.0/0') else: expcidr = exprule['remote_ip_prefix'] self.assertEqual(expcidr, retrule.ip_range.get('cidr')) self.assertEqual(self.sg_dict.get(exprule['remote_group_id']), retrule.group.get('name')) def _cmp_sg(self, exp_sg, ret_sg): self.assertEqual(exp_sg['id'], ret_sg.id) self.assertEqual(exp_sg['name'], ret_sg.name) exp_rules = exp_sg['security_group_rules'] self.assertEqual(len(exp_rules), len(ret_sg.rules)) for (exprule, retrule) in itertools.izip(exp_rules, ret_sg.rules): self._cmp_sg_rule(exprule, retrule) def test_security_group_list(self): sgs = self.api_q_secgroups.list() tenant_id = self.request.user.tenant_id # use deepcopy to ensure self.api_q_secgroups is not modified. 
self.qclient.list_security_groups(tenant_id=tenant_id) \ .AndReturn({'security_groups': copy.deepcopy(sgs)}) self.mox.ReplayAll() rets = api.network.security_group_list(self.request) self.assertEqual(len(sgs), len(rets)) for (exp, ret) in itertools.izip(sgs, rets): self._cmp_sg(exp, ret) def test_security_group_get(self): secgroup = self.api_q_secgroups.first() sg_ids = set([secgroup['id']] + [rule['remote_group_id'] for rule in secgroup['security_group_rules'] if rule['remote_group_id']]) related_sgs = [sg for sg in self.api_q_secgroups.list() if sg['id'] in sg_ids] # use deepcopy to ensure self.api_q_secgroups is not modified. self.qclient.show_security_group(secgroup['id']) \ .AndReturn({'security_group': copy.deepcopy(secgroup)}) self.qclient.list_security_groups(id=sg_ids, fields=['id', 'name']) \ .AndReturn({'security_groups': related_sgs}) self.mox.ReplayAll() ret = api.network.security_group_get(self.request, secgroup['id']) self._cmp_sg(secgroup, ret) def test_security_group_create(self): secgroup = self.api_q_secgroups.list()[1] body = {'security_group': {'name': secgroup['name'], 'description': secgroup['description']}} self.qclient.create_security_group(body) \ .AndReturn({'security_group': copy.deepcopy(secgroup)}) self.mox.ReplayAll() ret = api.network.security_group_create(self.request, secgroup['name'], secgroup['description']) self._cmp_sg(secgroup, ret) def test_security_group_update(self): secgroup = self.api_q_secgroups.list()[1] secgroup = copy.deepcopy(secgroup) secgroup['name'] = 'newname' secgroup['description'] = 'new description' body = {'security_group': {'name': secgroup['name'], 'description': secgroup['description']}} self.qclient.update_security_group(secgroup['id'], body) \ .AndReturn({'security_group': secgroup}) self.mox.ReplayAll() ret = api.network.security_group_update(self.request, secgroup['id'], secgroup['name'], secgroup['description']) self._cmp_sg(secgroup, ret) def test_security_group_delete(self): secgroup = 
self.api_q_secgroups.first() self.qclient.delete_security_group(secgroup['id']) self.mox.ReplayAll() api.network.security_group_delete(self.request, secgroup['id']) def test_security_group_rule_create(self): sg_rule = [r for r in self.api_q_secgroup_rules.list() if r['protocol'] == 'tcp' and r['remote_ip_prefix']][0] sg_id = sg_rule['security_group_id'] secgroup = [sg for sg in self.api_q_secgroups.list() if sg['id'] == sg_id][0] post_rule = copy.deepcopy(sg_rule) del post_rule['id'] del post_rule['tenant_id'] post_body = {'security_group_rule': post_rule} self.qclient.create_security_group_rule(post_body) \ .AndReturn({'security_group_rule': copy.deepcopy(sg_rule)}) self.qclient.list_security_groups(id=set([sg_id]), fields=['id', 'name']) \ .AndReturn({'security_groups': [copy.deepcopy(secgroup)]}) self.mox.ReplayAll() ret = api.network.security_group_rule_create( self.request, sg_rule['security_group_id'], sg_rule['direction'], sg_rule['ethertype'], sg_rule['protocol'], sg_rule['port_range_min'], sg_rule['port_range_max'], sg_rule['remote_ip_prefix'], sg_rule['remote_group_id']) self._cmp_sg_rule(sg_rule, ret) def test_security_group_rule_delete(self): sg_rule = self.api_q_secgroup_rules.first() self.qclient.delete_security_group_rule(sg_rule['id']) self.mox.ReplayAll() api.network.security_group_rule_delete(self.request, sg_rule['id']) def _get_instance(self, cur_sg_ids): instance_port = [p for p in self.api_ports.list() if p['device_owner'].startswith('compute:')][0] instance_id = instance_port['device_id'] # Emulate an intance with two ports instance_ports = [] for _i in range(2): p = copy.deepcopy(instance_port) p['id'] = str(uuid.uuid4()) p['security_groups'] = cur_sg_ids instance_ports.append(p) return (instance_id, instance_ports) def test_server_security_groups(self): cur_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]] instance_id, instance_ports = self._get_instance(cur_sg_ids) self.qclient.list_ports(device_id=instance_id) \ 
.AndReturn({'ports': instance_ports}) secgroups = copy.deepcopy(self.api_q_secgroups.list()) self.qclient.list_security_groups(id=set(cur_sg_ids)) \ .AndReturn({'security_groups': secgroups}) self.mox.ReplayAll() api.network.server_security_groups(self.request, instance_id) def test_server_update_security_groups(self): cur_sg_ids = [self.api_q_secgroups.first()['id']] new_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]] instance_id, instance_ports = self._get_instance(cur_sg_ids) self.qclient.list_ports(device_id=instance_id) \ .AndReturn({'ports': instance_ports}) for p in instance_ports: body = {'port': {'security_groups': new_sg_ids}} self.qclient.update_port(p['id'], body=body).AndReturn({'port': p}) self.mox.ReplayAll() api.network.server_update_security_groups( self.request, instance_id, new_sg_ids) def test_security_group_backend(self): self.mox.ReplayAll() self.assertEqual(api.network.security_group_backend(self.request), 'neutron') class NetworkApiNeutronFloatingIpTests(NetworkApiNeutronTestBase): def test_floating_ip_pools_list(self): search_opts = {'router:external': True} ext_nets = [n for n in self.api_networks.list() if n['router:external']] self.qclient.list_networks(**search_opts) \ .AndReturn({'networks': ext_nets}) self.mox.ReplayAll() rets = api.network.floating_ip_pools_list(self.request) for attr in ['id', 'name']: self.assertEqual([p.__getattr__(attr) for p in rets], [p[attr] for p in ext_nets]) def test_floating_ip_list(self): fips = self.api_q_floating_ips.list() filters = {'tenant_id': self.request.user.tenant_id} self.qclient.list_floatingips(**filters) \ .AndReturn({'floatingips': fips}) self.qclient.list_ports(**filters) \ .AndReturn({'ports': self.api_ports.list()}) self.mox.ReplayAll() rets = api.network.tenant_floating_ip_list(self.request) assoc_port = self.api_ports.list()[1] self.assertEqual(len(fips), len(rets)) for ret, exp in zip(rets, fips): for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']: 
self.assertEqual(ret.__getattr__(attr), exp[attr]) if exp['port_id']: dev_id = assoc_port['device_id'] if exp['port_id'] else None self.assertEqual(ret.instance_id, dev_id) def test_floating_ip_get_associated(self): fip = self.api_q_floating_ips.list()[1] assoc_port = self.api_ports.list()[1] self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip}) self.qclient.show_port(assoc_port['id']) \ .AndReturn({'port': assoc_port}) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_get(self.request, fip['id']) for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']: self.assertEqual(ret.__getattr__(attr), fip[attr]) self.assertEqual(ret.instance_id, assoc_port['device_id']) def test_floating_ip_get_unassociated(self): fip = self.api_q_floating_ips.list()[0] self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip}) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_get(self.request, fip['id']) for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']: self.assertEqual(ret.__getattr__(attr), fip[attr]) self.assertEqual(ret.instance_id, None) def test_floating_ip_allocate(self): ext_nets = [n for n in self.api_networks.list() if n['router:external']] ext_net = ext_nets[0] fip = self.api_q_floating_ips.first() self.qclient.create_floatingip( {'floatingip': {'floating_network_id': ext_net['id']}}) \ .AndReturn({'floatingip': fip}) self.mox.ReplayAll() ret = api.network.tenant_floating_ip_allocate(self.request, ext_net['id']) for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']: self.assertEqual(ret.__getattr__(attr), fip[attr]) self.assertEqual(ret.instance_id, None) def test_floating_ip_release(self): fip = self.api_q_floating_ips.first() self.qclient.delete_floatingip(fip['id']) self.mox.ReplayAll() api.network.tenant_floating_ip_release(self.request, fip['id']) def test_floating_ip_associate(self): fip = self.api_q_floating_ips.list()[1] assoc_port = self.api_ports.list()[1] ip_address = assoc_port['fixed_ips'][0]['ip_address'] 
target_id = '%s_%s' % (assoc_port['id'], ip_address) params = {'port_id': assoc_port['id'], 'fixed_ip_address': ip_address} self.qclient.update_floatingip(fip['id'], {'floatingip': params}) self.mox.ReplayAll() api.network.floating_ip_associate(self.request, fip['id'], target_id) def test_floating_ip_disassociate(self): fip = self.api_q_floating_ips.list()[1] assoc_port = self.api_ports.list()[1] ip_address = assoc_port['fixed_ips'][0]['ip_address'] target_id = '%s_%s' % (assoc_port['id'], ip_address) self.qclient.update_floatingip(fip['id'], {'floatingip': {'port_id': None}}) self.mox.ReplayAll() api.network.floating_ip_disassociate(self.request, fip['id'], target_id) def _get_target_id(self, port): param = {'id': port['id'], 'addr': port['fixed_ips'][0]['ip_address']} return '%(id)s_%(addr)s' % param def _get_target_name(self, port): param = {'svrid': port['device_id'], 'addr': port['fixed_ips'][0]['ip_address']} return 'server_%(svrid)s: %(addr)s' % param def test_floating_ip_target_list(self): ports = self.api_ports.list() target_ports = [(self._get_target_id(p), self._get_target_name(p)) for p in ports if not p['device_owner'].startswith('network:')] filters = {'tenant_id': self.request.user.tenant_id} self.qclient.list_ports(**filters).AndReturn({'ports': ports}) servers = self.servers.list() novaclient = self.stub_novaclient() novaclient.servers = self.mox.CreateMockAnything() search_opts = {'project_id': self.request.user.tenant_id} novaclient.servers.list(True, search_opts).AndReturn(servers) self.mox.ReplayAll() rets = api.network.floating_ip_target_list(self.request) self.assertEqual(len(rets), len(target_ports)) for ret, exp in zip(rets, target_ports): self.assertEqual(ret.id, exp[0]) self.assertEqual(ret.name, exp[1]) def test_floating_ip_target_get_by_instance(self): ports = self.api_ports.list() candidates = [p for p in ports if p['device_id'] == '1'] search_opts = {'device_id': '1'} self.qclient.list_ports(**search_opts).AndReturn({'ports': 
candidates}) self.mox.ReplayAll() ret = api.network.floating_ip_target_get_by_instance(self.request, '1') self.assertEqual(ret, self._get_target_id(candidates[0])) def test_target_floating_ip_port_by_instance(self): ports = self.api_ports.list() candidates = [p for p in ports if p['device_id'] == '1'] search_opts = {'device_id': '1'} self.qclient.list_ports(**search_opts).AndReturn({'ports': candidates}) self.mox.ReplayAll() ret = api.network.floating_ip_target_list_by_instance(self.request, '1') self.assertEqual(ret[0], self._get_target_id(candidates[0])) self.assertEqual(len(ret), len(candidates))
unknown
codeparrot/codeparrot-clean
import numpy as np import tables as ts import playground.group_rotation.amino_acids as amino import pele.amber.read_amber as ra import playground.group_rotation.chirality as chir import networkx as nx class RotamerGroupTemplate(ts.IsDescription): """ A compound data type for interoperating with ROTAMER_GROUP_TEMPLATE in GMIN. Fortran type definition: TYPE ROTAMER_GROUP_TEMPLATE CHAR(LEN=16) :: GROUP_NAME CHAR(LEN=16) :: RES_NAME CHAR(LEN=16) :: DIHEDRAL_ATOM_NAMES(4) CHAR(LEN=16), ALLOCATABLE :: MOVING_ATOM_NAMES(:) END TYPE ROTAMER_GROUP_TEMPLATE These are designed to be read from HDF5 files, and so this implementation uses the appropriate types from PyTables. As we do not know the length of the moving atoms array a priori, we instead use a CArray (HDF5 compressible dataset) to store moving atom names for each group. The individual row corresponding to a group then contains the name of the CArray with the relevant atom names in it. """ group_name = ts.StringCol(itemsize=16) res_name = ts.StringCol(itemsize=16) dihedral_atom_names = ts.StringCol(itemsize=16, shape=(4)) moving_atoms_carray = ts.StringCol(itemsize=24) # Open a file in "w"rite mode fileh = ts.open_file("amber_rotamer_groups.h5", mode="w") # Get the HDF5 root group root = fileh.root # Create the groups for the templates themselves and one for storing the moving atoms arrays. for groupname in ("RotamerGroupTemplates", "MovingAtomsArrays"): group = fileh.create_group(root, groupname) # Create a filter, telling it to compress with zlib. filters = ts.Filters(complib='zlib') for amino_acid in (amino.amino_acids): table = fileh.create_table("/RotamerGroupTemplates", amino_acid, RotamerGroupTemplate, "Template for {res}".format(res=amino_acid)) # Get the record object associated with the table. group_template = table.row # Read in an appropriate topology file and create a molecular graph. 
filename = '/scratch/khs26/rotamer_lib_igb2/{res}/{res}/{res}/coords.prmtop'.format(res=amino_acid) topology = ra.read_topology(filename) mol_graph = ra.create_atoms_and_residues(topology) # Get the residue name for the first residue. res = next(residue for residue in mol_graph.residues.nodes() if residue.index == 1) # Get a list of dihedrals we are interested in for this residue. dihedrals = sorted([k[1] for k in amino.def_parameters if k[0] == amino_acid and not ('C' in k[1] and 'CA' in k[1])]) # For each pair of atoms in a dihedral, find their highest-ranked neighbours for defining the dihedral angle. dihedral_atoms = {} dihedral_moving_atoms = {} for atom_pair in dihedrals: atom0 = next(n for n in mol_graph.atoms.nodes() if n.name == atom_pair[0] and n.residue == res) atom1 = next(n for n in mol_graph.atoms.nodes() if n.name == atom_pair[1] and n.residue == res) atom_1 = next(atom for atom in chir.chiral_order(mol_graph.atoms, atom0, depth=2) if atom != atom1) atom2 = next(atom for atom in chir.chiral_order(mol_graph.atoms, atom1, depth=2) if atom != atom0) dihedral_atoms[(atom0.name, atom1.name)] = (atom_1, atom0, atom1, atom2) # Now find the moving atoms by breaking the dihedral bond and choosing the subgraph containing atom1. mol_graph.atoms.remove_edge(atom0, atom1) dihedral_moving_atoms[(atom0.name, atom1.name)] = nx.node_connected_component(mol_graph.atoms, atom1) mol_graph.atoms.add_edge(atom0, atom1) # Loop through the possible dihedral atom pairs for the amino acid. 
# i is going to form part of the CArray name for i, dihedral in enumerate(dihedrals): moving_atom_names = [atom.name for atom in dihedral_moving_atoms[dihedral]] carray_name = 'carray_{res}_{ind}'.format(res=amino_acid, ind=str(i)) print amino_acid, i, dihedral, moving_atom_names, carray_name ca = fileh.create_carray(root.MovingAtomsArrays, name=carray_name, atom=ts.StringAtom(16), shape=(len(moving_atom_names),), filters=filters) ca[0:] = moving_atom_names group_template['group_name'] = "{res}_{dih0}_{dih1}".format(res=amino_acid, dih0=dihedral[0], dih1=dihedral[1]) group_template['res_name'] = "{res}".format(res=amino_acid) group_template['dihedral_atom_names'] = np.array([x.name for x in dihedral_atoms[dihedral]]) group_template['moving_atoms_carray'] = carray_name # Append this element to the row and move on. group_template.append() # Flush the table buffers table.flush() # Read the records from table "/RotamerGroupTemplates/ARG" and select some table = root.RotamerGroupTemplates.ARG e = [(p['group_name'], p['res_name'], p['dihedral_atom_names'], p['moving_atoms_carray']) for p in table] for elem in e: print("Selected values ==>", elem) print("Carray:", root.MovingAtomsArrays._v_children[elem[-1]][:]) print("Total selected records ==> ", len(e)) # Finally, close the file (this also will flush all the remaining buffers!) fileh.close()
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Tests\Database; use Illuminate\Database\Capsule\Manager as DB; use Illuminate\Database\Eloquent\Model as Eloquent; use Illuminate\Support\Carbon; use PHPUnit\Framework\TestCase; use RuntimeException; class DatabaseEloquentTimestampsTest extends TestCase { protected function setUp(): void { parent::setUp(); $db = new DB; $db->addConnection([ 'driver' => 'sqlite', 'database' => ':memory:', ]); $db->bootEloquent(); $db->setAsGlobal(); $this->createSchema(); } /** * Setup the database schema. * * @return void */ public function createSchema() { $this->schema()->create('users', function ($table) { $table->increments('id'); $table->string('email')->unique(); $table->timestamps(); }); $this->schema()->create('users_created_at', function ($table) { $table->increments('id'); $table->string('email')->unique(); $table->string('created_at'); }); $this->schema()->create('users_updated_at', function ($table) { $table->increments('id'); $table->string('email')->unique(); $table->string('updated_at'); }); } /** * Tear down the database schema. * * @return void */ protected function tearDown(): void { $this->schema()->drop('users'); $this->schema()->drop('users_created_at'); $this->schema()->drop('users_updated_at'); Carbon::setTestNow(null); parent::tearDown(); } /** * Tests... 
*/ public function testUserWithCreatedAtAndUpdatedAt() { Carbon::setTestNow($now = Carbon::now()); $user = UserWithCreatedAndUpdated::create([ 'email' => 'test@test.com', ]); $this->assertEquals($now->toDateTimeString(), $user->created_at->toDateTimeString()); $this->assertEquals($now->toDateTimeString(), $user->updated_at->toDateTimeString()); } public function testUserWithCreatedAt() { Carbon::setTestNow($now = Carbon::now()); $user = UserWithCreated::create([ 'email' => 'test@test.com', ]); $this->assertEquals($now->toDateTimeString(), $user->created_at->toDateTimeString()); } public function testUserWithUpdatedAt() { Carbon::setTestNow($now = Carbon::now()); $user = UserWithUpdated::create([ 'email' => 'test@test.com', ]); $this->assertEquals($now->toDateTimeString(), $user->updated_at->toDateTimeString()); } public function testWithoutTimestamp() { Carbon::setTestNow($now = Carbon::now()->setYear(1995)->startOfYear()); $user = UserWithCreatedAndUpdated::create(['email' => 'foo@example.com']); Carbon::setTestNow(Carbon::now()->addHour()); $this->assertTrue($user->usesTimestamps()); $user->withoutTimestamps(function () use ($user) { $this->assertFalse($user->usesTimestamps()); $user->withoutTimestamps(function () use ($user) { $this->assertFalse($user->usesTimestamps()); }); $this->assertFalse($user->usesTimestamps()); $user->update([ 'email' => 'bar@example.com', ]); }); $this->assertTrue($user->usesTimestamps()); $this->assertTrue($now->equalTo($user->updated_at)); $this->assertSame('bar@example.com', $user->email); } public function testWithoutTimestampWhenAlreadyIgnoringTimestamps() { Carbon::setTestNow($now = Carbon::now()->setYear(1995)->startOfYear()); $user = UserWithCreatedAndUpdated::create(['email' => 'foo@example.com']); Carbon::setTestNow(Carbon::now()->addHour()); $user->timestamps = false; $this->assertFalse($user->usesTimestamps()); $user->withoutTimestamps(function () use ($user) { $this->assertFalse($user->usesTimestamps()); $user->update([ 
'email' => 'bar@example.com', ]); }); $this->assertFalse($user->usesTimestamps()); $this->assertTrue($now->equalTo($user->updated_at)); $this->assertSame('bar@example.com', $user->email); } public function testWithoutTimestampRestoresWhenClosureThrowsException() { $user = UserWithCreatedAndUpdated::create(['email' => 'foo@example.com']); $user->timestamps = true; try { $user->withoutTimestamps(function () use ($user) { $this->assertFalse($user->usesTimestamps()); throw new RuntimeException(); }); $this->fail(); } catch (RuntimeException) { // } $this->assertTrue($user->timestamps); } public function testWithoutTimestampsRespectsClasses() { $a = new UserWithCreatedAndUpdated(); $b = new UserWithCreatedAndUpdated(); $z = new UserWithUpdated(); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); Eloquent::withoutTimestamps(function () use ($a, $b, $z) { $this->assertFalse($a->usesTimestamps()); $this->assertFalse($b->usesTimestamps()); $this->assertFalse($z->usesTimestamps()); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); UserWithCreatedAndUpdated::withoutTimestamps(function () use ($a, $b, $z) { $this->assertFalse($a->usesTimestamps()); $this->assertFalse($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); 
$this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); UserWithUpdated::withoutTimestamps(function () use ($a, $b, $z) { $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertFalse($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); Eloquent::withoutTimestampsOn([], function () use ($a, $b, $z) { $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); Eloquent::withoutTimestampsOn([UserWithCreatedAndUpdated::class], function () use ($a, $b, $z) { $this->assertFalse($a->usesTimestamps()); $this->assertFalse($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); 
$this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); Eloquent::withoutTimestampsOn([UserWithUpdated::class], function () use ($a, $b, $z) { $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertFalse($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); Eloquent::withoutTimestampsOn([UserWithCreatedAndUpdated::class, UserWithUpdated::class], function () use ($a, $b, $z) { $this->assertFalse($a->usesTimestamps()); $this->assertFalse($b->usesTimestamps()); $this->assertFalse($z->usesTimestamps()); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertTrue(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); }); $this->assertTrue($a->usesTimestamps()); $this->assertTrue($b->usesTimestamps()); $this->assertTrue($z->usesTimestamps()); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithCreatedAndUpdated::class)); $this->assertFalse(Eloquent::isIgnoringTimestamps(UserWithUpdated::class)); } /** * Get a database connection instance. * * @return \Illuminate\Database\Connection */ protected function connection() { return Eloquent::getConnectionResolver()->connection(); } /** * Get a schema builder instance. 
* * @return \Illuminate\Database\Schema\Builder */ protected function schema() { return $this->connection()->getSchemaBuilder(); } } /** * Eloquent Models... */ class UserWithCreatedAndUpdated extends Eloquent { protected $table = 'users'; protected $guarded = []; } class UserWithCreated extends Eloquent { public const UPDATED_AT = null; protected $table = 'users_created_at'; protected $guarded = []; protected $dateFormat = 'U'; } class UserWithUpdated extends Eloquent { public const CREATED_AT = null; protected $table = 'users_updated_at'; protected $guarded = []; protected $dateFormat = 'U'; }
php
github
https://github.com/laravel/framework
tests/Database/DatabaseEloquentTimestampsTest.php
from __future__ import annotations from collections import ( abc, defaultdict, ) import csv from io import StringIO import re from typing import ( IO, TYPE_CHECKING, Any, DefaultDict, Literal, cast, final, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import Scalar from pandas.errors import ( EmptyDataError, ParserError, ParserWarning, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.common import ( is_bool_dtype, is_extension_array_dtype, is_integer, is_numeric_dtype, is_object_dtype, is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, ExtensionDtype, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core import algorithms from pandas.core.arrays import ( Categorical, ExtensionArray, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.indexes.api import Index from pandas.io.common import ( dedup_names, is_potential_multi_index, ) from pandas.io.parsers.base_parser import ( ParserBase, evaluate_callable_usecols, get_na_values, parser_defaults, validate_parse_dates_presence, ) if TYPE_CHECKING: from collections.abc import ( Hashable, Iterator, Mapping, Sequence, ) from pandas._typing import ( ArrayLike, DtypeObj, ReadCsvBuffer, T, ) from pandas import ( MultiIndex, Series, ) # BOM character (byte order mark) # This exists at the beginning of a file to indicate endianness # of a file (stream). Unfortunately, this marker screws up parsing, # so we need to remove it if we see it. 
_BOM = "\ufeff" class PythonParser(ParserBase): _no_thousands_columns: set[int] def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: """ Workhorse function for processing nested list into DataFrame """ super().__init__(kwds) self.data: Iterator[list[str]] | list[list[Scalar]] = [] self.buf: list = [] self.pos = 0 self.line_pos = 0 self.skiprows = kwds["skiprows"] if callable(self.skiprows): self.skipfunc = self.skiprows else: self.skipfunc = lambda x: x in self.skiprows self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"]) self.delimiter = kwds["delimiter"] self.quotechar = kwds["quotechar"] if isinstance(self.quotechar, str): self.quotechar = str(self.quotechar) self.escapechar = kwds["escapechar"] self.doublequote = kwds["doublequote"] self.skipinitialspace = kwds["skipinitialspace"] self.lineterminator = kwds["lineterminator"] self.quoting = kwds["quoting"] self.skip_blank_lines = kwds["skip_blank_lines"] # Passed from read_excel self.has_index_names = kwds.get("has_index_names", False) self.thousands = kwds["thousands"] self.decimal = kwds["decimal"] self.comment = kwds["comment"] # Set self.data to something that can read lines. if isinstance(f, list): # read_excel: f is a nested list, can contain non-str self.data = f else: assert hasattr(f, "readline") # yields list of str self.data = self._make_reader(f) # Get columns in two steps: infer from data, then # infer column indices from self.usecols if it is specified. self._col_indices: list[int] | None = None columns: list[list[Scalar | None]] ( columns, self.num_original_columns, self.unnamed_cols, ) = self._infer_columns() # Now self.columns has the set of columns that we will process. # The original set is stored in self.original_columns. 
# error: Cannot determine type of 'index_names' ( self.columns, self.index_names, self.col_names, _, ) = self._extract_multi_indexer_columns( columns, self.index_names, ) # get popped off for index self.orig_names: list[Hashable] = list(self.columns) index_names, self.orig_names, self.columns = self._get_index_name() if self.index_names is None: self.index_names = index_names if self._col_indices is None: self._col_indices = list(range(len(self.columns))) self._no_thousands_columns = self._set_no_thousand_columns() if len(self.decimal) != 1: raise ValueError("Only length-1 decimal markers supported") @cache_readonly def num(self) -> re.Pattern: decimal = re.escape(self.decimal) if self.thousands is None: regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$" else: thousands = re.escape(self.thousands) regex = ( rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?" rf"([0-9]?(E|e)\-?[0-9]+)?$" ) return re.compile(regex) def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> Iterator[list[str]]: sep = self.delimiter if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError( "Custom line terminators not supported in python parser (yet)" ) class MyDialect(csv.Dialect): delimiter = self.delimiter quotechar = self.quotechar escapechar = self.escapechar doublequote = self.doublequote skipinitialspace = self.skipinitialspace quoting = self.quoting lineterminator = "\n" dia = MyDialect if sep is not None: dia.delimiter = sep # Skip rows at file level before csv.reader sees them # prevents CSV parsing errors on lines that will be discarded if self.skiprows is not None: while self.skipfunc(self.pos): line = f.readline() if not line: break self.pos += 1 else: # attempt to sniff the delimiter from the first valid line, # i.e. 
no comment line and not in skiprows line = f.readline() lines = self._check_comments([[line]])[0] while self.skipfunc(self.pos) or not lines: self.pos += 1 line = f.readline() lines = self._check_comments([[line]])[0] lines_str = cast(list[str], lines) # since `line` was a string, lines will be a list containing # only a single string line = lines_str[0] self.pos += 1 self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter # Note: encoding is irrelevant here line_rdr = csv.reader(StringIO(line), dialect=dia) self.buf.extend(list(line_rdr)) # Note: encoding is irrelevant here reader = csv.reader(f, dialect=dia, strict=True) else: def _read(): line = f.readline() pat = re.compile(sep) yield pat.split(line.strip()) for line in f: yield pat.split(line.strip()) reader = _read() return reader def read( self, rows: int | None = None ) -> tuple[ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike | Series], ]: try: content = self._get_lines(rows) except StopIteration: if self._first_chunk: content = [] else: self.close() raise # done with first read, next time raise StopIteration self._first_chunk = False index: Index | None columns: Sequence[Hashable] = list(self.orig_names) if not content: # pragma: no cover # DataFrame with the right metadata, even though it's length 0 # error: Cannot determine type of 'index_col' names = dedup_names( self.orig_names, is_potential_multi_index( self.orig_names, self.index_col, ), ) index, columns, col_dict = self._get_empty_meta( names, self.dtype, ) conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names) return index, conv_columns, col_dict # handle new style for names in index indexnamerow = None if self.has_index_names and sum( int(v == "" or v is None) for v in content[0] ) == len(columns): indexnamerow = content[0] content = content[1:] alldata = self._rows_to_cols(content) data, columns = self._exclude_implicit_index(alldata) conv_data = 
self._convert_data(data) conv_data = self._do_date_conversions(columns, conv_data) index, result_columns = self._make_index(alldata, columns, indexnamerow) return index, result_columns, conv_data def _exclude_implicit_index( self, alldata: list[np.ndarray], ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: # error: Cannot determine type of 'index_col' names = dedup_names( self.orig_names, is_potential_multi_index( self.orig_names, self.index_col, ), ) offset = 0 if self._implicit_index: offset = len(self.index_col) len_alldata = len(alldata) self._check_data_length(names, alldata) return { name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata }, names # legacy def get_chunk( self, size: int | None = None ) -> tuple[ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike | Series], ]: if size is None: # error: "PythonParser" has no attribute "chunksize" size = self.chunksize # type: ignore[attr-defined] return self.read(rows=size) def _convert_data( self, data: Mapping[Hashable, np.ndarray], ) -> Mapping[Hashable, ArrayLike]: # apply converters clean_conv = self._clean_mapping(self.converters) clean_dtypes = self._clean_mapping(self.dtype) # Apply NA values. 
clean_na_values = {} clean_na_fvalues = {} if isinstance(self.na_values, dict): for col in self.na_values: if col is not None: na_value = self.na_values[col] na_fvalue = self.na_fvalues[col] if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean_na_values[col] = na_value clean_na_fvalues[col] = na_fvalue else: clean_na_values = self.na_values clean_na_fvalues = self.na_fvalues return self._convert_to_ndarrays( data, clean_na_values, clean_na_fvalues, clean_conv, clean_dtypes, ) @final def _convert_to_ndarrays( self, dct: Mapping, na_values, na_fvalues, converters=None, dtypes=None, ) -> dict[Any, np.ndarray]: result = {} parse_date_cols = validate_parse_dates_presence(self.parse_dates, self.columns) for c, values in dct.items(): conv_f = None if converters is None else converters.get(c, None) if isinstance(dtypes, dict): cast_type = dtypes.get(c, None) else: # single dtype or None cast_type = dtypes if self.na_filter: col_na_values, col_na_fvalues = get_na_values( c, na_values, na_fvalues, self.keep_default_na ) else: col_na_values, col_na_fvalues = set(), set() if c in parse_date_cols: # GH#26203 Do not convert columns which get converted to dates # but replace nans to ensure to_datetime works mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) # pyright: ignore[reportArgumentType] np.putmask(values, mask, np.nan) result[c] = values continue if conv_f is not None: # conv_f applied to data before inference if cast_type is not None: warnings.warn( ( "Both a converter and dtype were specified " f"for column {c} - only the converter will be used." 
), ParserWarning, stacklevel=find_stack_level(), ) try: values = lib.map_infer(values, conv_f) except ValueError: mask = algorithms.isin(values, list(na_values)).view(np.uint8) values = lib.map_infer_mask(values, conv_f, mask) cvals, na_count = self._infer_types( values, set(col_na_values) | col_na_fvalues, cast_type is None, try_num_bool=False, ) else: is_ea = is_extension_array_dtype(cast_type) is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type) # skip inference if specified dtype is object # or casting to an EA try_num_bool = not (cast_type and is_str_or_ea_dtype) # general type inference and conversion cvals, na_count = self._infer_types( values, set(col_na_values) | col_na_fvalues, cast_type is None, try_num_bool, ) # type specified in dtype param or cast_type is an EA if cast_type is not None: cast_type = pandas_dtype(cast_type) if cast_type and (cvals.dtype != cast_type or is_ea): if not is_ea and na_count > 0: if is_bool_dtype(cast_type): raise ValueError(f"Bool column has NA values in column {c}") cvals = self._cast_types(cvals, cast_type, c) result[c] = cvals return result @final def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike: """ Cast values to specified type Parameters ---------- values : ndarray or ExtensionArray cast_type : np.dtype or ExtensionDtype dtype to cast values to column : string column name - used only for error reporting Returns ------- converted : ndarray or ExtensionArray """ if isinstance(cast_type, CategoricalDtype): known_cats = cast_type.categories is not None if not is_object_dtype(values.dtype) and not known_cats: # TODO: this is for consistency with # c-parser which parses all categories # as strings values = lib.ensure_string_array( values, skipna=False, convert_na_value=False ) cats = Index(values, copy=False).unique().dropna() values = Categorical._from_inferred_categories( cats, cats.get_indexer(values), cast_type, true_values=self.true_values ) # use the EA's implementation of casting 
elif isinstance(cast_type, ExtensionDtype): array_type = cast_type.construct_array_type() try: if isinstance(cast_type, BooleanDtype): # error: Unexpected keyword argument "true_values" for # "_from_sequence_of_strings" of "ExtensionArray" values_str = [str(val) for val in values] return array_type._from_sequence_of_strings( # type: ignore[call-arg] values_str, dtype=cast_type, true_values=self.true_values, # pyright: ignore[reportCallIssue] false_values=self.false_values, # pyright: ignore[reportCallIssue] none_values=self.na_values, # pyright: ignore[reportCallIssue] ) else: return array_type._from_sequence_of_strings(values, dtype=cast_type) except NotImplementedError as err: raise NotImplementedError( f"Extension Array: {array_type} must implement " "_from_sequence_of_strings in order to be used in parser methods" ) from err elif isinstance(values, ExtensionArray): values = values.astype(cast_type, copy=False) elif issubclass(cast_type.type, str): # TODO: why skipna=True here and False above? 
some tests depend # on it here, but nothing fails if we change it above # (as no tests get there as of 2022-12-06) values = lib.ensure_string_array( values, skipna=True, convert_na_value=False ) else: try: values = astype_array(values, cast_type, copy=True) except ValueError as err: raise ValueError( f"Unable to convert column {column} to type {cast_type}" ) from err return values @cache_readonly def _have_mi_columns(self) -> bool: if self.header is None: return False header = self.header if isinstance(header, (list, tuple, np.ndarray)): return len(header) > 1 else: return False def _infer_columns( self, ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: names = self.names num_original_columns = 0 clear_buffer = True unnamed_cols: set[Scalar | None] = set() if self.header is not None: header = self.header have_mi_columns = self._have_mi_columns if isinstance(header, (list, tuple, np.ndarray)): # we have a mi columns, so read an extra line if have_mi_columns: header = [*list(header), header[-1] + 1] else: header = [header] columns: list[list[Scalar | None]] = [] for level, hr in enumerate(header): try: line = self._buffered_line() while self.line_pos <= hr: line = self._next_line() except StopIteration as err: if 0 < self.line_pos <= hr and ( not have_mi_columns or hr != header[-1] ): # If no rows we want to raise a different message and if # we have mi columns, the last line is not part of the header joi = list(map(str, header[:-1] if have_mi_columns else header)) msg = f"[{','.join(joi)}], len of {len(joi)}, " raise ValueError( f"Passed header={msg}but only {self.line_pos} lines in file" ) from err # We have an empty file, so check # if columns are provided. 
That will # serve as the 'line' for parsing if have_mi_columns and hr > 0: if clear_buffer: self.buf.clear() columns.append([None] * len(columns[-1])) return columns, num_original_columns, unnamed_cols if not self.names: raise EmptyDataError("No columns to parse from file") from err line = self.names[:] this_columns: list[Scalar | None] = [] this_unnamed_cols = [] for i, c in enumerate(line): if c == "": if have_mi_columns: col_name = f"Unnamed: {i}_level_{level}" else: col_name = f"Unnamed: {i}" this_unnamed_cols.append(i) this_columns.append(col_name) else: this_columns.append(c) if not have_mi_columns: counts: DefaultDict = defaultdict(int) # Ensure that regular columns are used before unnamed ones # to keep given names and mangle unnamed columns col_loop_order = [ i for i in range(len(this_columns)) if i not in this_unnamed_cols ] + this_unnamed_cols # TODO: Use pandas.io.common.dedup_names instead (see #50371) for i in col_loop_order: col = this_columns[i] old_col = col cur_count = counts[col] if cur_count > 0: while cur_count > 0: counts[old_col] = cur_count + 1 col = f"{old_col}.{cur_count}" if col in this_columns: cur_count += 1 else: cur_count = counts[col] if ( self.dtype is not None and is_dict_like(self.dtype) and self.dtype.get(old_col) is not None and self.dtype.get(col) is None ): self.dtype.update({col: self.dtype.get(old_col)}) this_columns[i] = col counts[col] = cur_count + 1 elif have_mi_columns: # if we have grabbed an extra line, but it's not in our # format so save in the buffer, and create a blank extra # line for the rest of the parsing code if hr == header[-1]: lc = len(this_columns) sic = self.index_col ic = len(sic) if sic is not None else 0 unnamed_count = len(this_unnamed_cols) # if wrong number of blanks or no index, not our format if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: clear_buffer = False this_columns = [None] * lc self.buf = [self.buf[-1]] columns.append(this_columns) unnamed_cols.update({this_columns[i] 
for i in this_unnamed_cols}) if len(columns) == 1: num_original_columns = len(this_columns) if clear_buffer: self.buf.clear() first_line: list[Scalar] | None if names is not None: # Read first row after header to check if data are longer try: first_line = self._next_line() except StopIteration: first_line = None len_first_data_row = 0 if first_line is None else len(first_line) if len(names) > len(columns[0]) and len(names) > len_first_data_row: raise ValueError( "Number of passed names did not match " "number of header fields in the file" ) if len(columns) > 1: raise TypeError("Cannot pass names with multi-index columns") if self.usecols is not None: # Set _use_cols. We don't store columns because they are # overwritten. self._handle_usecols(columns, names, num_original_columns) else: num_original_columns = len(names) if self._col_indices is not None and len(names) != len( self._col_indices ): columns = [[names[i] for i in sorted(self._col_indices)]] else: columns = [names] else: columns = self._handle_usecols( columns, columns[0], num_original_columns ) else: ncols = len(self._header_line) num_original_columns = ncols if not names: columns = [list(range(ncols))] columns = self._handle_usecols(columns, columns[0], ncols) elif self.usecols is None or len(names) >= ncols: columns = self._handle_usecols([names], names, ncols) num_original_columns = len(names) elif not callable(self.usecols) and len(names) != len(self.usecols): raise ValueError( "Number of passed names did not match number of " "header fields in the file" ) else: # Ignore output but set used columns. 
columns = [names] self._handle_usecols(columns, columns[0], ncols) return columns, num_original_columns, unnamed_cols @cache_readonly def _header_line(self): # Store line for reuse in _get_index_name if self.header is not None: return None try: line = self._buffered_line() except StopIteration as err: if not self.names: raise EmptyDataError("No columns to parse from file") from err line = self.names[:] return line def _handle_usecols( self, columns: list[list[Scalar | None]], usecols_key: list[Scalar | None], num_original_columns: int, ) -> list[list[Scalar | None]]: """ Sets self._col_indices usecols_key is used if there are string usecols. """ col_indices: set[int] | list[int] if self.usecols is not None: if callable(self.usecols): col_indices = evaluate_callable_usecols(self.usecols, usecols_key) elif any(isinstance(u, str) for u in self.usecols): if len(columns) > 1: raise ValueError( "If using multiple headers, usecols must be integers." ) col_indices = [] for col in self.usecols: if isinstance(col, str): try: col_indices.append(usecols_key.index(col)) except ValueError: self._validate_usecols_names(self.usecols, usecols_key) else: col_indices.append(col) else: missing_usecols = [ col for col in self.usecols if col >= num_original_columns ] if missing_usecols: raise ParserError( "Defining usecols with out-of-bounds indices is not allowed. " f"{missing_usecols} are out-of-bounds.", ) col_indices = self.usecols columns = [ [n for i, n in enumerate(column) if i in col_indices] for column in columns ] self._col_indices = sorted(col_indices) return columns def _buffered_line(self) -> list[Scalar]: """ Return a line from buffer, filling buffer if required. """ if len(self.buf) > 0: return self.buf[0] else: return self._next_line() def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: """ Checks whether the file begins with the BOM character. If it does, remove it. 
In addition, if there is quoting in the field subsequent to the BOM, remove it as well because it technically takes place at the beginning of the name, not the middle of it. """ # first_row will be a list, so we need to check # that that list is not empty before proceeding. if not first_row: return first_row # The first element of this row is the one that could have the # BOM that we want to remove. Check that the first element is a # string before proceeding. if not isinstance(first_row[0], str): return first_row # Check that the string is not empty, as that would # obviously not have a BOM at the start of it. if not first_row[0]: return first_row # Since the string is non-empty, check that it does # in fact begin with a BOM. first_elt = first_row[0][0] if first_elt != _BOM: return first_row first_row_bom = first_row[0] new_row: str if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: start = 2 quote = first_row_bom[1] end = first_row_bom[2:].index(quote) + 2 # Extract the data between the quotation marks new_row = first_row_bom[start:end] # Extract any remaining data after the second # quotation mark. if len(first_row_bom) > end + 1: new_row += first_row_bom[end + 1 :] else: # No quotation so just remove BOM from first element new_row = first_row_bom[1:] new_row_list: list[Scalar] = [new_row] return new_row_list + first_row[1:] def _is_line_empty(self, line: Sequence[Scalar]) -> bool: """ Check if a line is empty or not. Parameters ---------- line : str, array-like The line of data to check. Returns ------- boolean : Whether or not the line is empty. 
""" return not line or all(not x for x in line) def _next_line(self) -> list[Scalar]: if isinstance(self.data, list): while self.skipfunc(self.pos): if self.pos >= len(self.data): break self.pos += 1 while True: try: line = self._check_comments([self.data[self.pos]])[0] self.pos += 1 # either uncommented or blank to begin with if not self.skip_blank_lines and ( self._is_line_empty(self.data[self.pos - 1]) or line ): break if self.skip_blank_lines: ret = self._remove_empty_lines([line]) if ret: line = ret[0] break except IndexError as err: raise StopIteration from err else: while self.skipfunc(self.pos): self.pos += 1 next(self.data) while True: orig_line = self._next_iter_line(row_num=self.pos + 1) self.pos += 1 if orig_line is not None: line = self._check_comments([orig_line])[0] if self.skip_blank_lines: ret = self._remove_empty_lines([line]) if ret: line = ret[0] break elif self._is_line_empty(orig_line) or line: break # This was the first line of the file, # which could contain the BOM at the # beginning of it. if self.pos == 1: line = self._check_for_bom(line) self.line_pos += 1 self.buf.append(line) return line def _alert_malformed(self, msg: str, row_num: int) -> None: """ Alert a user about a malformed row, depending on value of `self.on_bad_lines` enum. If `self.on_bad_lines` is ERROR, the alert will be `ParserError`. If `self.on_bad_lines` is WARN, the alert will be printed out. Parameters ---------- msg: str The error message to display. row_num: int The row number where the parsing error occurred. Because this row number is displayed, we 1-index, even though we 0-index internally. 
""" if self.on_bad_lines == self.BadLineHandleMethod.ERROR: raise ParserError(msg) if self.on_bad_lines == self.BadLineHandleMethod.WARN or callable( self.on_bad_lines ): warnings.warn( f"Skipping line {row_num}: {msg}\n", ParserWarning, stacklevel=find_stack_level(), ) def _next_iter_line(self, row_num: int) -> list[Scalar] | None: """ Wrapper around iterating through `self.data` (CSV source). When a CSV error is raised, we check for specific error messages that allow us to customize the error message displayed to the user. Parameters ---------- row_num: int The row number of the line being parsed. """ try: assert not isinstance(self.data, list) line = next(self.data) # lie about list[str] vs list[Scalar] to minimize ignores return line # type: ignore[return-value] except csv.Error as e: if self.on_bad_lines in ( self.BadLineHandleMethod.ERROR, self.BadLineHandleMethod.WARN, ): msg = str(e) if "NULL byte" in msg or "line contains NUL" in msg: msg = ( "NULL byte detected. This byte " "cannot be processed in Python's " "native csv library at the moment, " "so please pass in engine='c' instead" ) if self.skipfooter > 0: reason = ( "Error could possibly be due to " "parsing errors in the skipped footer rows " "(the skipfooter keyword is only applied " "after Python's csv library has parsed " "all rows)." ) msg += ". 
" + reason self._alert_malformed(msg, row_num) return None def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.comment is None: return lines ret = [] for line in lines: rl = [] for x in line: if ( not isinstance(x, str) or self.comment not in x or x in self.na_values ): rl.append(x) else: x = x[: x.find(self.comment)] if len(x) > 0: rl.append(x) break ret.append(rl) return ret def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]: """ Iterate through the lines and remove any that are either empty or contain only one whitespace value Parameters ---------- lines : list of list of Scalars The array of lines that we are to filter. Returns ------- filtered_lines : list of list of Scalars The same array of lines with the "empty" ones removed. """ # Remove empty lines and lines with only one whitespace value ret = [ line for line in lines if ( len(line) > 1 or ( len(line) == 1 and (not isinstance(line[0], str) or line[0].strip()) ) ) ] return ret def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.thousands is None: return lines return self._search_replace_num_columns( lines=lines, search=self.thousands, replace="" ) def _search_replace_num_columns( self, lines: list[list[Scalar]], search: str, replace: str ) -> list[list[Scalar]]: ret = [] for line in lines: rl = [] for i, x in enumerate(line): if ( not isinstance(x, str) or search not in x or i in self._no_thousands_columns or not self.num.search(x.strip()) ): rl.append(x) else: rl.append(x.replace(search, replace)) ret.append(rl) return ret def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.decimal == parser_defaults["decimal"]: return lines return self._search_replace_num_columns( lines=lines, search=self.decimal, replace="." 
) def _get_index_name( self, ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]: """ Try several cases to get lines: 0) There are headers on row 0 and row 1 and their total summed lengths equals the length of the next line. Treat row 0 as columns and row 1 as indices 1) Look for implicit index: there are more columns on row 1 than row 0. If this is true, assume that row 1 lists index columns and row 0 lists normal columns. 2) Get index from the columns if it was listed. """ columns: Sequence[Hashable] = self.orig_names orig_names = list(columns) columns = list(columns) line: list[Scalar] | None if self._header_line is not None: line = self._header_line else: try: line = self._next_line() except StopIteration: line = None next_line: list[Scalar] | None try: next_line = self._next_line() except StopIteration: next_line = None # implicitly index_col=0 b/c 1 fewer column names implicit_first_cols = 0 if line is not None: # leave it 0, #2442 # Case 1 index_col = self.index_col if index_col is not False: implicit_first_cols = len(line) - self.num_original_columns # Case 0 if ( next_line is not None and self.header is not None and index_col is not False ): if len(next_line) == len(line) + self.num_original_columns: # column and index names on diff rows self.index_col = list(range(len(line))) self.buf = self.buf[1:] for c in reversed(line): columns.insert(0, c) # Update list of original names to include all indices. 
orig_names = list(columns) self.num_original_columns = len(columns) return line, orig_names, columns if implicit_first_cols > 0: # Case 1 self._implicit_index = True if self.index_col is None: self.index_col = list(range(implicit_first_cols)) index_name = None else: # Case 2 (index_name, _, self.index_col) = self._clean_index_names( columns, self.index_col ) return index_name, orig_names, columns def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: col_len = self.num_original_columns if self._implicit_index: col_len += len(self.index_col) max_len = max(len(row) for row in content) # Check that there are no rows with too many # elements in their row (rows with too few # elements are padded with NaN). if max_len > col_len and self.index_col is not False and self.usecols is None: footers = self.skipfooter if self.skipfooter else 0 bad_lines = [] iter_content = enumerate(content) content_len = len(content) content = [] for i, _content in iter_content: actual_len = len(_content) if actual_len > col_len: if callable(self.on_bad_lines): new_l = self.on_bad_lines(_content) if new_l is not None: new_l = cast(list[Scalar], new_l) if len(new_l) > col_len: row_num = self.pos - (content_len - i + footers) bad_lines.append((row_num, len(new_l), "callable")) new_l = new_l[:col_len] content.append(new_l) elif self.on_bad_lines in ( self.BadLineHandleMethod.ERROR, self.BadLineHandleMethod.WARN, ): row_num = self.pos - (content_len - i + footers) bad_lines.append((row_num, actual_len, "normal")) if self.on_bad_lines == self.BadLineHandleMethod.ERROR: break else: content.append(_content) for row_num, actual_len, source in bad_lines: msg = ( f"Expected {col_len} fields in line {row_num + 1}, saw {actual_len}" ) if source == "callable": msg += " from bad_lines callable" elif ( self.delimiter and len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE ): # see gh-13374 reason = ( "Error could possibly be due to quotes being " "ignored when a multi-char delimiter 
is used." ) msg += ". " + reason self._alert_malformed(msg, row_num + 1) # see gh-13320 zipped_content = list(lib.to_object_array(content, min_width=col_len).T) if self.usecols: assert self._col_indices is not None col_indices = self._col_indices if self._implicit_index: zipped_content = [ a for i, a in enumerate(zipped_content) if ( i < len(self.index_col) or i - len(self.index_col) in col_indices ) ] else: zipped_content = [ a for i, a in enumerate(zipped_content) if i in col_indices ] return zipped_content def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]: lines = self.buf new_rows = None # already fetched some number if rows is not None: # we already have the lines in the buffer if len(self.buf) >= rows: new_rows, self.buf = self.buf[:rows], self.buf[rows:] # need some lines else: rows -= len(self.buf) if new_rows is None: if isinstance(self.data, list): if self.pos > len(self.data): raise StopIteration if rows is None: new_rows = self.data[self.pos :] new_pos = len(self.data) else: new_rows = self.data[self.pos : self.pos + rows] new_pos = self.pos + rows new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) self.pos = new_pos else: new_rows = [] try: if rows is not None: row_index = 0 row_ct = 0 offset = self.pos if self.pos is not None else 0 while row_ct < rows: new_row = next(self.data) if not self.skipfunc(offset + row_index): row_ct += 1 row_index += 1 new_rows.append(new_row) len_new_rows = len(new_rows) new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) else: rows = 0 while True: next_row = self._next_iter_line(row_num=self.pos + rows + 1) rows += 1 if next_row is not None: new_rows.append(next_row) len_new_rows = len(new_rows) except StopIteration: len_new_rows = len(new_rows) new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) if len(lines) == 0: raise self.pos += len_new_rows self.buf = [] else: lines = new_rows if self.skipfooter: lines = lines[: -self.skipfooter] lines = 
self._check_comments(lines) if self.skip_blank_lines: lines = self._remove_empty_lines(lines) lines = self._check_thousands(lines) return self._check_decimal(lines) def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: if self.skiprows: return [ row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos) ] return new_rows def _set_no_thousand_columns(self) -> set[int]: no_thousands_columns: set[int] = set() if self.columns and self.parse_dates: assert self._col_indices is not None no_thousands_columns = self._set_noconvert_dtype_columns( self._col_indices, self.columns ) if self.columns and self.dtype: assert self._col_indices is not None for i, col in zip(self._col_indices, self.columns, strict=True): if not isinstance(self.dtype, dict) and not is_numeric_dtype( self.dtype ): no_thousands_columns.add(i) if ( isinstance(self.dtype, dict) and col in self.dtype and ( not is_numeric_dtype(self.dtype[col]) or is_bool_dtype(self.dtype[col]) ) ): no_thousands_columns.add(i) return no_thousands_columns class FixedWidthReader(abc.Iterator): """ A reader of fixed-width lines. 
""" def __init__( self, f: IO[str] | ReadCsvBuffer[str], colspecs: list[tuple[int, int]] | Literal["infer"], delimiter: str | None, comment: str | None, skiprows: set[int] | None = None, infer_nrows: int = 100, ) -> None: self.f = f self.buffer: Iterator | None = None self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t " self.comment = comment if colspecs == "infer": self.colspecs = self.detect_colspecs( infer_nrows=infer_nrows, skiprows=skiprows ) else: self.colspecs = colspecs if not isinstance(self.colspecs, (tuple, list)): raise TypeError( "column specifications must be a list or tuple, " f"input was a {type(colspecs).__name__}" ) for colspec in self.colspecs: if not ( isinstance(colspec, (tuple, list)) and len(colspec) == 2 and isinstance(colspec[0], (int, np.integer, type(None))) and isinstance(colspec[1], (int, np.integer, type(None))) ): raise TypeError( "Each column specification must be " "2 element tuple or list of integers" ) def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]: """ Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped some rows here as well. Parameters ---------- infer_nrows : int Number of rows to read from self.f, not counting rows that are skipped. skiprows: set, optional Indices of rows to skip. Returns ------- detect_rows : list of str A list containing the rows to read. 
""" if skiprows is None: skiprows = set() buffer_rows = [] detect_rows = [] for i, row in enumerate(self.f): if i not in skiprows: detect_rows.append(row) buffer_rows.append(row) if len(detect_rows) >= infer_nrows: break self.buffer = iter(buffer_rows) return detect_rows def detect_colspecs( self, infer_nrows: int = 100, skiprows: set[int] | None = None ) -> list[tuple[int, int]]: # Regex escape the delimiters delimiters = "".join([rf"\{x}" for x in self.delimiter]) pattern = re.compile(f"([^{delimiters}]+)") rows = self.get_rows(infer_nrows, skiprows) if not rows: raise EmptyDataError("No rows from which to infer column width") max_len = max(map(len, rows)) mask = np.zeros(max_len + 1, dtype=int) if self.comment is not None: rows = [row.partition(self.comment)[0] for row in rows] for row in rows: for m in pattern.finditer(row): mask[m.start() : m.end()] = 1 shifted = np.roll(mask, 1) shifted[0] = 0 edges = np.where((mask ^ shifted) == 1)[0] edge_pairs = list(zip(edges[::2], edges[1::2], strict=True)) return edge_pairs def __next__(self) -> list[str]: if self.buffer is not None: try: line = next(self.buffer) except StopIteration: self.buffer = None line = next(self.f) # type: ignore[arg-type] else: line = next(self.f) # type: ignore[arg-type] # Note: 'colspecs' is a sequence of half-open intervals. return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs] class FixedWidthFieldParser(PythonParser): """ Specialization that Converts fixed-width fields into DataFrames. See PythonParser for details. """ def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None: # Support iterators, convert to a list. 
self.colspecs = kwds.pop("colspecs") self.infer_nrows = kwds.pop("infer_nrows") PythonParser.__init__(self, f, **kwds) def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader: return FixedWidthReader( f, self.colspecs, self.delimiter, self.comment, self.skiprows, self.infer_nrows, ) def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]: """ Returns the list of lines without the empty ones. With fixed-width fields, empty lines become arrays of empty strings. See PythonParser._remove_empty_lines. """ return [ line for line in lines if any(not isinstance(e, str) or e.strip() for e in line) ] def _validate_skipfooter_arg(skipfooter: int) -> int: """ Validate the 'skipfooter' parameter. Checks whether 'skipfooter' is a non-negative integer. Raises a ValueError if that is not the case. Parameters ---------- skipfooter : non-negative integer The number of rows to skip at the end of the file. Returns ------- validated_skipfooter : non-negative integer The original input if the validation succeeds. Raises ------ ValueError : 'skipfooter' was not a non-negative integer. """ if not is_integer(skipfooter): raise ValueError("skipfooter must be an integer") if skipfooter < 0: raise ValueError("skipfooter cannot be negative") # Incompatible return value type (got "Union[int, integer[Any]]", expected "int") return skipfooter # type: ignore[return-value]
python
github
https://github.com/pandas-dev/pandas
pandas/io/parsers/python_parser.py
"""distutils.command.upload Implements the Distutils 'upload' subcommand (upload package to PyPI).""" import os import socket import platform from urllib2 import urlopen, Request, HTTPError from base64 import standard_b64encode import urlparse import cStringIO as StringIO from hashlib import md5 from distutils.errors import DistutilsError, DistutilsOptionError from distutils.core import PyPIRCCommand from distutils.spawn import spawn from distutils import log class upload(PyPIRCCommand): description = "upload binary package to PyPI" user_options = PyPIRCCommand.user_options + [ ('sign', 's', 'sign files to upload using gpg'), ('identity=', 'i', 'GPG identity used to sign files'), ] boolean_options = PyPIRCCommand.boolean_options + ['sign'] def initialize_options(self): PyPIRCCommand.initialize_options(self) self.username = '' self.password = '' self.show_response = 0 self.sign = False self.identity = None def finalize_options(self): PyPIRCCommand.finalize_options(self) if self.identity and not self.sign: raise DistutilsOptionError( "Must use --sign for --identity to have meaning" ) config = self._read_pypirc() if config != {}: self.username = config['username'] self.password = config['password'] self.repository = config['repository'] self.realm = config['realm'] # getting the password from the distribution # if previously set by the register command if not self.password and self.distribution.password: self.password = self.distribution.password def run(self): if not self.distribution.dist_files: raise DistutilsOptionError("No dist file created in earlier command") for command, pyversion, filename in self.distribution.dist_files: self.upload_file(command, pyversion, filename) def upload_file(self, command, pyversion, filename): # Makes sure the repository URL is compliant schema, netloc, url, params, query, fragments = \ urlparse.urlparse(self.repository) if params or query or fragments: raise AssertionError("Incompatible url %s" % self.repository) if schema not in 
('http', 'https'): raise AssertionError("unsupported schema " + schema) # Sign if requested if self.sign: gpg_args = ["gpg", "--detach-sign", "-a", filename] if self.identity: gpg_args[2:2] = ["--local-user", self.identity] spawn(gpg_args, dry_run=self.dry_run) # Fill in the data - send all the meta-data in case we need to # register a new release f = open(filename,'rb') try: content = f.read() finally: f.close() meta = self.distribution.metadata data = { # action ':action': 'file_upload', 'protcol_version': '1', # identify release 'name': meta.get_name(), 'version': meta.get_version(), # file content 'content': (os.path.basename(filename),content), 'filetype': command, 'pyversion': pyversion, 'md5_digest': md5(content).hexdigest(), # additional meta-data 'metadata_version' : '1.0', 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } comment = '' if command == 'bdist_rpm': dist, version, id = platform.dist() if dist: comment = 'built for %s %s' % (dist, version) elif command == 'bdist_dumb': comment = 'built for %s' % platform.platform(terse=1) data['comment'] = comment if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", open(filename+".asc").read()) # set up the authentication auth = "Basic " + standard_b64encode(self.username + ":" + self.password) # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\r\n--' + boundary end_boundary = sep_boundary + '--\r\n' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same 
name if not isinstance(value, list): value = [value] for value in value: if isinstance(value, tuple): fn = ';filename="%s"' % value[0] value = value[1] else: fn = "" body.write(sep_boundary) body.write('\r\nContent-Disposition: form-data; name="%s"' % key) body.write(fn) body.write("\r\n\r\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) # build the Request headers = {'Content-type': 'multipart/form-data; boundary=%s' % boundary, 'Content-length': str(len(body)), 'Authorization': auth} request = Request(self.repository, data=body, headers=headers) # send the data try: result = urlopen(request) status = result.getcode() reason = result.msg if self.show_response: msg = '\n'.join(('-' * 75, result.read(), '-' * 75)) self.announce(msg, log.INFO) except socket.error, e: self.announce(str(e), log.ERROR) raise except HTTPError, e: status = e.code reason = e.msg if status == 200: self.announce('Server response (%s): %s' % (status, reason), log.INFO) else: msg = 'Upload failed (%s): %s' % (status, reason) self.announce(msg, log.ERROR) raise DistutilsError(msg)
unknown
codeparrot/codeparrot-clean
from gen import * ########## # shared # ########## flow_var[0] = """ (declare-fun tau () Real) """ flow_dec[0] = """ (define-ode flow_1 ((= d/dt[tau] 1))) """ state_dec[0] = """ (declare-fun time_{0} () Real) (declare-fun tau_{0}_0 () Real) (declare-fun tau_{0}_t () Real) """ state_val[0] = """ (assert (<= 0 time_{0})) (assert (<= time_{0} 1)) (assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1)) (assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1)) (assert (and (not (and (= mode_1_{0} 1) (= mode_1_{0} 2))) (not (and (= mode_2_{0} 1) (= mode_2_{0} 2))))) """ cont_cond[0] = [""" (assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1) (>= tau_{0}_t 0) (<= tau_{0}_t 1) (forall_t 1 [0 time_{0}] (>= tau_{0}_t 0)) (forall_t 2 [0 time_{0}] (<= tau_{0}_t 1)))) (assert (and (= [x1_{0}_t x2_{0}_t tau_{0}_t] (pintegral 0. time_{0} [x1_{0}_0 x2_{0}_0 tau_{0}_0] [holder_{1} holder_{2} holder_{3}])) (connect holder_{3} flow_1)))"""] jump_cond[0] = [""" (assert (and (= tau_{0}_t 1) (= tau_{1}_0 0)))"""] ################ # thermostat 1 # ################ flow_var[1] = """ (declare-fun x1 () Real) """ flow_dec[1] = """ (define-ode flow_2 ((= d/dt[x1] (* 0.015 (- 100 (+ (* (- 1 0.01) x1) (* 0.01 x2))))))) (define-ode flow_3 ((= d/dt[x1] (* -0.015 (+ (* (- 1 0.01) x1) (* 0.01 x2)))))) """ state_dec[1] = """ (declare-fun mode_1_{0} () Int) (declare-fun x1_{0}_0 () Real) (declare-fun x1_{0}_t () Real) """ state_val[1] = """ (assert (<= -20 x1_{0}_0)) (assert (<= x1_{0}_0 100)) (assert (<= -20 x1_{0}_t)) (assert (<= x1_{0}_t 100)) """ cont_cond[1] = [""" (assert (or (and (= mode_1_{0} 2) (connect holder_{1} flow_2)) (and (= mode_1_{0} 1) (connect holder_{1} flow_3)))) (assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_3))))"""] jump_cond[1] = [""" (assert (and (= x1_{1}_0 x1_{0}_t))) (assert (or (and (<= x1_{0}_t 20) (= mode_1_{1} 2)) (and (> x1_{0}_t 20) (= mode_1_{1} 1))))"""] ################ # thermostat 2 # ################ flow_var[2] = """ (declare-fun x2 () Real) """ 
flow_dec[2] = """ (define-ode flow_4 ((= d/dt[x2] (* 0.045 (- 200 (+ (* (- 1 0.01) x2) (* 0.01 x1))))))) (define-ode flow_5 ((= d/dt[x2] (* -0.045 (+ (* (- 1 0.01) x2) (* 0.01 x1)))))) """ state_dec[2] = """ (declare-fun mode_2_{0} () Int) (declare-fun x2_{0}_0 () Real) (declare-fun x2_{0}_t () Real) """ state_val[2] = """ (assert (<= -20 x2_{0}_0)) (assert (<= x2_{0}_0 100)) (assert (<= -20 x2_{0}_t)) (assert (<= x2_{0}_t 100)) """ cont_cond[2] = [""" (assert (or (and (= mode_2_{0} 2) (connect holder_{2} flow_4)) (and (= mode_2_{0} 1) (connect holder_{2} flow_5)))) (assert (not (and (connect holder_{2} flow_4) (connect holder_{2} flow_5))))"""] jump_cond[2] = [""" (assert (and (= x2_{1}_0 x2_{0}_t))) (assert (or (and (<= x2_{0}_t 20) (= mode_2_{1} 2)) (and (> x2_{0}_t 20) (= mode_2_{1} 1))))"""] ############# # Init/Goal # ############# init_cond = """ (assert (= tau_{0}_0 0)) (assert (= mode_1_{0} 2)) (assert (and (>= x1_{0}_0 (- 20 1)) (<= x1_{0}_0 (+ 20 1)))) (assert (= mode_2_{0} 2)) (assert (and (>= x2_{0}_0 (- 20 1)) (<= x2_{0}_0 (+ 20 1)))) """ goal_cond = """ (assert (or (< x1_{0}_t (- 20 5)) (> x1_{0}_t (+ 20 5)))) (assert (or (< x2_{0}_t (- 20 5)) (> x2_{0}_t (+ 20 5)))) """ import sys try: bound = int(sys.argv[1]) except: print("Usage:", sys.argv[0], "<Bound>") else: generate(bound, 1, [0,1,2], 3, init_cond, goal_cond)
unknown
codeparrot/codeparrot-clean
### README first This section of the documentation contains a guide for Moby project users who want to contribute code or documentation to the Moby Engine project. As a community, we share rules of behavior and interaction. Make sure you are familiar with the <a href="https://github.com/moby/moby/blob/master/CONTRIBUTING.md#moby-community-guidelines" target="_blank">community guidelines</a> before continuing. ## Where and what you can contribute The Moby project consists of not just one but several repositories on GitHub. So, in addition to the `moby/moby` repository, there is the `containerd/containerd` repo, the `moby/buildkit` repo, and several more. Contribute to any of these and you contribute to the Moby project. Not all Moby repositories use the Go language. Also, each repository has its own focus area. So, if you are an experienced contributor, think about contributing to a Moby project repository that has a language or a focus area you are familiar with. If you are new to the open source community, to Moby, or to formal programming, you should start out contributing to the `moby/moby` repository. Why? Because this guide is written for that repository specifically. Finally, code or documentation isn't the only way to contribute. You can report an issue, add to discussions in our community channel, write a blog post, or take a usability test. You can even propose your own type of contribution. Right now we don't have a lot written about this yet, but feel free to open an issue to discuss other contributions. ## How to use this guide This is written for the distracted, the overworked, the sloppy reader with fair `git` skills and a failing memory for the GitHub GUI. The guide attempts to explain how to use the Moby Engine development environment as precisely, predictably, and procedurally as possible. Users who are new to Engine development should start by setting up their environment. Then, they should try a simple code change. 
After that, you should find something to work on or propose a totally new change. If you are a programming prodigy, you still may find this documentation useful. Please feel free to skim past information you find obvious or boring. ## How to get started Start by getting the software you require. If you are on Mac or Linux, go to [get the required software for Linux or macOS](software-required.md). If you are on Windows, see [get the required software for Windows](software-req-win.md).
unknown
github
https://github.com/moby/moby
docs/contributing/who-written-for.md
from __future__ import division, absolute_import, print_function import os from numpy.testing import run_module_suite, assert_equal, dec import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestSizeSumExample(util.F2PyTest): sources = [_path('src', 'size', 'foo.f90')] @dec.slow def test_all(self): r = self.module.foo([[1, 2]]) assert_equal(r, [3], repr(r)) r = self.module.foo([[1, 2], [3, 4]]) assert_equal(r, [3, 7], repr(r)) r = self.module.foo([[1, 2], [3, 4], [5, 6]]) assert_equal(r, [3, 7, 11], repr(r)) @dec.slow def test_transpose(self): r = self.module.trans([[1, 2]]) assert_equal(r, [[1], [2]], repr(r)) r = self.module.trans([[1, 2, 3], [4, 5, 6]]) assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) @dec.slow def test_flatten(self): r = self.module.flatten([[1, 2]]) assert_equal(r, [1, 2], repr(r)) r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) if __name__ == "__main__": run_module_suite()
unknown
codeparrot/codeparrot-clean
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from caper import FragmentMatcher from caper.group import CaptureGroup from caper.result import CaperResult, CaperClosureNode, CaperRootNode from logr import Logr class Parser(object): def __init__(self, matcher, debug=False): self.debug = debug self.matcher = matcher self.closures = None #: :type: caper.result.CaperResult self.result = None self._match_cache = None self._fragment_pos = None self._closure_pos = None self._history = None self.reset() def reset(self): self.closures = None self.result = CaperResult() self._match_cache = {} self._fragment_pos = -1 self._closure_pos = -1 self._history = [] def setup(self, closures): """ :type closures: list of CaperClosure """ self.reset() self.closures = closures self.result.heads = [CaperRootNode(closures[0])] def run(self, closures): """ :type closures: list of CaperClosure """ raise NotImplementedError() # # Capture Methods # def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs): return CaptureGroup(self, self.result).capture_fragment( tag, regex=regex, func=func, single=single, **kwargs ) def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs): return CaptureGroup(self, self.result).capture_closure( tag, regex=regex, func=func, single=single, **kwargs )
unknown
codeparrot/codeparrot-clean
from androguard.core.analysis import analysis from datetime import datetime from blessings import Terminal t = Terminal() class InsecureStorageEnum(object): values = [ "getExternalFilesDir", "getSharedPreferences", "openFileOutput" "openFileInput" ] class InsecureStorage(object): name = "storage" def __init__(self, apks): super(InsecureStorage, self).__init__() self.apks = apks self.enum = InsecureStorageEnum() def run(self): """ Search for storage API usage within target class and methods """ x = analysis.uVMAnalysis(self.apks.get_vm()) vm = self.apks.get_vm() if x: print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Performing surgery ..."))) # Get enum values # for v in self.enum.values: # This returns PathP # objects into a list # paths = x.get_tainted_packages().search_methods("android.content.Context", "{0}".format(v), ".") if paths: for p in paths: for method in self.apks.get_methods(): if method.get_name() == p.get_src(vm.get_class_manager())[1]: if method.get_class_name() == p.get_src(vm.get_class_manager())[0]: print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Found: ") + "{0}".format(v))) print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Class: ") + "{0}".format(method.get_class_name()))) print(t.green("[{0}] ".format(datetime.now()) + t.yellow("Method: ") + "{0}".format(method.get_name()))) print(method.show())
unknown
codeparrot/codeparrot-clean
/*------------------------------------------------------------------------- * * encnames.c * Encoding names and routines for working with them. * * Portions Copyright (c) 2001-2026, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/encnames.c * *------------------------------------------------------------------------- */ #include "c.h" #include <ctype.h> #include <unistd.h> #include "mb/pg_wchar.h" /* ---------- * All encoding names, sorted: *** A L P H A B E T I C *** * * All names must be without irrelevant chars, search routines use * isalnum() chars only. It means ISO-8859-1, iso_8859-1 and Iso8859_1 * are always converted to 'iso88591'. All must be lower case. * * The table doesn't contain 'cs' aliases (like csISOLatin1). It's needed? * * Karel Zak, Aug 2001 * ---------- */ typedef struct pg_encname { const char *name; pg_enc encoding; } pg_encname; static const pg_encname pg_encname_tbl[] = { { "abc", PG_WIN1258 }, /* alias for WIN1258 */ { "alt", PG_WIN866 }, /* IBM866 */ { "big5", PG_BIG5 }, /* Big5; Chinese for Taiwan multibyte set */ { "euccn", PG_EUC_CN }, /* EUC-CN; Extended Unix Code for simplified * Chinese */ { "eucjis2004", PG_EUC_JIS_2004 }, /* EUC-JIS-2004; Extended UNIX Code fixed * Width for Japanese, standard JIS X 0213 */ { "eucjp", PG_EUC_JP }, /* EUC-JP; Extended UNIX Code fixed Width for * Japanese, standard OSF */ { "euckr", PG_EUC_KR }, /* EUC-KR; Extended Unix Code for Korean , KS * X 1001 standard */ { "euctw", PG_EUC_TW }, /* EUC-TW; Extended Unix Code for * * traditional Chinese */ { "gb18030", PG_GB18030 }, /* GB18030;GB18030 */ { "gbk", PG_GBK }, /* GBK; Chinese Windows CodePage 936 * simplified Chinese */ { "iso88591", PG_LATIN1 }, /* ISO-8859-1; RFC1345,KXS2 */ { "iso885910", PG_LATIN6 }, /* ISO-8859-10; RFC1345,KXS2 */ { "iso885913", PG_LATIN7 }, /* ISO-8859-13; RFC1345,KXS2 */ { "iso885914", PG_LATIN8 }, /* ISO-8859-14; RFC1345,KXS2 */ { "iso885915", PG_LATIN9 }, /* ISO-8859-15; RFC1345,KXS2 */ { "iso885916", 
PG_LATIN10 }, /* ISO-8859-16; RFC1345,KXS2 */ { "iso88592", PG_LATIN2 }, /* ISO-8859-2; RFC1345,KXS2 */ { "iso88593", PG_LATIN3 }, /* ISO-8859-3; RFC1345,KXS2 */ { "iso88594", PG_LATIN4 }, /* ISO-8859-4; RFC1345,KXS2 */ { "iso88595", PG_ISO_8859_5 }, /* ISO-8859-5; RFC1345,KXS2 */ { "iso88596", PG_ISO_8859_6 }, /* ISO-8859-6; RFC1345,KXS2 */ { "iso88597", PG_ISO_8859_7 }, /* ISO-8859-7; RFC1345,KXS2 */ { "iso88598", PG_ISO_8859_8 }, /* ISO-8859-8; RFC1345,KXS2 */ { "iso88599", PG_LATIN5 }, /* ISO-8859-9; RFC1345,KXS2 */ { "johab", PG_JOHAB }, /* JOHAB; Extended Unix Code for simplified * Chinese */ { "koi8", PG_KOI8R }, /* _dirty_ alias for KOI8-R (backward * compatibility) */ { "koi8r", PG_KOI8R }, /* KOI8-R; RFC1489 */ { "koi8u", PG_KOI8U }, /* KOI8-U; RFC2319 */ { "latin1", PG_LATIN1 }, /* alias for ISO-8859-1 */ { "latin10", PG_LATIN10 }, /* alias for ISO-8859-16 */ { "latin2", PG_LATIN2 }, /* alias for ISO-8859-2 */ { "latin3", PG_LATIN3 }, /* alias for ISO-8859-3 */ { "latin4", PG_LATIN4 }, /* alias for ISO-8859-4 */ { "latin5", PG_LATIN5 }, /* alias for ISO-8859-9 */ { "latin6", PG_LATIN6 }, /* alias for ISO-8859-10 */ { "latin7", PG_LATIN7 }, /* alias for ISO-8859-13 */ { "latin8", PG_LATIN8 }, /* alias for ISO-8859-14 */ { "latin9", PG_LATIN9 }, /* alias for ISO-8859-15 */ { "mskanji", PG_SJIS }, /* alias for Shift_JIS */ { "muleinternal", PG_MULE_INTERNAL }, { "shiftjis", PG_SJIS }, /* Shift_JIS; JIS X 0202-1991 */ { "shiftjis2004", PG_SHIFT_JIS_2004 }, /* SHIFT-JIS-2004; Shift JIS for Japanese, * standard JIS X 0213 */ { "sjis", PG_SJIS }, /* alias for Shift_JIS */ { "sqlascii", PG_SQL_ASCII }, { "tcvn", PG_WIN1258 }, /* alias for WIN1258 */ { "tcvn5712", PG_WIN1258 }, /* alias for WIN1258 */ { "uhc", PG_UHC }, /* UHC; Korean Windows CodePage 949 */ { "unicode", PG_UTF8 }, /* alias for UTF8 */ { "utf8", PG_UTF8 }, /* alias for UTF8 */ { "vscii", PG_WIN1258 }, /* alias for WIN1258 */ { "win", PG_WIN1251 }, /* _dirty_ alias for windows-1251 (backward * 
compatibility) */ { "win1250", PG_WIN1250 }, /* alias for Windows-1250 */ { "win1251", PG_WIN1251 }, /* alias for Windows-1251 */ { "win1252", PG_WIN1252 }, /* alias for Windows-1252 */ { "win1253", PG_WIN1253 }, /* alias for Windows-1253 */ { "win1254", PG_WIN1254 }, /* alias for Windows-1254 */ { "win1255", PG_WIN1255 }, /* alias for Windows-1255 */ { "win1256", PG_WIN1256 }, /* alias for Windows-1256 */ { "win1257", PG_WIN1257 }, /* alias for Windows-1257 */ { "win1258", PG_WIN1258 }, /* alias for Windows-1258 */ { "win866", PG_WIN866 }, /* IBM866 */ { "win874", PG_WIN874 }, /* alias for Windows-874 */ { "win932", PG_SJIS }, /* alias for Shift_JIS */ { "win936", PG_GBK }, /* alias for GBK */ { "win949", PG_UHC }, /* alias for UHC */ { "win950", PG_BIG5 }, /* alias for BIG5 */ { "windows1250", PG_WIN1250 }, /* Windows-1251; Microsoft */ { "windows1251", PG_WIN1251 }, /* Windows-1251; Microsoft */ { "windows1252", PG_WIN1252 }, /* Windows-1252; Microsoft */ { "windows1253", PG_WIN1253 }, /* Windows-1253; Microsoft */ { "windows1254", PG_WIN1254 }, /* Windows-1254; Microsoft */ { "windows1255", PG_WIN1255 }, /* Windows-1255; Microsoft */ { "windows1256", PG_WIN1256 }, /* Windows-1256; Microsoft */ { "windows1257", PG_WIN1257 }, /* Windows-1257; Microsoft */ { "windows1258", PG_WIN1258 }, /* Windows-1258; Microsoft */ { "windows866", PG_WIN866 }, /* IBM866 */ { "windows874", PG_WIN874 }, /* Windows-874; Microsoft */ { "windows932", PG_SJIS }, /* alias for Shift_JIS */ { "windows936", PG_GBK }, /* alias for GBK */ { "windows949", PG_UHC }, /* alias for UHC */ { "windows950", PG_BIG5 } /* alias for BIG5 */ }; /* ---------- * These are "official" encoding names. 
* ---------- */ #ifndef WIN32 #define DEF_ENC2NAME(name, codepage) { #name, PG_##name } #else #define DEF_ENC2NAME(name, codepage) { #name, PG_##name, codepage } #endif const pg_enc2name pg_enc2name_tbl[] = { [PG_SQL_ASCII] = DEF_ENC2NAME(SQL_ASCII, 0), [PG_EUC_JP] = DEF_ENC2NAME(EUC_JP, 20932), [PG_EUC_CN] = DEF_ENC2NAME(EUC_CN, 20936), [PG_EUC_KR] = DEF_ENC2NAME(EUC_KR, 51949), [PG_EUC_TW] = DEF_ENC2NAME(EUC_TW, 0), [PG_EUC_JIS_2004] = DEF_ENC2NAME(EUC_JIS_2004, 20932), [PG_UTF8] = DEF_ENC2NAME(UTF8, 65001), [PG_MULE_INTERNAL] = DEF_ENC2NAME(MULE_INTERNAL, 0), [PG_LATIN1] = DEF_ENC2NAME(LATIN1, 28591), [PG_LATIN2] = DEF_ENC2NAME(LATIN2, 28592), [PG_LATIN3] = DEF_ENC2NAME(LATIN3, 28593), [PG_LATIN4] = DEF_ENC2NAME(LATIN4, 28594), [PG_LATIN5] = DEF_ENC2NAME(LATIN5, 28599), [PG_LATIN6] = DEF_ENC2NAME(LATIN6, 0), [PG_LATIN7] = DEF_ENC2NAME(LATIN7, 0), [PG_LATIN8] = DEF_ENC2NAME(LATIN8, 0), [PG_LATIN9] = DEF_ENC2NAME(LATIN9, 28605), [PG_LATIN10] = DEF_ENC2NAME(LATIN10, 0), [PG_WIN1256] = DEF_ENC2NAME(WIN1256, 1256), [PG_WIN1258] = DEF_ENC2NAME(WIN1258, 1258), [PG_WIN866] = DEF_ENC2NAME(WIN866, 866), [PG_WIN874] = DEF_ENC2NAME(WIN874, 874), [PG_KOI8R] = DEF_ENC2NAME(KOI8R, 20866), [PG_WIN1251] = DEF_ENC2NAME(WIN1251, 1251), [PG_WIN1252] = DEF_ENC2NAME(WIN1252, 1252), [PG_ISO_8859_5] = DEF_ENC2NAME(ISO_8859_5, 28595), [PG_ISO_8859_6] = DEF_ENC2NAME(ISO_8859_6, 28596), [PG_ISO_8859_7] = DEF_ENC2NAME(ISO_8859_7, 28597), [PG_ISO_8859_8] = DEF_ENC2NAME(ISO_8859_8, 28598), [PG_WIN1250] = DEF_ENC2NAME(WIN1250, 1250), [PG_WIN1253] = DEF_ENC2NAME(WIN1253, 1253), [PG_WIN1254] = DEF_ENC2NAME(WIN1254, 1254), [PG_WIN1255] = DEF_ENC2NAME(WIN1255, 1255), [PG_WIN1257] = DEF_ENC2NAME(WIN1257, 1257), [PG_KOI8U] = DEF_ENC2NAME(KOI8U, 21866), [PG_SJIS] = DEF_ENC2NAME(SJIS, 932), [PG_BIG5] = DEF_ENC2NAME(BIG5, 950), [PG_GBK] = DEF_ENC2NAME(GBK, 936), [PG_UHC] = DEF_ENC2NAME(UHC, 949), [PG_GB18030] = DEF_ENC2NAME(GB18030, 54936), [PG_JOHAB] = DEF_ENC2NAME(JOHAB, 0), [PG_SHIFT_JIS_2004] = 
DEF_ENC2NAME(SHIFT_JIS_2004, 932), }; /* ---------- * These are encoding names for gettext. * * This covers all encodings except MULE_INTERNAL, which is alien to gettext. * ---------- */ const char *pg_enc2gettext_tbl[] = { [PG_SQL_ASCII] = "US-ASCII", [PG_UTF8] = "UTF-8", [PG_MULE_INTERNAL] = NULL, [PG_LATIN1] = "LATIN1", [PG_LATIN2] = "LATIN2", [PG_LATIN3] = "LATIN3", [PG_LATIN4] = "LATIN4", [PG_ISO_8859_5] = "ISO-8859-5", [PG_ISO_8859_6] = "ISO_8859-6", [PG_ISO_8859_7] = "ISO-8859-7", [PG_ISO_8859_8] = "ISO-8859-8", [PG_LATIN5] = "LATIN5", [PG_LATIN6] = "LATIN6", [PG_LATIN7] = "LATIN7", [PG_LATIN8] = "LATIN8", [PG_LATIN9] = "LATIN-9", [PG_LATIN10] = "LATIN10", [PG_KOI8R] = "KOI8-R", [PG_KOI8U] = "KOI8-U", [PG_WIN1250] = "CP1250", [PG_WIN1251] = "CP1251", [PG_WIN1252] = "CP1252", [PG_WIN1253] = "CP1253", [PG_WIN1254] = "CP1254", [PG_WIN1255] = "CP1255", [PG_WIN1256] = "CP1256", [PG_WIN1257] = "CP1257", [PG_WIN1258] = "CP1258", [PG_WIN866] = "CP866", [PG_WIN874] = "CP874", [PG_EUC_CN] = "EUC-CN", [PG_EUC_JP] = "EUC-JP", [PG_EUC_KR] = "EUC-KR", [PG_EUC_TW] = "EUC-TW", [PG_EUC_JIS_2004] = "EUC-JP", [PG_SJIS] = "SHIFT-JIS", [PG_BIG5] = "BIG5", [PG_GBK] = "GBK", [PG_UHC] = "UHC", [PG_GB18030] = "GB18030", [PG_JOHAB] = "JOHAB", [PG_SHIFT_JIS_2004] = "SHIFT_JISX0213", }; /* * Table of encoding names for ICU (currently covers backend encodings only) * * Reference: <https://ssl.icu-project.org/icu-bin/convexp> * * NULL entries are not supported by ICU, or their mapping is unclear. 
*/ static const char *const pg_enc2icu_tbl[] = { [PG_SQL_ASCII] = NULL, [PG_EUC_JP] = "EUC-JP", [PG_EUC_CN] = "EUC-CN", [PG_EUC_KR] = "EUC-KR", [PG_EUC_TW] = "EUC-TW", [PG_EUC_JIS_2004] = NULL, [PG_UTF8] = "UTF-8", [PG_MULE_INTERNAL] = NULL, [PG_LATIN1] = "ISO-8859-1", [PG_LATIN2] = "ISO-8859-2", [PG_LATIN3] = "ISO-8859-3", [PG_LATIN4] = "ISO-8859-4", [PG_LATIN5] = "ISO-8859-9", [PG_LATIN6] = "ISO-8859-10", [PG_LATIN7] = "ISO-8859-13", [PG_LATIN8] = "ISO-8859-14", [PG_LATIN9] = "ISO-8859-15", [PG_LATIN10] = NULL, [PG_WIN1256] = "CP1256", [PG_WIN1258] = "CP1258", [PG_WIN866] = "CP866", [PG_WIN874] = NULL, [PG_KOI8R] = "KOI8-R", [PG_WIN1251] = "CP1251", [PG_WIN1252] = "CP1252", [PG_ISO_8859_5] = "ISO-8859-5", [PG_ISO_8859_6] = "ISO-8859-6", [PG_ISO_8859_7] = "ISO-8859-7", [PG_ISO_8859_8] = "ISO-8859-8", [PG_WIN1250] = "CP1250", [PG_WIN1253] = "CP1253", [PG_WIN1254] = "CP1254", [PG_WIN1255] = "CP1255", [PG_WIN1257] = "CP1257", [PG_KOI8U] = "KOI8-U", }; StaticAssertDecl(lengthof(pg_enc2icu_tbl) == PG_ENCODING_BE_LAST + 1, "pg_enc2icu_tbl incomplete"); /* * Is this encoding supported by ICU? 
*/ bool is_encoding_supported_by_icu(int encoding) { if (!PG_VALID_BE_ENCODING(encoding)) return false; return (pg_enc2icu_tbl[encoding] != NULL); } /* * Returns ICU's name for encoding, or NULL if not supported */ const char * get_encoding_name_for_icu(int encoding) { if (!PG_VALID_BE_ENCODING(encoding)) return NULL; return pg_enc2icu_tbl[encoding]; } /* ---------- * Encoding checks, for error returns -1 else encoding id * ---------- */ int pg_valid_client_encoding(const char *name) { int enc; if ((enc = pg_char_to_encoding(name)) < 0) return -1; if (!PG_VALID_FE_ENCODING(enc)) return -1; return enc; } int pg_valid_server_encoding(const char *name) { int enc; if ((enc = pg_char_to_encoding(name)) < 0) return -1; if (!PG_VALID_BE_ENCODING(enc)) return -1; return enc; } int pg_valid_server_encoding_id(int encoding) { return PG_VALID_BE_ENCODING(encoding); } /* * Remove irrelevant chars from encoding name, store at *newkey * * (Caller's responsibility to provide a large enough buffer) */ static char * clean_encoding_name(const char *key, char *newkey) { const char *p; char *np; for (p = key, np = newkey; *p != '\0'; p++) { if (isalnum((unsigned char) *p)) { if (*p >= 'A' && *p <= 'Z') *np++ = *p + 'a' - 'A'; else *np++ = *p; } } *np = '\0'; return newkey; } /* * Search encoding by encoding name * * Returns encoding ID, or -1 if not recognized */ int pg_char_to_encoding(const char *name) { unsigned int nel = lengthof(pg_encname_tbl); const pg_encname *base = pg_encname_tbl, *last = base + nel - 1, *position; int result; char buff[NAMEDATALEN], *key; if (name == NULL || *name == '\0') return -1; if (strlen(name) >= NAMEDATALEN) return -1; /* it's certainly not in the table */ key = clean_encoding_name(name, buff); while (last >= base) { position = base + ((last - base) >> 1); result = key[0] - position->name[0]; if (result == 0) { result = strcmp(key, position->name); if (result == 0) return position->encoding; } if (result < 0) last = position - 1; else base = 
position + 1; } return -1; } const char * pg_encoding_to_char(int encoding) { if (PG_VALID_ENCODING(encoding)) { const pg_enc2name *p = &pg_enc2name_tbl[encoding]; Assert(encoding == p->encoding); return p->name; } return ""; }
c
github
https://github.com/postgres/postgres
src/common/encnames.c
/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ /* * Source: * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.9 */ package com.google.common.cache; import com.google.common.annotations.GwtIncompatible; import java.lang.reflect.Field; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Random; import org.jspecify.annotations.Nullable; import sun.misc.Unsafe; /** * A package-local class holding common representation and mechanics for classes supporting dynamic * striping on 64bit values. The class extends Number so that concrete subclasses must publicly do * so. */ @SuppressWarnings("SunApi") // b/345822163 @GwtIncompatible abstract class Striped64 extends Number { /* * This class maintains a lazily-initialized table of atomically * updated variables, plus an extra "base" field. The table size * is a power of two. Indexing uses masked per-thread hash codes. * Nearly all declarations in this class are package-private, * accessed directly by subclasses. * * Table entries are of class Cell; a variant of AtomicLong padded * to reduce cache contention on most processors. Padding is * overkill for most Atomics because they are usually irregularly * scattered in memory and thus don't interfere much with each * other. But Atomic objects residing in arrays will tend to be * placed adjacent to each other, and so will most often share * cache lines (with a huge negative performance impact) without * this precaution. * * In part because Cells are relatively large, we avoid creating * them until they are needed. When there is no contention, all * updates are made to the base field. Upon first contention (a * failed CAS on base update), the table is initialized to size 2. 
* The table size is doubled upon further contention until * reaching the nearest power of two greater than or equal to the * number of CPUS. Table slots remain empty (null) until they are * needed. * * A single spinlock ("busy") is used for initializing and * resizing the table, as well as populating slots with new Cells. * There is no need for a blocking lock; when the lock is not * available, threads try other slots (or the base). During these * retries, there is increased contention and reduced locality, * which is still better than alternatives. * * Per-thread hash codes are initialized to random values. * Contention and/or table collisions are indicated by failed * CASes when performing an update operation (see method * retryUpdate). Upon a collision, if the table size is less than * the capacity, it is doubled in size unless some other thread * holds the lock. If a hashed slot is empty, and lock is * available, a new Cell is created. Otherwise, if the slot * exists, a CAS is tried. Retries proceed by "double hashing", * using a secondary hash (Marsaglia XorShift) to try to find a * free slot. * * The table size is capped because, when there are more threads * than CPUs, supposing that each thread were bound to a CPU, * there would exist a perfect hash function mapping threads to * slots that eliminates collisions. When we reach capacity, we * search for this mapping by randomly varying the hash codes of * colliding threads. Because search is random, and collisions * only become known via CAS failures, convergence can be slow, * and because threads are typically not bound to CPUS forever, * may not occur at all. However, despite these limitations, * observed contention rates are typically low in these cases. * * It is possible for a Cell to become unused when threads that * once hashed to it terminate, as well as in the case where * doubling the table causes no thread to hash to it under * expanded mask. 
We do not try to detect or remove such cells, * under the assumption that for long-running instances, observed * contention levels will recur, so the cells will eventually be * needed again; and for short-lived ones, it does not matter. */ /** * Padded variant of AtomicLong supporting only raw accesses plus CAS. The value field is placed * between pads, hoping that the JVM doesn't reorder them. * * <p>JVM intrinsics note: It would be possible to use a release-only form of CAS here, if it were * provided. */ static final class Cell { volatile long p0, p1, p2, p3, p4, p5, p6; volatile long value; volatile long q0, q1, q2, q3, q4, q5, q6; Cell(long x) { value = x; } final boolean cas(long cmp, long val) { return UNSAFE.compareAndSwapLong(this, VALUE_OFFSET, cmp, val); } // Unsafe mechanics private static final Unsafe UNSAFE; private static final long VALUE_OFFSET; static { try { UNSAFE = getUnsafe(); Class<?> ak = Cell.class; VALUE_OFFSET = UNSAFE.objectFieldOffset(ak.getDeclaredField("value")); } catch (Exception e) { throw new Error(e); } } } /** * ThreadLocal holding a single-slot int array holding hash code. Unlike the JDK8 version of this * class, we use a suboptimal int[] representation to avoid introducing a new type that can impede * class-unloading when ThreadLocals are not removed. */ static final ThreadLocal<int @Nullable []> threadHashCode = new ThreadLocal<>(); /** Generator of new random hash codes */ static final Random rng = new Random(); /** Number of CPUS, to place bound on table size */ static final int NCPU = Runtime.getRuntime().availableProcessors(); /** Table of cells. When non-null, size is a power of 2. */ transient volatile Cell @Nullable [] cells; /** * Base value, used mainly when there is no contention, but also as a fallback during table * initialization races. Updated via CAS. */ transient volatile long base; /** Spinlock (locked via CAS) used when resizing and/or creating Cells. 
*/ transient volatile int busy; /** Package-private default constructor */ Striped64() {} /** CASes the base field. */ final boolean casBase(long cmp, long val) { return UNSAFE.compareAndSwapLong(this, BASE_OFFSET, cmp, val); } /** CASes the busy field from 0 to 1 to acquire lock. */ final boolean casBusy() { return UNSAFE.compareAndSwapInt(this, BUSY_OFFSET, 0, 1); } /** * Computes the function of current and new value. Subclasses should open-code this update * function for most uses, but the virtualized form is needed within retryUpdate. * * @param currentValue the current value (of either base or a cell) * @param newValue the argument from a user update call * @return result of the update function */ abstract long fn(long currentValue, long newValue); /** * Handles cases of updates involving initialization, resizing, creating new Cells, and/or * contention. See above for explanation. This method suffers the usual non-modularity problems of * optimistic retry code, relying on rechecked sets of reads. * * @param x the value * @param hc the hash code holder * @param wasUncontended false if CAS failed before call */ final void retryUpdate(long x, int @Nullable [] hc, boolean wasUncontended) { int h; if (hc == null) { threadHashCode.set(hc = new int[1]); // Initialize randomly int r = rng.nextInt(); // Avoid zero to allow xorShift rehash h = hc[0] = (r == 0) ? 
1 : r; } else h = hc[0]; boolean collide = false; // True if last slot nonempty for (; ; ) { Cell[] as; Cell a; int n; long v; if ((as = cells) != null && (n = as.length) > 0) { if ((a = as[(n - 1) & h]) == null) { if (busy == 0) { // Try to attach new Cell Cell r = new Cell(x); // Optimistically create if (busy == 0 && casBusy()) { boolean created = false; try { // Recheck under lock Cell[] rs; int m, j; if ((rs = cells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) { rs[j] = r; created = true; } } finally { busy = 0; } if (created) break; continue; // Slot is now non-empty } } collide = false; } else if (!wasUncontended) // CAS already known to fail wasUncontended = true; // Continue after rehash else if (a.cas(v = a.value, fn(v, x))) break; else if (n >= NCPU || cells != as) collide = false; // At max size or stale else if (!collide) collide = true; else if (busy == 0 && casBusy()) { try { if (cells == as) { // Expand table unless stale Cell[] rs = new Cell[n << 1]; for (int i = 0; i < n; ++i) rs[i] = as[i]; cells = rs; } } finally { busy = 0; } collide = false; continue; // Retry with expanded table } h ^= h << 13; // Rehash h ^= h >>> 17; h ^= h << 5; hc[0] = h; // Record index for next time } else if (busy == 0 && cells == as && casBusy()) { boolean init = false; try { // Initialize table if (cells == as) { Cell[] rs = new Cell[2]; rs[h & 1] = new Cell(x); cells = rs; init = true; } } finally { busy = 0; } if (init) break; } else if (casBase(v = base, fn(v, x))) break; // Fall back on using base } } /** Sets base and all cells to the given value. 
*/ final void internalReset(long initialValue) { Cell[] as = cells; base = initialValue; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) a.value = initialValue; } } } // Unsafe mechanics private static final Unsafe UNSAFE; private static final long BASE_OFFSET; private static final long BUSY_OFFSET; static { try { UNSAFE = getUnsafe(); Class<?> sk = Striped64.class; BASE_OFFSET = UNSAFE.objectFieldOffset(sk.getDeclaredField("base")); BUSY_OFFSET = UNSAFE.objectFieldOffset(sk.getDeclaredField("busy")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. Replace with a simple call * to Unsafe.getUnsafe when integrating into a jdk. * * @return a sun.misc.Unsafe */ private static Unsafe getUnsafe() { try { return Unsafe.getUnsafe(); } catch (SecurityException tryReflectionInstead) { } try { return AccessController.doPrivileged( new PrivilegedExceptionAction<Unsafe>() { @Override public Unsafe run() throws Exception { Class<Unsafe> k = Unsafe.class; for (Field f : k.getDeclaredFields()) { f.setAccessible(true); Object x = f.get(null); if (k.isInstance(x)) return k.cast(x); } throw new NoSuchFieldError("the Unsafe"); } }); } catch (PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } }
java
github
https://github.com/google/guava
android/guava/src/com/google/common/cache/Striped64.java
import numpy def vl_xyz2lab(I,il='E'): # VL_XYZ2LAB Convert XYZ color space to LAB # J = VL_XYZ2LAB(I) converts the image from XYZ format to LAB format. # # VL_XYZ2LAB(I,IL) uses one of the illuminants A, B, C, E, D50, D55, # D65, D75, D93. The default illuminant is E. # # See also:: VL_XYZ2LUV(), VL_HELP(). # AUTORIGHTS # Copyright (C) 2007-10 Andrea Vedaldi and Brian Fulkerson # # This file is part of VLFeat, available under the terms of the # GNU GPLv2, or (at your option) any later version. def f(a): k = 903.3 b=numpy.zeros(a.shape) b[a>0.00856] = a[a>0.00856]**(1/3.) b[a<=0.00856] = (k*a[a<=0.00856] + 16)/116. return b il=il.lower() if il=='a': xw = 0.4476 yw = 0.4074 elif il=='b': xw = 0.3324 yw = 0.3474 elif il=='c': xw = 0.3101 yw = 0.3162 elif il=='e': xw = 1./3 yw = 1./3 elif il=='d50': xw = 0.3457 yw = 0.3585 elif il=='d55': xw = 0.3324 yw = 0.3474 elif il=='d65': xw = 0.312713 yw = 0.329016 elif il=='d75': xw = 0.299 yw = 0.3149 elif il=='d93': xw = 0.2848 yw = 0.2932 J=numpy.zeros(I.shape) # Reference white Yw = 1.0 Xw = xw/yw Zw = (1-xw-yw)/yw * Yw # XYZ components X = I[:,:,0] Y = I[:,:,1] Z = I[:,:,2] x = X/Xw y = Y/Yw z = Z/Zw L = 116 * f(y) - 16 a = 500*(f(x) - f(y)) b = 200*(f(y) - f(z)) J = numpy.rollaxis(numpy.array([L,a,b]),0,3) return J def vl_rgb2xyz(I,workspace="CIE"): #VL_RGB2XYZ Convert RGB color space to XYZ #J=VL_RGB2XYZ(I) converts the CIE RGB image I to the image J in #CIE XYZ format. CIE RGB has a white point of R=G=B=1.0 #VL_RGB2XYZ(I,WS) uses the specified RGB working space WS. The #function supports the following RGB working spaces: #* `CIE' E illuminant, gamma=2.2 #* `Adobe' D65 illuminant, gamma=2.2 #The default workspace is CIE. #See also:: VL_XYZ2RGB(), VL_HELP(). #AUTORIGHTS #Copyright (C) 2007-10 Andrea Vedaldi and Brian Fulkerson #This file is part of VLFeat, available under the terms of the #GNU GPLv2, or (at your option) any later version. 
M,N,K = I.shape if not K==3: print('I must be a MxNx3 array.') exit(0) #I=im2double(I) ; if workspace=='CIE': #CIE: E illuminant and 2.2 gamma A = numpy.array([ [0.488718, 0.176204, 0.000000], [0.310680, 0.812985, 0.0102048], [0.200602, 0.0108109, 0.989795 ]]).T gamma = 2.2 if workspace=='Adobe': #Adobe 1998: D65 illuminant and 2.2 gamma A = numpy.array([ [0.576700, 0.297361, 0.0270328], [0.185556, 0.627355, 0.0706879], [0.188212, 0.0752847, 0.99124 ]]).T gamma = 2.2 I = (I**gamma).reshape(M*N, K) ; J = numpy.dot(A,I.T) J = J.T.reshape(M, N, K) return J
unknown
codeparrot/codeparrot-clean
import re from django import forms from django.db.models import get_model, Q from django.utils.translation import ugettext_lazy as _ Product = get_model('catalogue', 'Product') Range = get_model('offer', 'Range') class RangeForm(forms.ModelForm): class Meta: model = Range exclude = ('included_products', 'excluded_products', 'classes', 'proxy_class') class RangeProductForm(forms.Form): query = forms.CharField(max_length=1024, label=_("Product SKUs or UPCs"), widget=forms.Textarea, required=False, help_text=_("You can paste in a selection of SKUs or UPCs")) file_upload = forms.FileField( label=_("File of SKUs or UPCs"), required=False, max_length=255, help_text=_('Either comma-separated, or one identifier per line')) def __init__(self, range, *args, **kwargs): self.range = range super(RangeProductForm, self).__init__(*args, **kwargs) def clean(self): clean_data = super(RangeProductForm, self).clean() if not clean_data.get('query') and not clean_data.get('file_upload'): raise forms.ValidationError(_("You must submit either a list of SKU/UPCs or a file")) return clean_data def clean_query(self): raw = self.cleaned_data['query'] if not raw: return raw # Check that the search matches some products ids = set(re.compile(r'[\w-]+').findall(raw)) products = self.range.included_products.all() existing_skus = set(products.values_list('stockrecord__partner_sku', flat=True)) existing_upcs = set(products.values_list('upc', flat=True)) existing_ids = existing_skus.union(existing_upcs) new_ids = ids - existing_ids if len(new_ids) == 0: raise forms.ValidationError( _("The products with SKUs or UPCs matching %s are already in this range") % ( ', '.join(ids))) self.products = Product._default_manager.filter( Q(stockrecord__partner_sku__in=new_ids) | Q(upc__in=new_ids)) if len(self.products) == 0: raise forms.ValidationError(_("No products exist with a SKU or UPC matching %s") % ", ".join(ids)) found_skus = set(self.products.values_list('stockrecord__partner_sku', flat=True)) 
found_upcs = set(self.products.values_list('upc', flat=True)) found_ids = found_skus.union(found_upcs) self.missing_skus = new_ids - found_ids self.duplicate_skus = existing_ids.intersection(ids) return raw def get_products(self): return self.products if hasattr(self, 'products') else [] def get_missing_skus(self): return self.missing_skus def get_duplicate_skus(self): return self.duplicate_skus
unknown
codeparrot/codeparrot-clean
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "fmt" "strings" ) // exprParser is a //go:build expression parser and evaluator. // The parser is a trivial precedence-based parser which is still // almost overkill for these very simple expressions. type exprParser struct { x string t exprToken // upcoming token } // val is the value type result of parsing. // We don't keep a parse tree, just the value of the expression. type val bool // exprToken describes a single token in the input. // Prefix operators define a prefix func that parses the // upcoming value. Binary operators define an infix func // that combines two values according to the operator. // In that case, the parsing loop parses the two values. type exprToken struct { tok string prec int prefix func(*exprParser) val infix func(val, val) val } var exprTokens []exprToken func init() { // init to break init cycle exprTokens = []exprToken{ {tok: "&&", prec: 1, infix: func(x, y val) val { return x && y }}, {tok: "||", prec: 2, infix: func(x, y val) val { return x || y }}, {tok: "!", prec: 3, prefix: (*exprParser).not}, {tok: "(", prec: 3, prefix: (*exprParser).paren}, {tok: ")"}, } } // matchexpr parses and evaluates the //go:build expression x. func matchexpr(x string) (matched bool, err error) { defer func() { if e := recover(); e != nil { matched = false err = fmt.Errorf("parsing //go:build line: %v", e) } }() p := &exprParser{x: x} p.next() v := p.parse(0) if p.t.tok != "end of expression" { panic("unexpected " + p.t.tok) } return bool(v), nil } // parse parses an expression, including binary operators at precedence >= prec. 
func (p *exprParser) parse(prec int) val { if p.t.prefix == nil { panic("unexpected " + p.t.tok) } v := p.t.prefix(p) for p.t.prec >= prec && p.t.infix != nil { t := p.t p.next() v = t.infix(v, p.parse(t.prec+1)) } return v } // not is the prefix parser for a ! token. func (p *exprParser) not() val { p.next() return !p.parse(100) } // paren is the prefix parser for a ( token. func (p *exprParser) paren() val { p.next() v := p.parse(0) if p.t.tok != ")" { panic("missing )") } p.next() return v } // next advances the parser to the next token, // leaving the token in p.t. func (p *exprParser) next() { p.x = strings.TrimSpace(p.x) if p.x == "" { p.t = exprToken{tok: "end of expression"} return } for _, t := range exprTokens { if strings.HasPrefix(p.x, t.tok) { p.x = p.x[len(t.tok):] p.t = t return } } i := 0 for i < len(p.x) && validtag(p.x[i]) { i++ } if i == 0 { panic(fmt.Sprintf("syntax error near %#q", rune(p.x[i]))) } tag := p.x[:i] p.x = p.x[i:] p.t = exprToken{ tok: "tag", prefix: func(p *exprParser) val { p.next() return val(matchtag(tag)) }, } } func validtag(c byte) bool { return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '.' || c == '_' }
go
github
https://github.com/golang/go
src/cmd/dist/buildtag.go
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies two actions can be attached to the same input files. """ import sys import TestGyp test = TestGyp.TestGyp() test.run_gyp('actions.gyp', chdir='src') test.relocate('src', 'relocate/src') # Test of fine-grained dependencies for generators that can build individual # files on demand. # In particular: # - TargetA depends on TargetB. # - TargetA and TargetB are 'none' type with actions attached. # - TargetA has multiple actions. # - An output from one of the actions in TargetA (not the first listed), # is requested as the build target. # Ensure that TargetB gets built. # # This sub-test can only be done with generators/build tools that can # be asked to build individual files rather than whole targets (make, ninja). if test.format in ['make', 'ninja']: # Select location of target based on generator. if test.format == 'make': target = 'multi2.txt' elif test.format == 'ninja': if sys.platform in ['win32', 'cygwin']: target = '..\\..\\multi2.txt' else: target = '../../multi2.txt' else: assert False test.build('actions.gyp', chdir='relocate/src', target=target) test.must_contain('relocate/src/multi2.txt', 'hello there') test.must_contain('relocate/src/multi_dep.txt', 'hello there') # Test that two actions can be attached to the same inputs. test.build('actions.gyp', test.ALL, chdir='relocate/src') test.must_contain('relocate/src/output1.txt', 'hello there') test.must_contain('relocate/src/output2.txt', 'hello there') test.must_contain('relocate/src/output3.txt', 'hello there') test.must_contain('relocate/src/output4.txt', 'hello there') # Test that process_outputs_as_sources works in conjuction with merged # actions. test.run_built_executable( 'multiple_action_source_filter', chdir='relocate/src', stdout=( '{\n' 'bar\n' 'car\n' 'dar\n' 'ear\n' '}\n' ), ) test.pass_test()
unknown
codeparrot/codeparrot-clean
import os import re from setuptools import setup def rel(*parts): '''returns the relative path to a file wrt to the current directory''' return os.path.abspath(os.path.join(os.path.dirname(__file__), *parts)) README = open('README.md', 'r').read() with open(rel('webpack_loader', '__init__.py')) as handler: INIT_PY = handler.read() VERSION = re.findall("__version__ = '([^']+)'", INIT_PY)[0] setup( name = 'django-webpack-loader', packages = ['webpack_loader', 'webpack_loader/templatetags', 'webpack_loader/contrib'], version = VERSION, description = 'Transparently use webpack with django', long_description=README, long_description_content_type="text/markdown", author = 'Owais Lone', author_email = 'hello@owaislone.org', download_url = 'https://github.com/django-webpack/django-webpack-loader/tarball/{0}'.format(VERSION), url = 'https://github.com/django-webpack/django-webpack-loader', # use the URL to the github repo keywords = ['django', 'webpack', 'assets'], # arbitrary keywords classifiers = [ 'Programming Language :: Python', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Framework :: Django', 'Framework :: Django :: 2.0', 'Framework :: Django :: 2.1', 'Framework :: Django :: 2.2', 'Framework :: Django :: 3.0', 'Framework :: Django :: 3.1', 'Framework :: Django :: 3.2', 'Environment :: Web Environment', 'License :: OSI Approved :: MIT License', ], )
unknown
codeparrot/codeparrot-clean
--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html - https://www.elastic.co/guide/en/serverless/current/elasticsearch-differences.html applies_to: stack: ga serverless: ga --- # REST APIs Elasticsearch exposes REST APIs that are used by the UI components and can be called directly to configure and access Elasticsearch features. For API reference information, go to [{{es}} API]({{es-apis}}) and [{{es-serverless}} API]({{es-serverless-apis}}). This section includes: - [API conventions](/reference/elasticsearch/rest-apis/api-conventions.md) - [Common options](/reference/elasticsearch/rest-apis/common-options.md) - [Compatibility](/reference/elasticsearch/rest-apis/compatibility.md) - [Examples](/reference/elasticsearch/rest-apis/api-examples.md) ## API endpoints ### [Behavioral analytics](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-analytics) ```{applies_to} stack: deprecated ``` The behavioral analytics APIs enable you to create and manage analytics collections and retrieve information about analytics collections. Behavioral Analytics is an analytics event collection platform. You can use it to analyze your users' searching and clicking behavior. Leverage this information to improve the relevance of your search results and identify gaps in your content. | API | Description | | --- | ----------- | | [Get Collections](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics) | Lists all behavioral analytics collections. | | [Create Collection](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics) | Creates a new behavioral analytics collection. | | [Delete Collection](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics) | Deletes a behavioral analytics collection. 
| | [Create Event](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event) | Sends a behavioral analytics event to a collection. | ### [Compact and aligned text (CAT)](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat) The compact and aligned text (CAT) APIs return human-readable text as a response, instead of a JSON object. The CAT APIs aim are intended only for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, it's recommend to use a corresponding JSON API. | API | Description | | --- | ----------- | | [Get aliases](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases) | Returns index aliases. | | [Get allocation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation) | Provides a snapshot of shard allocation across nodes. | | [Get component templates](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates) | Returns information about component templates. | | [Get count](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count) | Returns document count for specified indices. | | [Get fielddata](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata) | Shows fielddata memory usage by field. | | [Get health](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health) | Returns cluster health status. | | [Get help](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-help) | Shows help for CAT APIs. | | [Get index information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices) | Returns index statistics. | | [Get master](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master) | Returns information about the elected master node. 
| | [Get ml data frame analytics](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics) | Returns data frame analytics jobs. | | [Get ml datafeeds](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds) | Returns information about datafeeds. | | [Get ml jobs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs) | Returns anomaly detection jobs. | | [Get ml trained models](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models) | Returns trained machine learning models. | | [Get nodeattrs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs) | Returns custom node attributes. | | [Get node information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) | Returns cluster node info and statistics. | | [Get pending tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks) | Returns cluster pending tasks. | | [Get plugins](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins) | Returns information about installed plugins. | | [Get recovery](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery) | Returns shard recovery information. | | [Get repositories](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories) | Returns snapshot repository information. | | [Get segments](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments) | Returns low-level segment information. | | [Get shard information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards) | Returns shard allocation across nodes. | | [Get snapshots](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots) | Returns snapshot information. 
| | [Get tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks) | Returns information about running tasks. | | [Get templates](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates) | Returns index template information. | | [Get thread pool](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool) | Returns thread pool statistics. | | [Get transforms](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms) | Returns transform information. | ### [Cluster](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster) The cluster APIs enable you to retrieve information about your infrastructure on cluster, node, or shard level. You can manage cluster settings and voting configuration exceptions, collect node statistics and retrieve node information. | API | Description | | --- | ----------- | | [Get cluster health](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health) | Returns health status of the cluster. | | [Get cluster info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info) | Returns basic information about the cluster. | | [Reroute cluster](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute) | Manually reassigns shard allocations. | | [Get cluster state](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state) | Retrieves the current cluster state. | | [Explain shard allocation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) | Get explanations for shard allocations in the cluster. | | [Update cluster settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) | Updates persistent or transient cluster settings. 
| | [Get cluster stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats) | Returns cluster-wide statistics, including node, index, and shard metrics. | | [Get cluster pending tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks) | Lists cluster-level tasks that are pending execution. | | [Get cluster settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings) | Retrieves the current cluster-wide settings, including persistent and transient settings. | | [Get cluster remote info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info) | Returns information about configured remote clusters for cross-cluster search and replication. | | [Update cluster voting config exclusions](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) | Update the cluster voting config exclusions by node IDs or node names. | | [Delete voting config exclusions](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-delete-voting-config-exclusions) | Clears voting configuration exclusions, allowing previously excluded nodes to participate in master elections. | | [Exclude nodes from voting](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) | Excludes nodes from voting in master elections. | | [Clear voting config exclusions](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-delete-voting-config-exclusions) | Clears voting config exclusions. | ### [Cluster - Health](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-health_report) The cluster - health API provides you a report with the health status of an Elasticsearch cluster. 
| API | Description | | --- | ----------- | | [Get cluster health report](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report) | Returns health status of the cluster, including index-level details. | ### [Connector](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-connector) The connector and sync jobs APIs provide a convenient way to create and manage Elastic connectors and sync jobs in an internal index. | API | Description | | --- | ----------- | | [Get connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get) | Retrieves a connector configuration. | | [Put connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) | Creates or updates a connector configuration. | | [Delete connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete) | Deletes a connector configuration. | | [Start connector sync job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post) | Starts a sync job for a connector. | | [Get connector sync job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) | Retrieves sync job details for a connector. | | [Get all connectors](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list) | Retrieves a list of all connector configurations. | | [Get all connector sync jobs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list) | Retrieves a list of all connector sync jobs. | | [Delete connector sync job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete) | Deletes a connector sync job. | The connector and sync jobs APIs provide a convenient way to create and manage Elastic connectors and sync jobs in an internal index. 
| API | Description | | --- | ----------- | | [Get connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get) | Retrieves a connector configuration. | | [Put connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) | Creates or updates a connector configuration. | | [Delete connector](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete) | Deletes a connector configuration. | | [Start connector sync job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post) | Starts a sync job for a connector. | | [Get connector sync job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) | Retrieves sync job details for a connector. | ### [Cross-cluster replication (CCR)](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ccr) The cross-cluster replication (CCR) APIs enable you to run cross-cluster replication operations, such as creating and managing follower indices or auto-follow patterns. With CCR, you can replicate indices across clusters to continue handling search requests in the event of a datacenter outage, prevent search volume from impacting indexing throughput, and reduce search latency by processing search requests in geo-proximity to the user. | API | Description | | --- | ----------- | | [Create or update auto-follow pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern) | Creates or updates an auto-follow pattern. | | [Delete auto-follow pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern) | Deletes an auto-follow pattern. | | [Get auto-follow pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern) | Retrieves auto-follow pattern configuration. 
| | [Pause auto-follow pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern) | Pauses an auto-follow pattern. | | [Resume auto-follow pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern) | Resumes a paused auto-follow pattern. | | [Forget follower](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower) | Removes follower retention leases from leader index. | | [Create follower](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow) | Creates a follower index. | | [Get follower](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info) | Retrieves information about follower indices. | | [Get follower stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats) | Retrieves stats about follower indices. | | [Pause follower](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow) | Pauses replication of a follower index. | | [Resume follower](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow) | Resumes replication of a paused follower index. | | [Unfollow index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow) | Converts a follower index into a regular index. | | [Get CCR stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats) | Retrieves overall CCR statistics for the cluster. | ### [Data stream](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-data-stream) The data stream APIs enable you to create and manage data streams and data stream lifecycles. A data stream lets you store append-only time series data across multiple indices while giving you a single named resource for requests. Data streams are well-suited for logs, events, metrics, and other continuously generated data. 
| API | Description | | --- | ----------- | | [Create data stream](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream) | Creates a new data stream. | | [Delete data stream](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream) | Deletes an existing data stream. | | [Get data stream](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream) | Retrieves one or more data streams. | | [Modify data stream](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream) | Updates the backing index configuration for a data stream. | | [Promote data stream write index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream) | Promotes a backing index to be the write index. | | [Data streams stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats) | Returns statistics about data streams. | | [Migrate to data stream](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream) | Migrates an index or indices to a data stream. | ### [Document](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-document) The document APIs enable you to create and manage documents in an {{es}} index. | API | Description | | --- | ----------- | | [Index document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) | Indexes a document into a specific index. | | [Get document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) | Retrieves a document by ID. | | [Delete document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete) | Deletes a document by ID. | | [Update document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update) | Updates a document using a script or partial doc. 
| | [Bulk](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) | Performs multiple indexing or delete operations in a single API call. | | [Multi-get document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget) | Retrieves multiple documents by ID in one request. | | [Update documents by query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) | Updates documents that match a query. | | [Delete documents by query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) | Deletes documents that match a query. | | [Get term vectors](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors) | Retrieves term vectors for a document. | | [Multi-termvectors](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors) | Retrieves term vectors for multiple documents. | | [Reindex](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) | Copies documents from one index to another. | | [Reindex Rethrottle](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex-rethrottle) | Changes the throttle for a running reindex task. | | [Explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain) | Explains how a document matches (or doesn't match) a query. | | [Get source](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-source) | Retrieves the source of a document by ID. | | [Exists](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-exists) | Checks if a document exists by ID. | ### [Enrich](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-enrich) The enrich APIs enable you to manage enrich policies. An enrich policy is a set of configuration options used to add the right enrich data to the right incoming documents. 
| API | Description | | --- | ----------- | | [Create or update enrich policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy) | Creates or updates an enrich policy. | | [Get enrich policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy) | Retrieves enrich policy definitions. | | [Delete enrich policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy) | Deletes an enrich policy. | | [Execute enrich policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy) | Executes an enrich policy to create an enrich index. | | [Get enrich stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats) | Returns enrich coordinator and policy execution statistics. | ### [EQL](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-eql) The EQL APIs enable you to run EQL-related operations. Event Query Language (EQL) is a query language for event-based time series data, such as logs, metrics, and traces. | API | Description | | --- | ----------- | | [Submit EQL search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) | Runs an EQL search. | | [Get EQL search status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get) | Retrieves the status of an asynchronous EQL search. | | [Get EQL search results](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get) | Retrieves results of an asynchronous EQL search. | | [Delete EQL search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete) | Cancels an asynchronous EQL search. | ### [ES|QL](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-esql) The ES|QL APIs enable you to run ES|QL-related operations. 
The Elasticsearch Query Language (ES|QL) provides a powerful way to filter, transform, and analyze data stored in Elasticsearch, and in the future in other runtimes. | API | Description | | --- | ----------- | | [ES|QL Query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-query) | Executes an ES|QL query using a SQL-like syntax. | | [ES|QL Async Submit](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query) | Submits an ES|QL query to run asynchronously. | | [ES|QL Async Get](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get) | Retrieves results of an asynchronous ES|QL query. | | [ES|QL Async Delete](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete) | Cancels an asynchronous ES|QL query. | ### [Features](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-features) The feature APIs enable you to introspect and manage features provided by {{es}} and {{es}} plugins. | API | Description | | --- | ----------- | | [Get Features](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features) | Lists all available features in the cluster. | | [Reset Features](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features) | Resets internal state for system features. | ### [Fleet](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-fleet) The Fleet APIs support Fleet’s use of Elasticsearch as a data store for internal agent and action data. | API | Description | | --- | ----------- | | [Run Multiple Fleet Searches](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch) | Runs several Fleet searches with a single API request. | | [Run a Fleet Search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search) | Runs a Fleet search. 
| | [Get global checkpoints](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-global-checkpoints) | Get the current global checkpoints for an index. | ### [Graph explore](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-graph) The graph explore APIs enable you to extract and summarize information about the documents and terms in an {{es}} data stream or index. | API | Description | | --- | ----------- | | [Graph Explore](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-graph-explore) | Discovers relationships between indexed terms using relevance-based graph exploration. | ### [Index](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-indices) The index APIs enable you to manage individual indices, index settings, aliases, mappings, and index templates. | API | Description | | --- | ----------- | | [Create index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create) | Creates a new index with optional settings and mappings. | | [Delete index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) | Deletes an existing index. | | [Get index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get) | Retrieves information about one or more indices. | | [Open index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open) | Opens a closed index to make it available for operations. | | [Close index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close) | Closes an index to free up resources. | | [Shrink index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink) | Shrinks an existing index into a new index with fewer primary shards. | | [Split index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split) | Splits an existing index into a new index with more primary shards. 
| | [Clone index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone) | Clones an existing index into a new index. | | [Check alias](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias) | Manages index aliases. | | [Update field mappings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) | Updates index mappings. | | [Get field mappings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) | Retrieves index mappings. | | [Get index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings) | Retrieves settings for one or more indices. | | [Update index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) | Updates index-level settings dynamically. | | [Get index templates](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template) | Retrieves legacy index templates. | | [Put index template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) | Creates or updates a legacy index template. | | [Delete index template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template) | Deletes a legacy index template. | | [Get composable index templates](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template) | Retrieves composable index templates. | | [Put composable index template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template) | Creates or updates a composable index template. | | [Delete composable index template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template) | Deletes a composable index template. 
| | [Get index alias](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias) | Retrieves index aliases. | | [Delete index alias](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias) | Deletes index aliases. | | [Refresh index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) | Refreshes one or more indices, making recent changes searchable. | | [Flush index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush) | Performs a flush operation on one or more indices. | | [Clear index cache](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache) | Clears caches associated with one or more indices. | | [Force merge index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge) | Merges index segments to reduce their number and improve performance. | | [Rollover index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover) | Rolls over an alias to a new index when conditions are met. | | [Resolve index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index) | Resolves expressions to index names, aliases, and data streams. | | [Simulate index template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template) | Simulates the application of a composable index template. | | [Simulate template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template) | Simulates the application of a legacy index template. | | [Get mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) | Retrieves mapping definitions for one or more indices. 
| | [Put mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) | Updates mapping definitions for one or more indices. | | [Reload search analyzers](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers) | Reloads search analyzers for one or more indices. | | [Shrink index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink) | Shrinks an existing index into a new index with fewer primary shards. | | [Split index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split) | Splits an existing index into a new index with more primary shards. | | [Clone index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone) | Clones an existing index into a new index. | ### [Index lifecycle management](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ilm) The index lifecycle management APIs enable you to set up policies to automatically manage the index lifecycle. | API | Description | | --- | ----------- | | [Put Lifecycle Policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle) | Creates or updates an ILM policy. | | [Get Lifecycle Policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle) | Retrieves one or more ILM policies. | | [Delete Lifecycle Policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle) | Deletes an ILM policy. | | [Explain Lifecycle](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle) | Shows the current lifecycle step for indices. | | [Move to Step](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step) | Manually moves an index to the next step in its lifecycle. 
| | [Retry Lifecycle Step](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry) | Retries the current lifecycle step for failed indices. | | [Start ILM](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) | Starts the ILM plugin. | | [Stop ILM](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) | Stops the ILM plugin. | | [Get ILM Status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status) | Returns the status of the ILM plugin. | ### [Inference](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-inference) The inference APIs enable you to create inference endpoints and integrate with machine learning models of different services - such as Amazon Bedrock, Anthropic, Azure AI Studio, Cohere, Google AI, Groq, Mistral, OpenAI, or HuggingFace. | API | Description | | --- | ----------- | | [Put Inference Endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put) | Creates an inference endpoint. | | [Get Inference Endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get) | Retrieves one or more inference endpoints. | | [Delete Inference Endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete) | Deletes an inference endpoint. | | [Infer](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) | Runs inference using a deployed model. | ### [Info](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-info) The info API provides basic build, version, and cluster information. | API | Description | | --- | ----------- | | [Get cluster information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info) | Returns basic information about the cluster. 
| ### [Ingest](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ingest) The ingest APIs enable you to manage tasks and resources related to ingest pipelines and processors. | API | Description | | --- | ----------- | | [Create or update pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) | Creates or updates an ingest pipeline. | | [Get pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline) | Retrieves one or more ingest pipelines. | | [Delete pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline) | Deletes an ingest pipeline. | | [Simulate pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate) | Simulates a document through an ingest pipeline. | | [Get built-in grok patterns](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-processor-grok) | Returns a list of built-in grok patterns. | ### [Licensing](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-license) The licensing APIs enable you to manage your licenses. | API | Description | | --- | ----------- | | [Get license](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get) | Retrieves the current license for the cluster. | | [Update license](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post) | Updates the license for the cluster. | | [Delete license](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete) | Removes the current license. | | [Start basic license](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic) | Starts a basic license. | | [Start trial license](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial) | Starts a trial license. 
| | [Get the trial status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status) | Returns the status of the current trial license. | ### [Logstash](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-logstash) The logstash APIs enable you to manage pipelines that are used by Logstash Central Management. | API | Description | | --- | ----------- | | [Create or update Logstash pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline) | Creates or updates a Logstash pipeline. | | [Get Logstash pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline) | Retrieves one or more Logstash pipelines. | | [Delete Logstash pipeline](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline) | Deletes a Logstash pipeline. | ### [Machine learning](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ml) The machine learning APIs enable you to retrieve information related to the {{stack}} {{ml}} features. | API | Description | | --- | ----------- | | [Get machine learning memory stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats) | Gets information about how machine learning jobs and trained models are using memory. | | [Get machine learning info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info) | Gets defaults and limits used by machine learning. | | [Set upgrade mode](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode) | Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. | | [Get ML job stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats) | Retrieves usage statistics for ML jobs. 
| | [Get ML calendar events](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events) | Retrieves scheduled events for ML calendars. | | [Get ML filters](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters) | Retrieves ML filters. | | [Put ML filter](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter) | Creates or updates an ML filter. | | [Delete ML filter](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter) | Deletes an ML filter. | | [Get ML info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info) | Gets overall ML info. | | [Get ML model snapshots](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots) | Retrieves model snapshots for ML jobs. | | [Revert ML model snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot) | Reverts an ML job to a previous model snapshot. | | [Delete expired ML data](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data) | Deletes expired ML results and model snapshots. | ### [Machine learning anomaly detection](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ml-anomaly) The machine learning anomaly detection APIs enable you to perform anomaly detection activities. | API | Description | | --- | ----------- | | [Put Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job) | Creates an anomaly detection job. | | [Get Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs) | Retrieves configuration info for anomaly detection jobs. | | [Delete Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job) | Deletes an anomaly detection job. 
| | [Open Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job) | Opens an existing anomaly detection job. | | [Close anomaly detection jobs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job) | Closes an anomaly detection job. | | [Flush Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job) | Forces any buffered data to be processed. | | [Forecast Job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast) | Generates forecasts for anomaly detection jobs. | | [Get Buckets](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets) | Retrieves bucket results from a job. | | [Get Records](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records) | Retrieves anomaly records for a job. | | [Get calendar configuration info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars) | Gets calendar configuration information. | | [Create a calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar) | Create a calendar. | | [Delete a calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar) | Delete a calendar. | | [Delete events from a calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar) | Delete events from a calendar. | | [Add anomaly detection job to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) | Add an anomaly detection job to a calendar. | | [Delete anomaly detection jobs from calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job) | Deletes anomaly detection jobs from a calendar. 
| | [Get datafeeds configuration info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds) | Get configuration information for a datafeed. | | [Create datafeed](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed) | Creates a datafeed. | | [Delete a datafeed](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed) | Deletes a datafeed. | | [Delete expired ML data](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data) | Delete all job results, model snapshots and forecast data that have exceeded their retention days period. | | [Get filters](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters) | Get a single filter or all filters. | | [Get anomaly detection job results for influencers](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers) | Get anomaly detection job results for entities that contributed to or are to blame for anomalies. | | [Get anomaly detection job stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats) | Get anomaly detection job stats. | | [Get anomaly detection jobs configuration info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs) | You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. | ### [Machine learning data frame analytics](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ml-data-frame) The machine learning data frame analytics APIs enable you to perform data frame analytics activities. 
| API | Description | | --- | ----------- | | [Create a data frame analytics job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics) | Creates a data frame analytics job. | | [Get data frame analytics job configuration info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics) | Retrieves configuration and results for analytics jobs. | | [Delete a data frame analytics job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics) | Deletes a data frame analytics job. | | [Start a data frame analytics job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics) | Starts a data frame analytics job. | | [Stop data frame analytics jobs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics) | Stops a running data frame analytics job. | ### [Machine learning trained models](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-ml-trained-model) The machine learning trained models APIs enable you to perform model management operations. | API | Description | | --- | ----------- | | [Put Trained Model](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model) | Uploads a trained model for inference. | | [Get Trained Models](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models) | Retrieves configuration and stats for trained models. | | [Delete Trained Model](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model) | Deletes a trained model. | | [Start Deployment](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment) | Starts a trained model deployment. 
| | [Stop Deployment](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment) | Stops a trained model deployment. | | [Get Deployment Stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats) | Retrieves stats for deployed models. | ### [Migration](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-migration) The migration APIs power {{kib}}'s Upgrade Assistant feature. | API | Description | | --- | ----------- | | [Deprecation Info](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations) | Retrieves deprecation warnings for cluster and indices. | | [Get Feature Upgrade Status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status) | Checks upgrade status of system features. | | [Post Feature Upgrade](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-post-feature-upgrade) | Upgrades internal system features after a version upgrade. | ### [Query rules](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-query_rules) Query rules enable you to configure per-query rules that are applied at query time to queries that match the specific rule. Query rules are organized into rulesets, collections of query rules that are matched against incoming queries. Query rules are applied using the rule query. If a query matches one or more rules in the ruleset, the query is re-written to apply the rules before searching. This allows pinning documents for only queries that match a specific term. | API | Description | | --- | ----------- | | [Create or update query ruleset](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset) | Creates or updates a query ruleset. 
| | [Get query ruleset](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset) | Retrieves one or more query rulesets. | | [Delete query ruleset](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset) | Deletes a query ruleset. | ### [Rollup](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-rollup) The rollup APIs enable you to create, manage, and retrieve infromation about rollup jobs. | API | Description | | --- | ----------- | | [Create or update rollup job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job) | Creates or updates a rollup job for summarizing historical data. | | [Get rollup jobs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs) | Retrieves configuration for one or more rollup jobs. | | [Delete rollup job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job) | Deletes a rollup job. | | [Start rollup job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job) | Starts a rollup job. | | [Stop rollup job](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job) | Stops a running rollup job. | | [Get rollup capabilities](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps) | Returns the capabilities of rollup jobs. | | [Search rollup data](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search) | Searches rolled-up data using a rollup index. | ### [Script](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-script) Use the script support APIs to get a list of supported script contexts and languages. Use the stored script APIs to manage stored scripts and search templates. 
| API | Description | | --- | ----------- | | [Add or update stored script](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script) | Adds or updates a stored script. | | [Get stored script](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script) | Retrieves a stored script. | | [Delete stored script](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script) | Deletes a stored script. | | [Execute Painless script](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scripts-painless-execute) | Executes a script using the Painless language. | | [Get script contexts](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context) | Returns available script execution contexts. | | [Get script languages](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages) | Returns available scripting languages. | ### [Search](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-search) The search APIs enable you to search and aggregate data stored in {{es}} indices and data streams. | API | Description | | --- | ----------- | | [Search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) | Executes a search query on one or more indices. | | [Multi search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) | Executes multiple search requests in a single API call. | | [Search template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template) | Executes a search using a stored or inline template. | | [Render search template](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template) | Renders a search template with parameters. | | [Explain search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain) | Explains how a document scores against a query. 
| | [Get field capabilities](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps) | Returns the capabilities of fields across indices. | | [Scroll search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll) | Efficiently retrieves large numbers of results (pagination). | | [Clear scroll](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll) | Clears search contexts for scroll requests. | ### [Search application](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-search_application) The search applcation APIs enable you to manage tasks and resources related to Search Applications. | API | Description | | --- | ----------- | | [Create or update search application](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put) | Creates or updates a search application. | | [Get search application](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get) | Retrieves a search application by name. | | [Delete search application](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete) | Deletes a search application. | | [Search search application](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search) | Executes a search using a search application. | ### [Searchable snapshots](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-searchable_snapshots) The searchable snapshots APIs enable you to perform searchable snapshots operations. | API | Description | | --- | ----------- | | [Mount searchable snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) | Mounts a snapshot as a searchable index. 
| | [Clear searchable snapshot cache](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache) | Clears the cache of searchable snapshots. | | [Get searchable snapshot stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats) | Returns stats about searchable snapshots. | ### [Security](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-security) The security APIs enable you to perform security activities, and add, update, retrieve, and remove application privileges, role mappings, and roles. You can also create and update API keys and create and invalidate bearer tokens. | API | Description | | --- | ----------- | | [Create or update user](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user) | Creates or updates a user in the native realm. | | [Get user](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user) | Retrieves one or more users. | | [Delete user](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user) | Deletes a user from the native realm. | | [Create or update role](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role) | Creates or updates a role. | | [Get role](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role) | Retrieves one or more roles. | | [Delete role](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role) | Deletes a role. | | [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) | Creates an API key for access without basic auth. | | [Invalidate API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key) | Invalidates one or more API keys. 
| | [Authenticate](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate) | Retrieves information about the authenticated user. | ### [Snapshot and restore](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-snapshot) The snapshot and restore APIs enable you to set up snapshot repositories, manage snapshot backups, and restore snapshots to a running cluster. | API | Description | | --- | ----------- | | [Clean up snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository) | Removes stale data from a repository. | | [Clone snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone) | Clones indices from a snapshot into a new snapshot. | | [Get snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get) | Retrieves information about snapshots. | | [Create snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create) | Creates a snapshot of one or more indices. | | [Delete snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete) | Deletes a snapshot from a repository. | | [Get snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository) | Retrieves information about snapshot repositories. | | [Create or update snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository) | Registers or updates a snapshot repository. | | [Delete snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository) | Deletes a snapshot repository. | | [Restore snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore) | Restores a snapshot. 
| | [Analyze snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze) | Analyzes a snapshot repository for correctness and performance. | | [Verify snapshot repository](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity) | Verifies access to a snapshot repository. | | [Get snapshot status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status) | Gets the status of a snapshot. | ### [Snapshot lifecycle management](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-slm) The snapshot lifecycle management APIs enable you to set up policies to automatically take snapshots and control how long they are retained. | API | Description | | --- | ----------- | | [Get snapshot lifecycle policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle) | Retrieves one or more snapshot lifecycle policies. | | [Create or update snapshot lifecycle policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle) | Creates or updates a snapshot lifecycle policy. | | [Delete snapshot lifecycle policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle) | Deletes a snapshot lifecycle policy. | | [Execute snapshot lifecycle policy](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle) | Triggers a snapshot lifecycle policy manually. | | [Execute snapshot retention](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention) | Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. 
| | [Get snapshot lifecycle stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats) | Returns statistics about snapshot lifecycle executions. | | [Get snapshot lifecycle status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status) | Returns the status of the snapshot lifecycle management feature. | | [Start snapshot lifecycle management](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start) | Starts the snapshot lifecycle management feature. | | [Stop snapshot lifecycle management](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop) | Stops the snapshot lifecycle management feature. | ### [SQL](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-sql) The SQL APIs enable you to run SQL queries on Elasticsearch indices and data streams. | API | Description | | --- | ----------- | | [Clear SQL cursor](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor) | Clears the server-side cursor for an SQL search. | | [Delete async SQL search](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async) | Deletes an async SQL search. | | [Get async SQL search results](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async) | Retrieves results of an async SQL query. | | [Get async SQL search status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status) | Gets the current status of an async SQL search or a stored synchronous SQL search. | | [SQL query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) | Executes an SQL query. | | [Translate SQL](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate) | Translates SQL into Elasticsearch DSL. 
| ### [Synonyms](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-synonyms) The synonyms management APIs provide a convenient way to define and manage synonyms in an internal system index. Related synonyms can be grouped in a "synonyms set". | API | Description | | --- | ----------- | | [Get synonym set](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym) | Retrieves a synonym set by ID. | | [Create of update synonym set](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym) | Creates or updates a synonym set. | | [Delete synonym set](https://www.elastic.co/docs/api/doc/elasticsearch/endpoint/operation/operation-synonyms-delete-synonym) | Deletes a synonym set. | | [Get synonym rule](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule) | | | [Get synonym sets](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonyms-sets) | Lists all synonym sets. | ### [Task management](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks) The task management APIs enable you to retrieve information about tasks or cancel tasks running in a cluster. | API | Description | | --- | ----------- | | [Cancel a task](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-tasks-cancel) | Cancels a running task. | | [Get task information](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-tasks-get) | | | [Get all tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-tasks-list) | Retrieves information about running tasks. | ### [Text structure](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-text_structure) The text structure APIs enable you to find the structure of a text field in an {{es}} index. 
| API | Description | | --- | ----------- | | [Find the structure of a text field](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-field-structure) | | | [Find the structure of a text message](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure) | | | [Find the structure of a text file](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure) | Analyzes a text file and returns its structure. | | [Test a Grok pattern](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern) | | ### [Transform](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-transform) The transform APIs enable you to create and manage transforms. | API | Description | | --- | ----------- | | [Get transforms](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform) | Retrieves configuration for one or more transforms. | | [Create a transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform) | Creates or updates a transform job. | | [Get transform stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats) | Get usage information for transforms. | | [Preview transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform) | Previews the results of a transform job. | | [Reset a transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform) | Previews the results of a transform job. | | [Delete transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform) | Deletes a transform job. 
| | [Schedule a transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform) | Previews the results of a transform job. | | [Start transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform) | Starts a transform job. | | [Stop transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform) | Stops a running transform job. | | [Update transform](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform) | Updates certain properties of a transform. | | [Upgrade all transforms](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms) | Updates certain properties of a transform. | ### [Usage](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-xpack) The usage API provides usage information about the installed X-Pack features. | API | Description | | --- | ----------- | | [Get information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-xpack-info) | Gets information about build details, license status, and a list of features currently available under the installed license. | | [Get usage information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-xpack-usage) | Get information about the features that are currently enabled and available under the current license. | ### [Watcher](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-watcher) You can use Watcher to watch for changes or anomalies in your data and perform the necessary actions in response. | API | Description | | --- | ----------- | | [Acknowledge a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch) | Acknowledges a watch action. | | [Activate a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch) | Activates a watch. 
| | [Deactivates a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch) | Deactivates a watch. | | [Get a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch) | Retrieves a watch by ID. | | [Create or update a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch) | Creates or updates a watch. | | [Delete a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch) | Deletes a watch. | | [Run a watch](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch) | Executes a watch manually. | | [Get Watcher index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings) | Get settings for the Watcher internal index | | [Update Watcher index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings) | Update settings for the Watcher internal index | | [Query watches](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches) | Get all registered watches in a paginated manner and optionally filter watches by a query. | | [Start the watch service](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start) | Starts the Watcher service. | | [Get Watcher statistics](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats) | Returns statistics about the Watcher service. | | [Stop the watch service](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop) | Stops the Watcher service. |
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/elasticsearch/rest-apis/index.md
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function FILE_ATTRIBUTES = { 'A': 'noatime', 'a': 'append', 'c': 'compressed', 'C': 'nocow', 'd': 'nodump', 'D': 'dirsync', 'e': 'extents', 'E': 'encrypted', 'h': 'blocksize', 'i': 'immutable', 'I': 'indexed', 'j': 'journalled', 'N': 'inline', 's': 'zero', 'S': 'synchronous', 't': 'notail', 'T': 'blockroot', 'u': 'undelete', 'X': 'compressedraw', 'Z': 'compresseddirty', } # Ansible modules can be written in any language. # The functions available here can be used to do many common tasks, # to simplify development of Python modules. import __main__ import atexit import errno import datetime import grp import fcntl import locale import os import pwd import platform import re import select import shlex import shutil import signal import stat import subprocess import sys import tempfile import time import traceback import types from collections import deque from itertools import chain, repeat try: import syslog HAS_SYSLOG = True except ImportError: HAS_SYSLOG = False try: from systemd import journal has_journal = True except ImportError: has_journal = False HAVE_SELINUX = False try: import selinux HAVE_SELINUX = True except ImportError: pass # Python2 & 3 way to get NoneType NoneType = type(None) from ansible.module_utils._text import to_native, to_bytes, to_text from ansible.module_utils.common.text.converters import ( jsonify, container_to_bytes as json_dict_unicode_to_bytes, container_to_text as json_dict_bytes_to_unicode, ) from ansible.module_utils.common.text.formatters import ( lenient_lowercase, bytes_to_human, human_to_bytes, SIZE_RANGES, ) try: from ansible.module_utils.common._json_compat import json except ImportError as e: print('\n{{"msg": "Error: ansible requires 
the stdlib json: {0}", "failed": true}}'.format(to_native(e))) sys.exit(1) AVAILABLE_HASH_ALGORITHMS = dict() try: import hashlib # python 2.7.9+ and 2.7.0+ for attribute in ('available_algorithms', 'algorithms'): algorithms = getattr(hashlib, attribute, None) if algorithms: break if algorithms is None: # python 2.5+ algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') for algorithm in algorithms: AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm) # we may have been able to import md5 but it could still not be available try: hashlib.md5() except ValueError: AVAILABLE_HASH_ALGORITHMS.pop('md5', None) except Exception: import sha AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha} try: import md5 AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5 except Exception: pass from ansible.module_utils.common._collections_compat import ( KeysView, Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet, ) from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.file import ( _PERM_BITS as PERM_BITS, _EXEC_PERM_BITS as EXEC_PERM_BITS, _DEFAULT_PERM as DEFAULT_PERM, is_executable, format_attributes, get_flags_from_attributes, ) from ansible.module_utils.common.sys_info import ( get_distribution, get_distribution_version, get_platform_subclass, ) from ansible.module_utils.pycompat24 import get_exception, literal_eval from ansible.module_utils.common.parameters import ( handle_aliases, list_deprecations, list_no_log_values, PASS_VARS, PASS_BOOLS, ) from ansible.module_utils.six import ( PY2, PY3, b, binary_type, integer_types, iteritems, string_types, text_type, ) from ansible.module_utils.six.moves import map, reduce, shlex_quote from ansible.module_utils.common.validation import ( check_missing_parameters, check_mutually_exclusive, check_required_arguments, check_required_by, check_required_if, check_required_one_of, check_required_together, count_terms, check_type_bool, check_type_bits, check_type_bytes, 
check_type_float, check_type_int, check_type_jsonarg, check_type_list, check_type_dict, check_type_path, check_type_raw, check_type_str, safe_eval, ) from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean # Note: When getting Sequence from collections, it matches with strings. If # this matters, make sure to check for strings before checking for sequencetype SEQUENCETYPE = frozenset, KeysView, Sequence PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I) imap = map try: # Python 2 unicode except NameError: # Python 3 unicode = text_type try: # Python 2 basestring except NameError: # Python 3 basestring = string_types _literal_eval = literal_eval # End of deprecated names # Internal global holding passed in params. This is consulted in case # multiple AnsibleModules are created. Otherwise each AnsibleModule would # attempt to read from stdin. Other code should not use this directly as it # is an internal implementation detail _ANSIBLE_ARGS = None FILE_COMMON_ARGUMENTS = dict( # These are things we want. About setting metadata (mode, ownership, permissions in general) on # created files (these are used by set_fs_attributes_if_different and included in # load_file_common_arguments) mode=dict(type='raw'), owner=dict(), group=dict(), seuser=dict(), serole=dict(), selevel=dict(), setype=dict(), attributes=dict(aliases=['attr']), # The following are not about perms and should not be in a rewritten file_common_args src=dict(), # Maybe dest or path would be appropriate but src is not follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too force=dict(type='bool'), # not taken by the file module, but other action plugins call the file module so this ignores # them for now. 
In the future, the caller should take care of removing these from the module
    # arguments before calling the file module.
    content=dict(no_log=True),  # used by copy
    backup=dict(),  # Used by a few modules to create a remote backup before updating the file
    remote_src=dict(),  # used by assemble
    regexp=dict(),  # used by assemble
    delimiter=dict(),  # used by assemble
    directory_mode=dict(),  # used by copy
    unsafe_writes=dict(type='bool'),  # should be available to any module using atomic_move
)

# Matches argument names that look password-like ("pass", "--password",
# "-passwd", ...); NOTE(review): presumably used elsewhere in this module to
# decide which command-line arguments must be sanitized before logging --
# confirm at the call sites, which are outside this view.
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')

# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')

# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
    # Interpreter is too old: emit a JSON failure blob on stdout (modules
    # communicate results as JSON) and exit before anything else can break.
    print(
        '\n{"failed": true, '
        '"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
    )
    sys.exit(1)


#
# Deprecated functions
#

def get_platform():
    '''
    **Deprecated** Use :py:func:`platform.system` directly.

    :returns: Name of the platform the module is running on in a native string

    Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
    the result of calling :py:func:`platform.system`.
    '''
    return platform.system()

# End deprecated functions


#
# Compat shims
#

def load_platform_subclass(cls, *args, **kwargs):
    """**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
    platform_cls = get_platform_subclass(cls)

    return super(cls, platform_cls).__new__(platform_cls)


def get_all_subclasses(cls):
    """**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
    return list(_get_all_subclasses(cls))


# End compat shims


def _remove_values_conditions(value, no_log_strings, deferred_removals):
    """ Helper function for :meth:`remove_values`.

    :arg value: The value to check for strings that need to be stripped
    :arg no_log_strings: set of strings which must be stripped out of any values
    :arg deferred_removals: List which holds information about nested
        containers that have to be iterated for removals.  It is passed into
        this function so that more entries can be added to it if value is
        a container type.  The format of each entry is a 2-tuple where the first
        element is the ``value`` parameter and the second value is a new
        container to copy the elements of ``value`` into once iterated.
    :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
        1. :class:`~datetime.datetime` objects which are changed into a string representation.
        2. objects which are in no_log_strings are replaced with a placeholder
            so that no sensitive data is leaked.
        If ``value`` is a container type, returns a new empty container.

    ``deferred_removals`` is added to as a side-effect of this function.

    .. warning:: It is up to the caller to make sure the order in which value
        is passed in is correct.  For instance, higher level containers need
        to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
        dictionary for ``level1``, then the dict for ``level2``, and finally
        the list for ``level3``.
    """
    if isinstance(value, (text_type, binary_type)):
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, errors='surrogate_or_strict')

        # An exact match is fully masked with the placeholder; otherwise each
        # sensitive substring is replaced with eight asterisks.
        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)

        # Convert the scrubbed native str back to the text/bytes type the
        # caller originally passed in.
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value
    elif isinstance(value, Sequence):
        if isinstance(value, MutableSequence):
            new_value = type(value)()
        else:
            new_value = []  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Set):
        if isinstance(value, MutableSet):
            new_value = type(value)()
        else:
            new_value = set()  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Mapping):
        if isinstance(value, MutableMapping):
            new_value = type(value)()
        else:
            new_value = {}  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
        # Numbers/bools/None cannot be partially replaced: if their string
        # form matches or merely contains a no_log string, mask the whole value.
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))

    return value


def 
remove_values(value, no_log_strings): """ Remove strings in no_log_strings from value. If value is a container type, then remove a lot more""" deferred_removals = deque() no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings] new_value = _remove_values_conditions(value, no_log_strings, deferred_removals) while deferred_removals: old_data, new_data = deferred_removals.popleft() if isinstance(new_data, Mapping): for old_key, old_elem in old_data.items(): new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals) new_data[old_key] = new_elem else: for elem in old_data: new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals) if isinstance(new_data, MutableSequence): new_data.append(new_elem) elif isinstance(new_data, MutableSet): new_data.add(new_elem) else: raise TypeError('Unknown container type encountered when removing private values from output') return new_value def heuristic_log_sanitize(data, no_log_values=None): ''' Remove strings that look like passwords from log messages ''' # Currently filters: # user:pass@foo/whatever and http://username:pass@wherever/foo # This code has false positives and consumes parts of logs that are # not passwds # begin: start of a passwd containing string # end: end of a passwd containing string # sep: char between user and passwd # prev_begin: where in the overall string to start a search for # a passwd # sep_search_end: where in the string to end a search for the sep data = to_native(data) output = [] begin = len(data) prev_begin = begin sep = 1 while sep: # Find the potential end of a passwd try: end = data.rindex('@', 0, begin) except ValueError: # No passwd in the rest of the data output.insert(0, data[0:begin]) break # Search for the beginning of a passwd sep = None sep_search_end = end while not sep: # URL-style username+password try: begin = data.rindex('://', 0, sep_search_end) except ValueError: # No url style in the data, check for ssh style in 
the # rest of the string begin = 0 # Search for separator try: sep = data.index(':', begin + 3, end) except ValueError: # No separator; choices: if begin == 0: # Searched the whole string so there's no password # here. Return the remaining data output.insert(0, data[0:begin]) break # Search for a different beginning of the password field. sep_search_end = begin continue if sep: # Password was found; remove it. output.insert(0, data[end:prev_begin]) output.insert(0, '********') output.insert(0, data[begin:sep + 1]) prev_begin = begin output = ''.join(output) if no_log_values: output = remove_values(output, no_log_values) return output def _load_params(): ''' read the modules parameters and store them globally. This function may be needed for certain very dynamic custom modules which want to process the parameters that are being handed the module. Since this is so closely tied to the implementation of modules we cannot guarantee API stability for it (it may change between versions) however we will try not to break it gratuitously. It is certainly more future-proof to call this function and consume its outputs than to implement the logic inside it as a copy in your own code. ''' global _ANSIBLE_ARGS if _ANSIBLE_ARGS is not None: buffer = _ANSIBLE_ARGS else: # debug overrides to read args from file or cmdline # Avoid tracebacks when locale is non-utf8 # We control the args and we pass them as utf8 if len(sys.argv) > 1: if os.path.isfile(sys.argv[1]): fd = open(sys.argv[1], 'rb') buffer = fd.read() fd.close() else: buffer = sys.argv[1] if PY3: buffer = buffer.encode('utf-8', errors='surrogateescape') # default case, read from stdin else: if PY2: buffer = sys.stdin.read() else: buffer = sys.stdin.buffer.read() _ANSIBLE_ARGS = buffer try: params = json.loads(buffer.decode('utf-8')) except ValueError: # This helper used too early for fail_json to work. print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. 
Unable to figure out what parameters were passed", "failed": true}') sys.exit(1) if PY2: params = json_dict_unicode_to_bytes(params) try: return params['ANSIBLE_MODULE_ARGS'] except KeyError: # This helper does not have access to fail_json so we have to print # json output on our own. print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", ' '"failed": true}') sys.exit(1) def env_fallback(*args, **kwargs): ''' Load value from environment ''' for arg in args: if arg in os.environ: return os.environ[arg] raise AnsibleFallbackNotFound def missing_required_lib(library, reason=None, url=None): hostname = platform.node() msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable) if reason: msg += " This is required %s." % reason if url: msg += " See %s for more info." % url return msg + " Please read module documentation and install in the appropriate location" class AnsibleFallbackNotFound(Exception): pass class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=None, mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False, supports_check_mode=False, required_if=None, required_by=None): ''' Common code for quickly building an ansible module in Python (although you can write modules with anything that can return JSON). See :ref:`developing_modules_general` for a general introduction and :ref:`developing_program_flow_modules` for more detailed explanation. 
''' self._name = os.path.basename(__file__) # initialize name until we can parse from options self.argument_spec = argument_spec self.supports_check_mode = supports_check_mode self.check_mode = False self.bypass_checks = bypass_checks self.no_log = no_log # Check whether code set this explicitly for deprecation purposes if check_invalid_arguments is None: check_invalid_arguments = True module_set_check_invalid_arguments = False else: module_set_check_invalid_arguments = True self.check_invalid_arguments = check_invalid_arguments self.mutually_exclusive = mutually_exclusive self.required_together = required_together self.required_one_of = required_one_of self.required_if = required_if self.required_by = required_by self.cleanup_files = [] self._debug = False self._diff = False self._socket_path = None self._shell = None self._verbosity = 0 # May be used to set modifications to the environment for any # run_command invocation self.run_command_environ_update = {} self._warnings = [] self._deprecations = [] self._clean = {} self._string_conversion_action = '' self.aliases = {} self._legal_inputs = [] self._options_context = list() self._tmpdir = None if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): if k not in self.argument_spec: self.argument_spec[k] = v self._load_params() self._set_fallbacks() # append to legal_inputs and then possibly check against them try: self.aliases = self._handle_aliases() except (ValueError, TypeError) as e: # Use exceptions here because it isn't safe to call fail_json until no_log is processed print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e)) sys.exit(1) # Save parameter values that should never be logged self.no_log_values = set() self._handle_no_log_values() # check the locale as set by the current environment, and reset to # a known valid (LANG=C) if it's an invalid/unavailable locale self._check_locale() self._check_arguments(check_invalid_arguments) # check exclusive early if not 
bypass_checks: self._check_mutually_exclusive(mutually_exclusive) self._set_defaults(pre=True) self._CHECK_ARGUMENT_TYPES_DISPATCHER = { 'str': self._check_type_str, 'list': self._check_type_list, 'dict': self._check_type_dict, 'bool': self._check_type_bool, 'int': self._check_type_int, 'float': self._check_type_float, 'path': self._check_type_path, 'raw': self._check_type_raw, 'jsonarg': self._check_type_jsonarg, 'json': self._check_type_jsonarg, 'bytes': self._check_type_bytes, 'bits': self._check_type_bits, } if not bypass_checks: self._check_required_arguments() self._check_argument_types() self._check_argument_values() self._check_required_together(required_together) self._check_required_one_of(required_one_of) self._check_required_if(required_if) self._check_required_by(required_by) self._set_defaults(pre=False) # deal with options sub-spec self._handle_options() if not self.no_log: self._log_invocation() # finally, make sure we're in a sane working dir self._set_cwd() # Do this at the end so that logging parameters have been set up # This is to warn third party module authors that the functionatlity is going away. # We exclude uri and zfs as they have their own deprecation warnings for users and we'll # make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'): self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.' ' Update the code for this module In the future, AnsibleModule will' ' always check for invalid arguments.', version='2.9') @property def tmpdir(self): # if _ansible_tmpdir was not set and we have a remote_tmp, # the module needs to create it and clean it up once finished. 
# otherwise we create our own module tmp dir from the system defaults if self._tmpdir is None: basedir = None if self._remote_tmp is not None: basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp)) if basedir is not None and not os.path.exists(basedir): try: os.makedirs(basedir, mode=0o700) except (OSError, IOError) as e: self.warn("Unable to use %s as temporary directory, " "failing back to system: %s" % (basedir, to_native(e))) basedir = None else: self.warn("Module remote_tmp %s did not exist and was " "created with a mode of 0700, this may cause" " issues when running as another user. To " "avoid this, create the remote_tmp dir with " "the correct permissions manually" % basedir) basefile = "ansible-moduletmp-%s-" % time.time() try: tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir) except (OSError, IOError) as e: self.fail_json( msg="Failed to create remote module tmp path at dir %s " "with prefix %s: %s" % (basedir, basefile, to_native(e)) ) if not self._keep_remote_files: atexit.register(shutil.rmtree, tmpdir) self._tmpdir = tmpdir return self._tmpdir def warn(self, warning): if isinstance(warning, string_types): self._warnings.append(warning) self.log('[WARNING] %s' % warning) else: raise TypeError("warn requires a string not a %s" % type(warning)) def deprecate(self, msg, version=None): if isinstance(msg, string_types): self._deprecations.append({ 'msg': msg, 'version': version }) self.log('[DEPRECATION WARNING] %s %s' % (msg, version)) else: raise TypeError("deprecate requires a string not a %s" % type(msg)) def load_file_common_arguments(self, params): ''' many modules deal with files, this encapsulates common options that the file module accepts such that it is directly available to all modules and they can share code. 
''' path = params.get('path', params.get('dest', None)) if path is None: return {} else: path = os.path.expanduser(os.path.expandvars(path)) b_path = to_bytes(path, errors='surrogate_or_strict') # if the path is a symlink, and we're following links, get # the target of the link instead for testing if params.get('follow', False) and os.path.islink(b_path): b_path = os.path.realpath(b_path) path = to_native(b_path) mode = params.get('mode', None) owner = params.get('owner', None) group = params.get('group', None) # selinux related options seuser = params.get('seuser', None) serole = params.get('serole', None) setype = params.get('setype', None) selevel = params.get('selevel', None) secontext = [seuser, serole, setype] if self.selinux_mls_enabled(): secontext.append(selevel) default_secontext = self.selinux_default_context(path) for i in range(len(default_secontext)): if i is not None and secontext[i] == '_default': secontext[i] = default_secontext[i] attributes = params.get('attributes', None) return dict( path=path, mode=mode, owner=owner, group=group, seuser=seuser, serole=serole, setype=setype, selevel=selevel, secontext=secontext, attributes=attributes, ) # Detect whether using selinux that is MLS-aware. # While this means you can set the level/range with # selinux.lsetfilecon(), it may or may not mean that you # will get the selevel as part of the context returned # by selinux.lgetfilecon(). 
    def selinux_mls_enabled(self):
        """Return True when the selinux bindings report an MLS-aware policy."""
        if not HAVE_SELINUX:
            return False
        if selinux.is_selinux_mls_enabled() == 1:
            return True
        else:
            return False

    def selinux_enabled(self):
        """Return True when SELinux is enabled on the target.

        If the python bindings are missing but the 'selinuxenabled' tool says
        SELinux is active, abort via fail_json (bindings are required).
        """
        if not HAVE_SELINUX:
            seenabled = self.get_bin_path('selinuxenabled')
            if seenabled is not None:
                (rc, out, err) = self.run_command(seenabled)
                if rc == 0:
                    self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
            return False
        if selinux.is_selinux_enabled() == 1:
            return True
        else:
            return False

    # Determine whether we need a placeholder for selevel/mls
    def selinux_initial_context(self):
        # 3 fields (user, role, type), plus level when MLS is enabled
        context = [None, None, None]
        if self.selinux_mls_enabled():
            context.append(None)
        return context

    # If selinux fails to find a default, return an array of None
    def selinux_default_context(self, path, mode=0):
        """Return the default SELinux context for path as a list of fields."""
        context = self.selinux_initial_context()
        if not HAVE_SELINUX or not self.selinux_enabled():
            return context
        try:
            ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
        except OSError:
            return context
        if ret[0] == -1:
            return context
        # Limit split to 4 because the selevel, the last in the list,
        # may contain ':' characters
        context = ret[1].split(':', 3)
        return context

    def selinux_context(self, path):
        """Return the current SELinux context of path as a list of fields."""
        context = self.selinux_initial_context()
        if not HAVE_SELINUX or not self.selinux_enabled():
            return context
        try:
            ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
        except OSError as e:
            if e.errno == errno.ENOENT:
                self.fail_json(path=path, msg='path %s does not exist' % path)
            else:
                self.fail_json(path=path, msg='failed to retrieve selinux context')
        if ret[0] == -1:
            return context
        # Limit split to 4 because the selevel, the last in the list,
        # may contain ':' characters
        context = ret[1].split(':', 3)
        return context

    def user_and_group(self, path, expand=True):
        """Return (uid, gid) owning path (lstat, so symlinks not followed)."""
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))
        st = os.lstat(b_path)
        uid = st.st_uid
        gid = st.st_gid
        return (uid, gid)

    def find_mount_point(self, path):
        """Walk up from path until a filesystem mount point is reached.

        Returns bytes if given bytes, text otherwise.
        """
        path_is_bytes = False
        if isinstance(path, binary_type):
            path_is_bytes = True

        b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
        while not os.path.ismount(b_path):
            b_path = os.path.dirname(b_path)

        if path_is_bytes:
            return b_path

        return to_text(b_path, errors='surrogate_or_strict')

    def is_special_selinux_path(self, path):
        """
        Returns a tuple containing (True, selinux_context) if the given path is on a
        NFS or other 'special' fs  mount point, otherwise the return will be (False, None).
        """
        try:
            f = open('/proc/mounts', 'r')
            mount_data = f.readlines()
            f.close()
        except Exception:
            # best-effort: no /proc/mounts means we can't classify the path
            return (False, None)
        path_mount_point = self.find_mount_point(path)
        for line in mount_data:
            # /proc/mounts fields: device mountpoint fstype options dump/pass
            (device, mount_point, fstype, options, rest) = line.split(' ', 4)

            if path_mount_point == mount_point:
                # _selinux_special_fs is a configured list of fs names (e.g. nfs)
                for fs in self._selinux_special_fs:
                    if fs in fstype:
                        special_context = self.selinux_context(path_mount_point)
                        return (True, special_context)

        return (False, None)

    def set_default_selinux_context(self, path, changed):
        """Reset path's SELinux context to the policy default; returns changed."""
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        context = self.selinux_default_context(path)
        return self.set_context_if_different(path, context, False)

    def set_context_if_different(self, path, context, changed, diff=None):
        """Apply the requested SELinux context fields to path if they differ.

        Returns the (possibly updated) changed flag; honors check mode.
        """
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed

        if self.check_file_absent_if_check_mode(path):
            return True

        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.

        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        new_context[i] = cur_context[i]

        if cur_context != new_context:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context

            try:
                if self.check_mode:
                    return True
                rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
            except OSError as e:
                self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
                               new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed

    def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
        """lchown path to owner (name or uid) if different; returns changed."""
        if owner is None:
            return changed

        b_path = to_bytes(path, errors='surrogate_or_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))

        if self.check_file_absent_if_check_mode(b_path):
            return True

        orig_uid, orig_gid = self.user_and_group(b_path, expand)
        try:
            uid = int(owner)
        except ValueError:
            try:
                uid = pwd.getpwnam(owner).pw_uid
            except KeyError:
                path = to_text(b_path)
                self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)

        if orig_uid != uid:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['owner'] = orig_uid
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['owner'] = uid

            if self.check_mode:
                return True
            try:
                # -1 leaves the group untouched
                os.lchown(b_path, uid, -1)
            except (IOError, OSError) as e:
                path = to_text(b_path)
                self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
            changed = True
        return changed

    def set_group_if_different(self, path, group, changed, diff=None, expand=True):
        """lchown path's group (name or gid) if different; returns changed."""
        if group is None:
            return changed

        b_path = to_bytes(path, errors='surrogate_or_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))

        if self.check_file_absent_if_check_mode(b_path):
            return True

        orig_uid, orig_gid = self.user_and_group(b_path, expand)
        try:
            gid = int(group)
        except ValueError:
            try:
                gid = grp.getgrnam(group).gr_gid
            except KeyError:
                path = to_text(b_path)
                self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)

        if orig_gid != gid:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['group'] = orig_gid
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['group'] = gid

            if self.check_mode:
                return True
            try:
                # -1 leaves the owner untouched
                os.lchown(b_path, -1, gid)
            except OSError:
                path = to_text(b_path)
                self.fail_json(path=path, msg='chgrp failed')
            changed = True
        return changed

    def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
        """chmod path to mode (octal int, octal string or symbolic string)
        if different; returns changed."""
        if mode is None:
            return changed

        b_path = to_bytes(path, errors='surrogate_or_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))
        # NOTE(review): lstat happens before the check-mode absent-file
        # shortcut below, so a missing path raises here even in check mode —
        # confirm whether that ordering is intended
        path_stat = os.lstat(b_path)

        if self.check_file_absent_if_check_mode(b_path):
            return True

        if not isinstance(mode, int):
            try:
                mode = int(mode, 8)
            except Exception:
                try:
                    mode = self._symbolic_mode_to_octal(path_stat, mode)
                except Exception as e:
                    path = to_text(b_path)
                    self.fail_json(path=path,
                                   msg="mode must be in octal or symbolic form",
                                   details=to_native(e))

                if mode != stat.S_IMODE(mode):
                    # prevent mode from having extra info orbeing invalid long number
                    path = to_text(b_path)
                    self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)

        prev_mode = stat.S_IMODE(path_stat.st_mode)

        if prev_mode != mode:

            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['mode'] = '0%03o' % prev_mode
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['mode'] = '0%03o' % mode

            if self.check_mode:
                return True
            # FIXME: comparison against string above will cause this to be executed
            # every time
            try:
                if hasattr(os, 'lchmod'):
                    os.lchmod(b_path, mode)
                else:
                    if not os.path.islink(b_path):
                        os.chmod(b_path, mode)
                    else:
                        # Attempt to set the perms of the symlink but be
                        # careful not to change the perms of the underlying
                        # file while trying
                        underlying_stat = os.stat(b_path)
                        os.chmod(b_path, mode)
                        new_underlying_stat = os.stat(b_path)
                        if underlying_stat.st_mode != new_underlying_stat.st_mode:
                            os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
            except OSError as e:
                if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                    pass
                elif e.errno in (errno.ENOENT, errno.ELOOP):  # Can't set mode on broken symbolic links
                    pass
                else:
                    raise
            except Exception as e:
                path = to_text(b_path)
                self.fail_json(path=path, msg='chmod failed', details=to_native(e),
                               exception=traceback.format_exc())

            path_stat = os.lstat(b_path)
            new_mode = stat.S_IMODE(path_stat.st_mode)

            if new_mode != prev_mode:
                changed = True
        return changed

    def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
        """Apply ext filesystem attributes via chattr if different.

        attributes may be prefixed with '+'/'-' to add/remove flags; bare
        flags mean '=' (set exactly).  Returns changed.
        """
        if attributes is None:
            return changed

        b_path = to_bytes(path, errors='surrogate_or_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))

        if self.check_file_absent_if_check_mode(b_path):
            return True

        existing = self.get_file_attributes(b_path)

        attr_mod = '='
        if attributes.startswith(('-', '+')):
            attr_mod = attributes[0]
            attributes = attributes[1:]

        if existing.get('attr_flags', '') != attributes or attr_mod == '-':
            attrcmd = self.get_bin_path('chattr')
            if attrcmd:
                attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
                changed = True

                if diff is not None:
                    if 'before' not in diff:
                        diff['before'] = {}
                    diff['before']['attributes'] = existing.get('attr_flags')
                    if 'after' not in diff:
                        diff['after'] = {}
                    diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)

                if not self.check_mode:
                    try:
                        rc, out, err = self.run_command(attrcmd)
                        if rc != 0 or err:
                            raise Exception("Error while setting attributes: %s" % (out + err))
                    except Exception as e:
                        self.fail_json(path=to_text(b_path), msg='chattr failed',
                                       details=to_native(e), exception=traceback.format_exc())
        return changed

    def get_file_attributes(self, path):
        """Read ext filesystem attributes of path via lsattr; best-effort
        (returns {} on any failure)."""
        output = {}
        attrcmd = self.get_bin_path('lsattr', False)
        if attrcmd:
            attrcmd = [attrcmd, '-vd', path]
            try:
                rc, out, err = self.run_command(attrcmd)
                if rc == 0:
                    res = out.split()
                    output['attr_flags'] = res[1].replace('-', '').strip()
                    output['version'] = res[0].strip()
                    output['attributes'] = format_attributes(output['attr_flags'])
            except Exception:
                pass
        return output

    @classmethod
    def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
        """
        This enables symbolic chmod string parsing as stated in the chmod man-page

        This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
        """
        new_mode = stat.S_IMODE(path_stat.st_mode)

        # Now parse all symbolic modes
        for mode in symbolic_mode.split(','):
            # Per single mode. This always contains a '+', '-' or '='
            # Split it on that
            permlist = MODE_OPERATOR_RE.split(mode)
            # And find all the operators
            opers = MODE_OPERATOR_RE.findall(mode)

            # The user(s) where it's all about is the first element in the
            # 'permlist' list. Take that and remove it from the list.
            # An empty user or 'a' means 'all'.
            users = permlist.pop(0)
            use_umask = (users == '')
            if users == 'a' or users == '':
                users = 'ugo'

            # Check if there are illegal characters in the user list
            # They can end up in 'users' because they are not split
            if USERS_RE.match(users):
                raise ValueError("bad symbolic permission for mode: %s" % mode)

            # Now we have two list of equal length, one contains the requested
            # permissions and one with the corresponding operators.
            for idx, perms in enumerate(permlist):
                # Check if there are illegal characters in the permissions
                if PERMS_RE.match(perms):
                    raise ValueError("bad symbolic permission for mode: %s" % mode)

                for user in users:
                    mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
                    new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)

        return new_mode

    @staticmethod
    def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
        """Combine mode_to_apply into current_mode per the '=', '+' or '-'
        operator for the given user class ('u', 'g' or 'o')."""
        if operator == '=':
            if user == 'u':
                mask = stat.S_IRWXU | stat.S_ISUID
            elif user == 'g':
                mask = stat.S_IRWXG | stat.S_ISGID
            elif user == 'o':
                mask = stat.S_IRWXO | stat.S_ISVTX

            # mask out u, g, or o permissions from current_mode and apply new permissions
            inverse_mask = mask ^ PERM_BITS
            new_mode = (current_mode & inverse_mask) | mode_to_apply
        elif operator == '+':
            new_mode = current_mode | mode_to_apply
        elif operator == '-':
            new_mode = current_mode - (current_mode & mode_to_apply)
        return new_mode

    @staticmethod
    def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
        """Translate one perms string (e.g. 'rwX') for one user class into an
        octal permission mask, honoring the umask when requested."""
        prev_mode = stat.S_IMODE(path_stat.st_mode)

        is_directory = stat.S_ISDIR(path_stat.st_mode)
        has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
        apply_X_permission = is_directory or has_x_permissions

        # Get the umask, if the 'user' part is empty, the effect is as if (a) were
        # given, but bits that are set in the umask are not affected.
        # We also need the "reversed umask" for masking
        umask = os.umask(0)
        os.umask(umask)
        rev_umask = umask ^ PERM_BITS

        # Permission bits constants documented at:
        # http://docs.python.org/2/library/stat.html#stat.S_ISUID
        if apply_X_permission:
            X_perms = {
                'u': {'X': stat.S_IXUSR},
                'g': {'X': stat.S_IXGRP},
                'o': {'X': stat.S_IXOTH},
            }
        else:
            X_perms = {
                'u': {'X': 0},
                'g': {'X': 0},
                'o': {'X': 0},
            }

        user_perms_to_modes = {
            'u': {
                'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
                'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
                'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
                's': stat.S_ISUID,
                't': 0,
                'u': prev_mode & stat.S_IRWXU,
                'g': (prev_mode & stat.S_IRWXG) << 3,
                'o': (prev_mode & stat.S_IRWXO) << 6},
            'g': {
                'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
                'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
                'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
                's': stat.S_ISGID,
                't': 0,
                'u': (prev_mode & stat.S_IRWXU) >> 3,
                'g': prev_mode & stat.S_IRWXG,
                'o': (prev_mode & stat.S_IRWXO) << 3},
            'o': {
                'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
                'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
                'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
                's': 0,
                't': stat.S_ISVTX,
                'u': (prev_mode & stat.S_IRWXU) >> 6,
                'g': (prev_mode & stat.S_IRWXG) >> 3,
                'o': prev_mode & stat.S_IRWXO},
        }

        # Insert X_perms into user_perms_to_modes
        for key, value in X_perms.items():
            user_perms_to_modes[key].update(value)

        def or_reduce(mode, perm):
            return mode | user_perms_to_modes[user][perm]

        return reduce(or_reduce, perms, 0)

    def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        """Apply context, owner, group, mode and attributes from file_args
        in order, accumulating the changed flag."""
        # set modes owners and context as needed
        changed = self.set_context_if_different(
            file_args['path'], file_args['secontext'], changed, diff
        )
        changed = self.set_owner_if_different(
            file_args['path'], file_args['owner'], changed, diff, expand
        )
        changed = self.set_group_if_different(
            file_args['path'], file_args['group'], changed, diff, expand
        )
        changed = self.set_mode_if_different(
            file_args['path'], file_args['mode'], changed, diff, expand
        )
        changed = self.set_attributes_if_different(
            file_args['path'], file_args['attributes'], changed, diff, expand
        )
        return changed

    def check_file_absent_if_check_mode(self, file_path):
        """True when running in check mode against a path that does not exist
        (so mutating methods can short-circuit and report 'changed')."""
        return self.check_mode and not os.path.exists(file_path)

    def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Thin alias kept for API compatibility
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)

    def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Thin alias kept for API compatibility
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)

    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.
        '''
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if os.path.exists(b_path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            st = os.lstat(b_path)
            kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
            # secontext not yet supported
            if os.path.islink(b_path):
                kwargs['state'] = 'link'
            elif os.path.isdir(b_path):
                kwargs['state'] = 'directory'
            elif os.stat(b_path).st_nlink > 1:
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        return kwargs

    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            os.environ['LANG'] = 'C'
            os.environ['LC_ALL'] = 'C'
            os.environ['LC_MESSAGES'] = 'C'
        except Exception as e:
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
                           to_native(e), exception=traceback.format_exc())

    def _handle_aliases(self, spec=None, param=None):
        """Resolve parameter aliases; returns the alias mapping and records
        legal inputs.  Raises instead of fail_json (runs before no_log)."""
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params

        # this uses exceptions as it happens before we can safely call fail_json
        alias_results, self._legal_inputs = handle_aliases(spec, param)
        return alias_results

    def _handle_no_log_values(self, spec=None, param=None):
        """Collect values flagged no_log (and deprecations) from the params."""
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params

        self.no_log_values.update(list_no_log_values(spec, param))
        self._deprecations.extend(list_deprecations(spec, param))

    def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
        """Validate top-level params, siphon off internal _ansible_* control
        vars into attributes, and fail on unsupported parameters."""
        self._syslog_facility = 'LOG_USER'
        unsupported_parameters = set()
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params
        if legal_inputs is None:
            legal_inputs = self._legal_inputs

        for k in list(param.keys()):

            if check_invalid_arguments and k not in legal_inputs:
                unsupported_parameters.add(k)

        for k in PASS_VARS:
            # handle setting internal properties from internal ansible vars
            param_key = '_ansible_%s' % k
            if param_key in param:
                if k in PASS_BOOLS:
                    setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
                else:
                    setattr(self, PASS_VARS[k][0], param[param_key])

                # clean up internal top level params:
                if param_key in self.params:
                    del self.params[param_key]
            else:
                # use defaults if not already set
                if not hasattr(self, PASS_VARS[k][0]):
                    setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])

        if unsupported_parameters:
            msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
            if self._options_context:
                msg += " found in %s." % " -> ".join(self._options_context)
            msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
            self.fail_json(msg=msg)
        if self.check_mode and not self.supports_check_mode:
            self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)

    def _count_terms(self, check, param=None):
        # Count how many of the names in 'check' are present in params
        if param is None:
            param = self.params
        return count_terms(check, param)

    def _check_mutually_exclusive(self, spec, param=None):
        """fail_json when mutually-exclusive parameters appear together."""
        if param is None:
            param = self.params

        try:
            check_mutually_exclusive(spec, param)
        except TypeError as e:
            msg = to_native(e)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)

    def _check_required_one_of(self, spec, param=None):
        """fail_json when none of a required-one-of group is present."""
        if spec is None:
            return

        if param is None:
            param = self.params

        try:
            check_required_one_of(spec, param)
        except TypeError as e:
            msg = to_native(e)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)

    def _check_required_together(self, spec, param=None):
        """fail_json when only part of a required-together group is present."""
        if spec is None:
            return
        if param is None:
            param = self.params

        try:
            check_required_together(spec, param)
        except TypeError as e:
            msg = to_native(e)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)

    def _check_required_by(self, spec, param=None):
        """fail_json when a parameter's declared dependencies are missing."""
        if spec is None:
            return
        if param is None:
            param = self.params

        try:
            check_required_by(spec, param)
        except TypeError as e:
            self.fail_json(msg=to_native(e))

    def _check_required_arguments(self, spec=None, param=None):
        """fail_json when any required=True parameter is absent."""
        if spec is None:
            spec = self.argument_spec
        if param is None:
            param = self.params

        try:
            check_required_arguments(spec, param)
        except TypeError as e:
            msg = to_native(e)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None): ''' ensure that parameters which conditionally required are present ''' if spec is None: return if param is None: param = self.params try: check_required_if(spec, param) except TypeError as e: msg = to_native(e) if self._options_context: msg += " found in %s" % " -> ".join(self._options_context) self.fail_json(msg=msg) def _check_argument_values(self, spec=None, param=None): ''' ensure all arguments have the requested values, and there are no stray arguments ''' if spec is None: spec = self.argument_spec if param is None: param = self.params for (k, v) in spec.items(): choices = v.get('choices', None) if choices is None: continue if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)): if k in param: # Allow one or more when type='list' param with choices if isinstance(param[k], list): diff_list = ", ".join([item for item in param[k] if item not in choices]) if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list) if self._options_context: msg += " found in %s" % " -> ".join(self._options_context) self.fail_json(msg=msg) elif param[k] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. 
lowered_choices = None if param[k] == 'False': lowered_choices = lenient_lowercase(choices) overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (param[k],) = overlap if param[k] == 'True': if lowered_choices is None: lowered_choices = lenient_lowercase(choices) overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (param[k],) = overlap if param[k] not in choices: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k]) if self._options_context: msg += " found in %s" % " -> ".join(self._options_context) self.fail_json(msg=msg) else: msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices) if self._options_context: msg += " found in %s" % " -> ".join(self._options_context) self.fail_json(msg=msg) def safe_eval(self, value, locals=None, include_exceptions=False): return safe_eval(value, locals, include_exceptions) def _check_type_str(self, value): opts = { 'error': False, 'warn': False, 'ignore': True } # Ignore, warn, or error when converting to a string. allow_conversion = opts.get(self._string_conversion_action, True) try: return check_type_str(value, allow_conversion) except TypeError: common_msg = 'quote the entire value to ensure it does not change.' if self._string_conversion_action == 'error': msg = common_msg.capitalize() raise TypeError(to_native(msg)) elif self._string_conversion_action == 'warn': msg = ('The value {0!r} (type {0.__class__.__name__}) in a string field was converted to {1!r} (type string). 
' 'If this does not look like what you expect, {2}').format(value, to_text(value), common_msg) self.warn(to_native(msg)) return to_native(value, errors='surrogate_or_strict') def _check_type_list(self, value): return check_type_list(value) def _check_type_dict(self, value): return check_type_dict(value) def _check_type_bool(self, value): return check_type_bool(value) def _check_type_int(self, value): return check_type_int(value) def _check_type_float(self, value): return check_type_float(value) def _check_type_path(self, value): return check_type_path(value) def _check_type_jsonarg(self, value): return check_type_jsonarg(value) def _check_type_raw(self, value): return check_type_raw(value) def _check_type_bytes(self, value): return check_type_bytes(value) def _check_type_bits(self, value): return check_type_bits(value) def _handle_options(self, argument_spec=None, params=None): ''' deal with options to create sub spec ''' if argument_spec is None: argument_spec = self.argument_spec if params is None: params = self.params for (k, v) in argument_spec.items(): wanted = v.get('type', None) if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'): spec = v.get('options', None) if v.get('apply_defaults', False): if spec is not None: if params.get(k) is None: params[k] = {} else: continue elif spec is None or k not in params or params[k] is None: continue self._options_context.append(k) if isinstance(params[k], dict): elements = [params[k]] else: elements = params[k] for param in elements: if not isinstance(param, dict): self.fail_json(msg="value of %s must be of type dict or list of dict" % k) self._set_fallbacks(spec, param) options_aliases = self._handle_aliases(spec, param) self._handle_no_log_values(spec, param) options_legal_inputs = list(spec.keys()) + list(options_aliases.keys()) self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs) # check exclusive early if not self.bypass_checks: 
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param) self._set_defaults(pre=True, spec=spec, param=param) if not self.bypass_checks: self._check_required_arguments(spec, param) self._check_argument_types(spec, param) self._check_argument_values(spec, param) self._check_required_together(v.get('required_together', None), param) self._check_required_one_of(v.get('required_one_of', None), param) self._check_required_if(v.get('required_if', None), param) self._check_required_by(v.get('required_by', None), param) self._set_defaults(pre=False, spec=spec, param=param) # handle multi level options (sub argspec) self._handle_options(spec, param) self._options_context.pop() def _get_wanted_type(self, wanted, k): if not callable(wanted): if wanted is None: # Mostly we want to default to str. # For values set to None explicitly, return None instead as # that allows a user to unset a parameter wanted = 'str' try: type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted] except KeyError: self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) else: # set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock) type_checker = wanted wanted = getattr(wanted, '__name__', to_native(type(wanted))) return type_checker, wanted def _handle_elements(self, wanted, param, values): type_checker, wanted_name = self._get_wanted_type(wanted, param) validated_params = [] for value in values: try: validated_params.append(type_checker(value)) except (TypeError, ValueError) as e: msg = "Elements value for option %s" % param if self._options_context: msg += " found in '%s'" % " -> ".join(self._options_context) msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e)) self.fail_json(msg=msg) return validated_params def _check_argument_types(self, spec=None, param=None): ''' ensure all arguments have the requested type ''' if spec 
is None: spec = self.argument_spec if param is None: param = self.params for (k, v) in spec.items(): wanted = v.get('type', None) if k not in param: continue value = param[k] if value is None: continue type_checker, wanted_name = self._get_wanted_type(wanted, k) try: param[k] = type_checker(value) wanted_elements = v.get('elements', None) if wanted_elements: if wanted != 'list' or not isinstance(param[k], list): msg = "Invalid type %s for option '%s'" % (wanted_name, param) if self._options_context: msg += " found in '%s'." % " -> ".join(self._options_context) msg += ", elements value check is supported only with 'list' type" self.fail_json(msg=msg) param[k] = self._handle_elements(wanted_elements, k, param[k]) except (TypeError, ValueError) as e: msg = "argument %s is of type %s" % (k, type(value)) if self._options_context: msg += " found in '%s'." % " -> ".join(self._options_context) msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e)) self.fail_json(msg=msg) def _set_defaults(self, pre=True, spec=None, param=None): if spec is None: spec = self.argument_spec if param is None: param = self.params for (k, v) in spec.items(): default = v.get('default', None) if pre is True: # this prevents setting defaults on required items if default is not None and k not in param: param[k] = default else: # make sure things without a default still get set None if k not in param: param[k] = default def _set_fallbacks(self, spec=None, param=None): if spec is None: spec = self.argument_spec if param is None: param = self.params for (k, v) in spec.items(): fallback = v.get('fallback', (None,)) fallback_strategy = fallback[0] fallback_args = [] fallback_kwargs = {} if k not in param and fallback_strategy is not None: for item in fallback[1:]: if isinstance(item, dict): fallback_kwargs = item else: fallback_args = item try: param[k] = fallback_strategy(*fallback_args, **fallback_kwargs) except AnsibleFallbackNotFound: continue def _load_params(self): ''' read 
the input and set the params attribute. This method is for backwards compatibility. The guts of the function were moved out in 2.1 so that custom modules could read the parameters. ''' # debug overrides to read args from file or cmdline self.params = _load_params() def _log_to_syslog(self, msg): if HAS_SYSLOG: module = 'ansible-%s' % self._name facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) syslog.openlog(str(module), 0, facility) syslog.syslog(syslog.LOG_INFO, msg) def debug(self, msg): if self._debug: self.log('[debug] %s' % msg) def log(self, msg, log_args=None): if not self.no_log: if log_args is None: log_args = dict() module = 'ansible-%s' % self._name if isinstance(module, binary_type): module = module.decode('utf-8', 'replace') # 6655 - allow for accented characters if not isinstance(msg, (binary_type, text_type)): raise TypeError("msg should be a string (got %s)" % type(msg)) # We want journal to always take text type # syslog takes bytes on py2, text type on py3 if isinstance(msg, binary_type): journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values) else: # TODO: surrogateescape is a danger here on Py3 journal_msg = remove_values(msg, self.no_log_values) if PY3: syslog_msg = journal_msg else: syslog_msg = journal_msg.encode('utf-8', 'replace') if has_journal: journal_args = [("MODULE", os.path.basename(__file__))] for arg in log_args: journal_args.append((arg.upper(), str(log_args[arg]))) try: if HAS_SYSLOG: # If syslog_facility specified, it needs to convert # from the facility name to the facility code, and # set it as SYSLOG_FACILITY argument of journal.send() facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) >> 3 journal.send(MESSAGE=u"%s %s" % (module, journal_msg), SYSLOG_FACILITY=facility, **dict(journal_args)) else: journal.send(MESSAGE=u"%s %s" % (module, journal_msg), **dict(journal_args)) except IOError: # fall back to syslog since logging to journal failed 
self._log_to_syslog(syslog_msg) else: self._log_to_syslog(syslog_msg) def _log_invocation(self): ''' log that ansible ran the module ''' # TODO: generalize a separate log function and make log_invocation use it # Sanitize possible password argument when logging. log_args = dict() for param in self.params: canon = self.aliases.get(param, param) arg_opts = self.argument_spec.get(canon, {}) no_log = arg_opts.get('no_log', False) if self.boolean(no_log): log_args[param] = 'NOT_LOGGING_PARAMETER' # try to capture all passwords/passphrase named fields missed by no_log elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False): # skip boolean and enums as they are about 'password' state log_args[param] = 'NOT_LOGGING_PASSWORD' self.warn('Module did not set no_log for %s' % param) else: param_val = self.params[param] if not isinstance(param_val, (text_type, binary_type)): param_val = str(param_val) elif isinstance(param_val, text_type): param_val = param_val.encode('utf-8') log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values) msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()] if msg: msg = 'Invoked with %s' % ' '.join(msg) else: msg = 'Invoked' self.log(msg, log_args=log_args) def _set_cwd(self): try: cwd = os.getcwd() if not os.access(cwd, os.F_OK | os.R_OK): raise Exception() return cwd except Exception: # we don't have access to the cwd, probably because of sudo. # Try and move to a neutral location to prevent errors for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]: try: if os.access(cwd, os.F_OK | os.R_OK): os.chdir(cwd) return cwd except Exception: pass # we won't error here, as it may *not* be a problem, # and we don't want to break modules unnecessarily return None def get_bin_path(self, arg, required=False, opt_dirs=None): ''' Find system executable in PATH. :param arg: The executable to find. 
:param required: if executable is not found and required is ``True``, fail_json :param opt_dirs: optional list of directories to search in addition to ``PATH`` :returns: if found return full path; otherwise return None ''' bin_path = None try: bin_path = get_bin_path(arg, required, opt_dirs) except ValueError as e: self.fail_json(msg=to_text(e)) return bin_path def boolean(self, arg): '''Convert the argument to a boolean''' if arg is None: return arg try: return boolean(arg) except TypeError as e: self.fail_json(msg=to_native(e)) def jsonify(self, data): try: return jsonify(data) except UnicodeError as e: self.fail_json(msg=to_text(e)) def from_json(self, data): return json.loads(data) def add_cleanup_file(self, path): if path not in self.cleanup_files: self.cleanup_files.append(path) def do_cleanup_files(self): for path in self.cleanup_files: self.cleanup(path) def _return_formatted(self, kwargs): self.add_path_info(kwargs) if 'invocation' not in kwargs: kwargs['invocation'] = {'module_args': self.params} if 'warnings' in kwargs: if isinstance(kwargs['warnings'], list): for w in kwargs['warnings']: self.warn(w) else: self.warn(kwargs['warnings']) if self._warnings: kwargs['warnings'] = self._warnings if 'deprecations' in kwargs: if isinstance(kwargs['deprecations'], list): for d in kwargs['deprecations']: if isinstance(d, SEQUENCETYPE) and len(d) == 2: self.deprecate(d[0], version=d[1]) elif isinstance(d, Mapping): self.deprecate(d['msg'], version=d.get('version', None)) else: self.deprecate(d) else: self.deprecate(kwargs['deprecations']) if self._deprecations: kwargs['deprecations'] = self._deprecations kwargs = remove_values(kwargs, self.no_log_values) print('\n%s' % self.jsonify(kwargs)) def exit_json(self, **kwargs): ''' return from the module, without error ''' self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(0) def fail_json(self, **kwargs): ''' return from the module, with an error message ''' if 'msg' not in kwargs: raise 
AssertionError("implementation error -- msg to explain the error is required") kwargs['failed'] = True # Add traceback if debug or high verbosity and it is missing # NOTE: Badly named as exception, it really always has been a traceback if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3): if PY2: # On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\ ''.join(traceback.format_tb(sys.exc_info()[2])) else: kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2])) self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(1) def fail_on_missing_params(self, required_params=None): if not required_params: return try: check_missing_parameters(self.params, required_params) except TypeError as e: self.fail_json(msg=to_native(e)) def digest_from_file(self, filename, algorithm): ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. ''' b_filename = to_bytes(filename, errors='surrogate_or_strict') if not os.path.exists(b_filename): return None if os.path.isdir(b_filename): self.fail_json(msg="attempted to take checksum of directory: %s" % filename) # preserve old behaviour where the third parameter was a hash algorithm object if hasattr(algorithm, 'hexdigest'): digest_method = algorithm else: try: digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]() except KeyError: self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. 
Available algorithms: %s" % (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS))) blocksize = 64 * 1024 infile = open(os.path.realpath(b_filename), 'rb') block = infile.read(blocksize) while block: digest_method.update(block) block = infile.read(blocksize) infile.close() return digest_method.hexdigest() def md5(self, filename): ''' Return MD5 hex digest of local file using digest_from_file(). Do not use this function unless you have no other choice for: 1) Optional backwards compatibility 2) Compatibility with a third party protocol This function will not work on systems complying with FIPS-140-2. Most uses of this function can use the module.sha1 function instead. ''' if 'md5' not in AVAILABLE_HASH_ALGORITHMS: raise ValueError('MD5 not available. Possibly running in FIPS mode') return self.digest_from_file(filename, 'md5') def sha1(self, filename): ''' Return SHA1 hex digest of local file using digest_from_file(). ''' return self.digest_from_file(filename, 'sha1') def sha256(self, filename): ''' Return SHA-256 hex digest of local file using digest_from_file(). 
''' return self.digest_from_file(filename, 'sha256') def backup_local(self, fn): '''make a date-marked backup of the specified file, return True or False on success or failure''' backupdest = '' if os.path.exists(fn): # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) backupdest = '%s.%s.%s' % (fn, os.getpid(), ext) try: self.preserved_copy(fn, backupdest) except (shutil.Error, IOError) as e: self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e))) return backupdest def cleanup(self, tmpfile): if os.path.exists(tmpfile): try: os.unlink(tmpfile) except OSError as e: sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e))) def preserved_copy(self, src, dest): """Copy a file with preserved ownership, permissions and context""" # shutil.copy2(src, dst) # Similar to shutil.copy(), but metadata is copied as well - in fact, # this is just shutil.copy() followed by copystat(). This is similar # to the Unix command cp -p. # # shutil.copystat(src, dst) # Copy the permission bits, last access time, last modification time, # and flags from src to dst. The file contents, owner, and group are # unaffected. src and dst are path names given as strings. 
shutil.copy2(src, dest) # Set the context if self.selinux_enabled(): context = self.selinux_context(src) self.set_context_if_different(dest, context, False) # chown it try: dest_stat = os.stat(src) tmp_stat = os.stat(dest) if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): os.chown(dest, dest_stat.st_uid, dest_stat.st_gid) except OSError as e: if e.errno != errno.EPERM: raise # Set the attributes current_attribs = self.get_file_attributes(src) current_attribs = current_attribs.get('attr_flags', '') self.set_attributes_if_different(dest, current_attribs, True) def atomic_move(self, src, dest, unsafe_writes=False): '''atomically move src to dest, copying attributes from dest, returns true on success it uses os.rename to ensure this as it is an atomic operation, rest of the function is to work around limitations, corner cases and ensure selinux context is saved if possible''' context = None dest_stat = None b_src = to_bytes(src, errors='surrogate_or_strict') b_dest = to_bytes(dest, errors='surrogate_or_strict') if os.path.exists(b_dest): try: dest_stat = os.stat(b_dest) # copy mode and ownership os.chmod(b_src, dest_stat.st_mode & PERM_BITS) os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid) # try to copy flags if possible if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'): try: os.chflags(b_src, dest_stat.st_flags) except OSError as e: for err in 'EOPNOTSUPP', 'ENOTSUP': if hasattr(errno, err) and e.errno == getattr(errno, err): break else: raise except OSError as e: if e.errno != errno.EPERM: raise if self.selinux_enabled(): context = self.selinux_context(dest) else: if self.selinux_enabled(): context = self.selinux_default_context(dest) creating = not os.path.exists(b_dest) try: # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic. 
os.rename(b_src, b_dest) except (IOError, OSError) as e: if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc()) else: # Use bytes here. In the shippable CI, this fails with # a UnicodeError with surrogateescape'd strings for an unknown # reason (doesn't happen in a local Ubuntu16.04 VM) b_dest_dir = os.path.dirname(b_dest) b_suffix = os.path.basename(b_dest) error_msg = None tmp_dest_name = None try: tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp', dir=b_dest_dir, suffix=b_suffix) except (OSError, IOError) as e: error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e)) except TypeError: # We expect that this is happening because python3.4.x and # below can't handle byte strings in mkstemp(). Traceback # would end in something like: # file = _os.path.join(dir, pre + name + suf) # TypeError: can't concat bytes to str error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. 
' 'Please use Python2.x or Python3.5 or greater.') finally: if error_msg: if unsafe_writes: self._unsafe_writes(b_src, b_dest) else: self.fail_json(msg=error_msg, exception=traceback.format_exc()) if tmp_dest_name: b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict') try: try: # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host) os.close(tmp_dest_fd) # leaves tmp file behind when sudo and not root try: shutil.move(b_src, b_tmp_dest_name) except OSError: # cleanup will happen by 'rm' of tmpdir # copy2 will preserve some metadata shutil.copy2(b_src, b_tmp_dest_name) if self.selinux_enabled(): self.set_context_if_different( b_tmp_dest_name, context, False) try: tmp_stat = os.stat(b_tmp_dest_name) if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid) except OSError as e: if e.errno != errno.EPERM: raise try: os.rename(b_tmp_dest_name, b_dest) except (shutil.Error, OSError, IOError) as e: if unsafe_writes and e.errno == errno.EBUSY: self._unsafe_writes(b_tmp_dest_name, b_dest) else: self.fail_json(msg='Unable to make %s into to %s, failed final rename from %s: %s' % (src, dest, b_tmp_dest_name, to_native(e)), exception=traceback.format_exc()) except (shutil.Error, OSError, IOError) as e: self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc()) finally: self.cleanup(b_tmp_dest_name) if creating: # make sure the file has the correct permissions # based on the current value of umask umask = os.umask(0) os.umask(umask) os.chmod(b_dest, DEFAULT_PERM & ~umask) try: os.chown(b_dest, os.geteuid(), os.getegid()) except OSError: # We're okay with trying our best here. If the user is not # root (or old Unices) they won't be able to chown. 
pass if self.selinux_enabled(): # rename might not preserve context self.set_context_if_different(dest, context, False) def _unsafe_writes(self, src, dest): # sadly there are some situations where we cannot ensure atomicity, but only if # the user insists and we get the appropriate error we update the file unsafely try: out_dest = in_src = None try: out_dest = open(dest, 'wb') in_src = open(src, 'rb') shutil.copyfileobj(in_src, out_dest) finally: # assuring closed files in 2.4 compatible way if out_dest: out_dest.close() if in_src: in_src.close() except (shutil.Error, OSError, IOError) as e: self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)), exception=traceback.format_exc()) def _read_from_pipes(self, rpipes, rfds, file_descriptor): data = b('') if file_descriptor in rfds: data = os.read(file_descriptor.fileno(), self.get_buffer_size(file_descriptor)) if data == b(''): rpipes.remove(file_descriptor) return data def _clean_args(self, args): if not self._clean: # create a printable version of the command for use in reporting later, # which strips out things like passwords from the args list to_clean_args = args if PY2: if isinstance(args, text_type): to_clean_args = to_bytes(args) else: if isinstance(args, binary_type): to_clean_args = to_text(args) if isinstance(args, (text_type, binary_type)): to_clean_args = shlex.split(to_clean_args) clean_args = [] is_passwd = False for arg in (to_native(a) for a in to_clean_args): if is_passwd: is_passwd = False clean_args.append('********') continue if PASSWD_ARG_RE.match(arg): sep_idx = arg.find('=') if sep_idx > -1: clean_args.append('%s=********' % arg[:sep_idx]) continue else: is_passwd = True arg = heuristic_log_sanitize(arg, self.no_log_values) clean_args.append(arg) self._clean = ' '.join(shlex_quote(arg) for arg in clean_args) return self._clean def _restore_signal_handlers(self): # Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses. 
if PY2 and sys.platform != 'win32': signal.signal(signal.SIGPIPE, signal.SIG_DFL) def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict', expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None): ''' Execute a command, returns rc, stdout, and stderr. :arg args: is the command to run * If args is a list, the command will be run with shell=False. * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False * If args is a string and use_unsafe_shell=True it runs with shell=True. :kw check_rc: Whether to call fail_json in case of non zero RC. Default False :kw close_fds: See documentation for subprocess.Popen(). Default True :kw executable: See documentation for subprocess.Popen(). Default None :kw data: If given, information to write to the stdin of the command :kw binary_data: If False, append a newline to the data. Default False :kw path_prefix: If given, additional path to find the command in. This adds to the PATH environment variable so helper commands in the same directory can also be found :kw cwd: If given, working directory to run the command inside :kw use_unsafe_shell: See `args` parameter. Default False :kw prompt_regex: Regex string (not a compiled regex) which can be used to detect prompts in the stdout which would otherwise cause the execution to hang (especially if no input data is specified) :kw environ_update: dictionary to *update* os.environ with :kw umask: Umask to be used when running the command. Default None :kw encoding: Since we return native strings, on python3 we need to know the encoding to use to transform from bytes to text. If you want to always get bytes back, use encoding=None. The default is "utf-8". This does not affect transformation of strings given as args. 
:kw errors: Since we return native strings, on python3 we need to transform stdout and stderr from bytes to text. If the bytes are undecodable in the ``encoding`` specified, then use this error handler to deal with them. The default is ``surrogate_or_strict`` which means that the bytes will be decoded using the surrogateescape error handler if available (available on all python3 versions we support) otherwise a UnicodeError traceback will be raised. This does not affect transformations of strings given as args. :kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument dictates whether ``~`` is expanded in paths and environment variables are expanded before running the command. When ``True`` a string such as ``$SHELL`` will be expanded regardless of escaping. When ``False`` and ``use_unsafe_shell=False`` no path or variable expansion will be done. :kw pass_fds: When running on python3 this argument dictates which file descriptors should be passed to an underlying ``Popen`` constructor. :kw before_communicate_callback: This function will be called after ``Popen`` object will be created but before communicating to the process. (``Popen`` object will be passed to callback as a first argument) :returns: A 3-tuple of return code (integer), stdout (native string), and stderr (native string). On python2, stdout and stderr are both byte strings. On python3, stdout and stderr are text strings converted according to the encoding and errors parameters. If you want byte strings on python3, use encoding=None to turn decoding to text off. 
''' # used by clean args later on self._clean = None if not isinstance(args, (list, binary_type, text_type)): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) shell = False if use_unsafe_shell: # stringify args for unsafe/direct shell usage if isinstance(args, list): args = " ".join([shlex_quote(x) for x in args]) # not set explicitly, check if set by controller if executable: args = [executable, '-c', args] elif self._shell not in (None, '/bin/sh'): args = [self._shell, '-c', args] else: shell = True else: # ensure args are a list if isinstance(args, (binary_type, text_type)): # On python2.6 and below, shlex has problems with text type # On python3, shlex needs a text type. if PY2: args = to_bytes(args, errors='surrogate_or_strict') elif PY3: args = to_text(args, errors='surrogateescape') args = shlex.split(args) # expand ``~`` in paths, and all environment vars if expand_user_and_vars: args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None] else: args = [x for x in args if x is not None] prompt_re = None if prompt_regex: if isinstance(prompt_regex, text_type): if PY3: prompt_regex = to_bytes(prompt_regex, errors='surrogateescape') elif PY2: prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict') try: prompt_re = re.compile(prompt_regex, re.MULTILINE) except re.error: self.fail_json(msg="invalid prompt regular expression given to run_command") rc = 0 msg = None st_in = None # Manipulate the environ we'll send to the new process old_env_vals = {} # We can set this from both an attribute and per call for key, val in self.run_command_environ_update.items(): old_env_vals[key] = os.environ.get(key, None) os.environ[key] = val if environ_update: for key, val in environ_update.items(): old_env_vals[key] = os.environ.get(key, None) os.environ[key] = val if path_prefix: old_env_vals['PATH'] = os.environ['PATH'] os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH']) # If 
using test-module and explode, the remote lib path will resemble ... # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py # If using ansible or ansible-playbook with a remote system ... # /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py # Clean out python paths set by ansiballz if 'PYTHONPATH' in os.environ: pypaths = os.environ['PYTHONPATH'].split(':') pypaths = [x for x in pypaths if not x.endswith('/ansible_modlib.zip') and not x.endswith('/debug_dir')] os.environ['PYTHONPATH'] = ':'.join(pypaths) if not os.environ['PYTHONPATH']: del os.environ['PYTHONPATH'] if data: st_in = subprocess.PIPE kwargs = dict( executable=executable, shell=shell, close_fds=close_fds, stdin=st_in, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=self._restore_signal_handlers, ) if PY3 and pass_fds: kwargs["pass_fds"] = pass_fds # store the pwd prev_dir = os.getcwd() # make sure we're in the right working directory if cwd and os.path.isdir(cwd): cwd = os.path.abspath(os.path.expanduser(cwd)) kwargs['cwd'] = cwd try: os.chdir(cwd) except (OSError, IOError) as e: self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)), exception=traceback.format_exc()) old_umask = None if umask: old_umask = os.umask(umask) try: if self._debug: self.log('Executing: ' + self._clean_args(args)) cmd = subprocess.Popen(args, **kwargs) if before_communicate_callback: before_communicate_callback(cmd) # the communication logic here is essentially taken from that # of the _communicate() function in ssh.py stdout = b('') stderr = b('') rpipes = [cmd.stdout, cmd.stderr] if data: if not binary_data: data += '\n' if isinstance(data, text_type): data = to_bytes(data) cmd.stdin.write(data) cmd.stdin.close() while True: rfds, wfds, efds = select.select(rpipes, [], rpipes, 1) stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout) stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr) # if we're checking for prompts, do it now if prompt_re: if 
prompt_re.search(stdout) and not data: if encoding: stdout = to_native(stdout, encoding=encoding, errors=errors) return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated if (not rpipes or not rfds) and cmd.poll() is not None: break # No pipes are left to read but process is not yet terminated # Only then it is safe to wait for the process to be finished # NOTE: Actually cmd.poll() is always None here if rpipes is empty elif not rpipes and cmd.poll() is None: cmd.wait() # The process is terminated. Since no pipes to read from are # left, there is no need to call select() again. break cmd.stdout.close() cmd.stderr.close() rc = cmd.returncode except (OSError, IOError) as e: self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e))) self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args)) except Exception as e: self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc()))) self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args)) # Restore env settings for key, val in old_env_vals.items(): if val is None: del os.environ[key] else: os.environ[key] = val if old_umask: os.umask(old_umask) if rc != 0 and check_rc: msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values) self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg) # reset the pwd os.chdir(prev_dir) if encoding is not None: return (rc, to_native(stdout, encoding=encoding, errors=errors), to_native(stderr, encoding=encoding, errors=errors)) return (rc, stdout, stderr) def append_to_file(self, filename, str): filename = os.path.expandvars(os.path.expanduser(filename)) fh = open(filename, 'a') fh.write(str) fh.close() def bytes_to_human(self, size): return bytes_to_human(size) # 
for backwards compatibility pretty_bytes = bytes_to_human def human_to_bytes(self, number, isbits=False): return human_to_bytes(number, isbits) # # Backwards compat # # In 2.0, moved from inside the module to the toplevel is_executable = is_executable @staticmethod def get_buffer_size(fd): try: # 1032 == FZ_GETPIPE_SZ buffer_size = fcntl.fcntl(fd, 1032) except Exception: try: # not as exact as above, but should be good enough for most platforms that fail the previous call buffer_size = select.PIPE_BUF except Exception: buffer_size = 9000 # use sane default JIC return buffer_size def get_module_path(): return os.path.dirname(os.path.realpath(__file__))
unknown
codeparrot/codeparrot-clean
# Copyright 2010-2015 RethinkDB, all rights reserved.

import errno
import json
import numbers
import socket
import struct
import time
import ssl

try:
    from importlib import import_module
except ImportError:
    def import_module(name, package=None):
        # song & dance needed to do relative import in 2.6, which
        # doesn't have importlib
        return __import__(name[1:], globals(), locals(), [], 1)

from . import ql2_pb2 as p

__all__ = ['connect', 'set_loop_type', 'Connection', 'Cursor']

pErrorType = p.Response.ErrorType
pResponse = p.Response.ResponseType
pQuery = p.Query.QueryType

from .errors import *
from .ast import RqlQuery, RqlTopLevelQuery, DB, Repl
from .ast import recursively_convert_pseudotypes

try:
    from ssl import match_hostname, CertificateError
except ImportError:
    from backports.ssl_match_hostname import match_hostname, CertificateError

try:
    xrange
except NameError:
    xrange = range

try:
    {}.iteritems
    dict_items = lambda d: d.iteritems()
except AttributeError:
    dict_items = lambda d: d.items()


def decodeUTF(inputPipe):
    # attempt to decode input as utf-8 with fallbacks to get something useful
    try:
        return inputPipe.decode('utf-8', errors='ignore')
    except TypeError:
        try:
            return inputPipe.decode('utf-8')
        except UnicodeError:
            return repr(inputPipe)


def convert_pseudo(value, query):
    """Convert ReQL pseudo-types in `value` per the query's global optargs."""
    return recursively_convert_pseudotypes(value, query.global_optargs)


def maybe_profile(value, res):
    """Wrap `value` with the server profile when profiling was requested."""
    if res.profile is not None:
        return {'value': value, 'profile': res.profile}
    return value


class Query(object):
    """A single client->server query (START/CONTINUE/STOP/NOREPLY_WAIT)."""

    def __init__(self, type, token, term, global_optargs):
        self.type = type
        self.token = token
        self.term = term
        self.global_optargs = global_optargs

    def serialize(self):
        """Serialize to the wire format: 8-byte token, 4-byte length, JSON body."""
        message = [self.type]
        if self.term is not None:
            message.append(self.term.build())
        if self.global_optargs is not None:
            optargs = {}
            for k, v in dict_items(self.global_optargs):
                optargs[k] = v.build() if isinstance(v, RqlQuery) else v
            message.append(optargs)
        query_str = json.dumps(message,
                               ensure_ascii=False,
                               allow_nan=False).encode('utf-8')
        query_header = struct.pack('<QL', self.token, len(query_str))
        return query_header + query_str


class Response(object):
    """A decoded server response for a given query token."""

    def __init__(self, token, json_str):
        try:
            json_str = json_str.decode('utf-8')
        except AttributeError:
            pass  # Python3 str objects are already utf-8
        self.token = token
        full_response = json.loads(json_str)
        self.type = full_response["t"]
        self.data = full_response["r"]
        self.backtrace = full_response.get("b", None)
        self.profile = full_response.get("p", None)
        self.error_type = full_response.get("e", None)

    def make_error(self, query):
        """Build the appropriate Rql*Error exception for an error response."""
        if self.type == pResponse.CLIENT_ERROR:
            return RqlClientError(self.data[0], query.term, self.backtrace)
        elif self.type == pResponse.COMPILE_ERROR:
            return RqlCompileError(self.data[0], query.term, self.backtrace)
        elif self.type == pResponse.RUNTIME_ERROR:
            return {
                pErrorType.INTERNAL: RqlInternalError,
                pErrorType.RESOURCE: RqlResourceError,
                pErrorType.LOGIC: RqlLogicError,
                pErrorType.NON_EXISTENCE: RqlNonExistenceError,
                pErrorType.OP_FAILED: RqlOpFailedError,
                pErrorType.OP_INDETERMINATE: RqlOpIndeterminateError,
                pErrorType.USER: RqlUserError
            }.get(self.error_type, RqlRuntimeError)(
                self.data[0], query.term, self.backtrace)
        # BUGFIX: `%` previously bound only to the second string literal
        # ('"..." + " ..." % self.type'), which raised
        # "TypeError: not all arguments converted during string formatting"
        # instead of returning this driver error.
        return RqlDriverError(("Unknown Response type %d encountered "
                               "in a response.") % self.type)


# This class encapsulates all shared behavior between cursor implementations.
# It provides iteration over the cursor using `iter`, as well as incremental
# iteration using `next`.
#
# query - the original query that resulted in the cursor, used for:
#     query.term - the term to be used for pretty-printing backtraces
#     query.token - the token to use for subsequent CONTINUE and STOP requests
#     query.global_optargs - dictate how to format results
# items - The current list of items obtained from the server, this is
#     added to in `_extend`, which is called by the ConnectionInstance when a
#     new response arrives for this cursor.
# outstanding_requests - The number of requests that are currently awaiting
#     a response from the server.  This will typically be 0 or 1 unless the
#     cursor is exhausted, but this can be higher if `close` is called.
# threshold - a CONTINUE request will be sent when the length of `items` goes
#     below this number.
# error - indicates the current state of the cursor:
#     None - there is more data available from the server and no errors have
#         occurred yet
#     Exception - an error has occurred in the cursor and should be raised
#         to the user once all results in `items` have been returned.  This
#         will be a RqlCursorEmpty exception if the cursor completed
#         successfully.
#
# A class that derives from this should implement the following functions:
#     def _get_next(self, timeout):
#         where `timeout` is the maximum amount of time (in seconds) to wait
#         for the next result in the cursor before raising a RqlTimeoutError.
#     def _empty_error(self):
#         which returns the appropriate error to be raised when the cursor
#         is empty.
class Cursor(object):
    def __init__(self, conn_instance, query):
        self.conn = conn_instance
        self.query = query
        self.items = list()
        self.outstanding_requests = 1
        self.threshold = 0
        self.error = None

        self.conn._cursor_cache[self.query.token] = self

    def close(self):
        """Mark the cursor done and send a STOP request if still connected."""
        if self.error is None:
            self.error = self._empty_error()
            if self.conn.is_open():
                self.outstanding_requests += 1
                self.conn._parent._stop(self)

    @staticmethod
    def _wait_to_timeout(wait):
        # True -> wait forever, False -> don't wait, number -> wait that long
        if isinstance(wait, bool):
            return None if wait else 0
        elif isinstance(wait, numbers.Real) and wait >= 0:
            return wait
        else:
            raise RqlDriverError("Invalid wait timeout '%s'" % str(wait))

    def next(self, wait=True):
        return self._get_next(Cursor._wait_to_timeout(wait))

    def _extend(self, res):
        """Fold a server response for this cursor into `items`/`error`."""
        self.outstanding_requests -= 1
        self.threshold = len(res.data)
        if self.error is None:
            if res.type == pResponse.SUCCESS_PARTIAL:
                self.items.extend(res.data)
            elif res.type == pResponse.SUCCESS_SEQUENCE:
                self.items.extend(res.data)
                self.error = self._empty_error()
            else:
                self.error = res.make_error(self.query)
        self._maybe_fetch_batch()

        if self.outstanding_requests == 0 and self.error is not None:
            del self.conn._cursor_cache[res.token]

    def __str__(self):
        val_str = ', '.join(map(repr, self.items[:10]))
        if len(self.items) > 10 or self.error is None:
            val_str += ', ...'
        if self.error is None:
            err_str = 'streaming'
        elif isinstance(self.error, RqlCursorEmpty):
            err_str = 'done streaming'
        else:
            err_str = 'error: %s' % repr(self.error)
        return "%s (%s):\n[%s]" % (object.__str__(self), err_str, val_str)

    def _error(self, message):
        # Set an error and extend with a dummy response to trigger any waiters
        if self.error is None:
            self.error = RqlRuntimeError(message, self.query.term, [])
            dummy_response = Response(self.query.token,
                                      '{"t":%d,"r":[]}' %
                                      pResponse.SUCCESS_SEQUENCE)
            self._extend(dummy_response)

    def _maybe_fetch_batch(self):
        # Send a CONTINUE once the local buffer drops below the threshold
        if self.error is None and \
           len(self.items) <= self.threshold and \
           self.outstanding_requests == 0:
            self.outstanding_requests += 1
            self.conn._parent._continue(self)


class DefaultCursorEmpty(RqlCursorEmpty, StopIteration):
    # StopIteration lets exhausted cursors terminate plain `for` loops
    def __init__(self, term):
        RqlCursorEmpty.__init__(self, term)


class DefaultCursor(Cursor):
    """Blocking cursor implementation used by the default connection."""

    def __iter__(self):
        return self

    def __next__(self):
        return self._get_next(None)

    def _empty_error(self):
        return DefaultCursorEmpty(self.query.term)

    def _get_next(self, timeout):
        deadline = None if timeout is None else time.time() + timeout
        while len(self.items) == 0:
            self._maybe_fetch_batch()
            if self.error is not None:
                raise self.error
            self.conn._read_response(self.query.token, deadline)
        return convert_pseudo(self.items.pop(0), self.query)


class SocketWrapper(object):
    """A TCP (optionally TLS) socket that performs the driver handshake."""

    def __init__(self, parent, timeout):
        self.host = parent._parent.host
        self.port = parent._parent.port
        self._read_buffer = None
        self._socket = None
        self.ssl = parent._parent.ssl

        deadline = time.time() + timeout

        try:
            self._socket = \
                socket.create_connection((self.host, self.port), timeout)
            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

            if len(self.ssl) > 0:
                ssl_context = self._get_ssl_context(self.ssl["ca_certs"])
                try:
                    self._socket = ssl_context.wrap_socket(
                        self._socket, server_hostname=self.host)
                except IOError as exc:
                    self._socket.close()
                    raise RqlDriverError(
                        "SSL handshake failed: %s" % (str(exc),))
                try:
                    match_hostname(self._socket.getpeercert(),
                                   hostname=self.host)
                except CertificateError:
                    self._socket.close()
                    raise

            self.sendall(parent._parent.handshake)

            # The response from the server is a null-terminated string
            response = b''
            while True:
                char = self.recvall(1, deadline)
                if char == b'\0':
                    break
                response += char
        except RqlDriverError as ex:
            self.close()
            error = str(ex)\
                .replace('receiving from', 'during handshake with')\
                .replace('sending to', 'during handshake with')
            raise RqlDriverError(error)
        except socket.timeout as ex:
            self.close()
            raise RqlTimeoutError()
        except Exception as ex:
            self.close()
            raise RqlDriverError("Could not connect to %s:%s. Error: %s" %
                                 (self.host, self.port, ex))

        if response != b"SUCCESS":
            self.close()
            raise RqlDriverError(("Server dropped connection " +
                                  "with message: \"%s\"") %
                                 decodeUTF(response).strip())

    def is_open(self):
        return self._socket is not None

    def close(self):
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
                self._socket.close()
            except Exception:
                pass
            finally:
                self._socket = None

    def recvall(self, length, deadline):
        """Receive exactly `length` bytes or raise on timeout/disconnect."""
        res = b'' if self._read_buffer is None else self._read_buffer
        timeout = None if deadline is None else max(0, deadline - time.time())
        self._socket.settimeout(timeout)
        while len(res) < length:
            while True:
                try:
                    chunk = self._socket.recv(length - len(res))
                    self._socket.settimeout(None)
                    break
                except socket.timeout:
                    # stash the partial read so a retry can resume
                    self._read_buffer = res
                    self._socket.settimeout(None)
                    raise RqlTimeoutError()
                except IOError as ex:
                    if ex.errno == errno.ECONNRESET:
                        self.close()
                        raise RqlDriverError("Connection is closed.")
                    elif ex.errno == errno.EWOULDBLOCK:
                        # This should only happen with a timeout of 0
                        raise RqlTimeoutError()
                    elif ex.errno != errno.EINTR:
                        self.close()
                        raise RqlDriverError(('Connection interrupted ' +
                                              'receiving from %s:%s - %s') %
                                             (self.host, self.port, str(ex)))
                except Exception as ex:
                    self.close()
                    raise RqlDriverError('Error receiving from %s:%s - %s' %
                                         (self.host, self.port, str(ex)))
                except:
                    self.close()
                    raise
            if len(chunk) == 0:
                self.close()
                raise RqlDriverError("Connection is closed.")
            res += chunk
        return res

    def sendall(self, data):
        """Send all of `data`, retrying on EINTR."""
        offset = 0
        while offset < len(data):
            try:
                offset += self._socket.send(data[offset:])
            except IOError as ex:
                if ex.errno == errno.ECONNRESET:
                    self.close()
                    raise RqlDriverError("Connection is closed.")
                elif ex.errno != errno.EINTR:
                    self.close()
                    raise RqlDriverError(('Connection interrupted ' +
                                          'sending to %s:%s - %s') %
                                         (self.host, self.port, str(ex)))
            except Exception as ex:
                self.close()
                raise RqlDriverError('Error sending to %s:%s - %s' %
                                     (self.host, self.port, str(ex)))
            except:
                self.close()
                raise

    def _get_ssl_context(self, ca_certs):
        # Negotiate the highest protocol both sides support, but never
        # SSLv2/SSLv3; always verify the server certificate.
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        if hasattr(ctx, "options"):
            ctx.options |= getattr(ssl, "OP_NO_SSLv2", 0)
            ctx.options |= getattr(ssl, "OP_NO_SSLv3", 0)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = True
        ctx.load_verify_locations(ca_certs)
        return ctx


class ConnectionInstance(object):
    """One live connection: owns the socket and the cursor cache."""

    def __init__(self, parent):
        self._parent = parent
        self._cursor_cache = {}
        self._header_in_progress = None
        self._socket = None
        self._closing = False

    def connect(self, timeout):
        self._socket = SocketWrapper(self, timeout)
        return self._parent

    def is_open(self):
        return self._socket.is_open()

    def close(self, noreply_wait, token):
        self._closing = True

        # Cursors may remove themselves when errored, so copy a list of them
        for cursor in list(self._cursor_cache.values()):
            cursor._error("Connection is closed.")
        self._cursor_cache = {}

        try:
            if noreply_wait:
                noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
                self.run_query(noreply, False)
        finally:
            self._socket.close()
            self._header_in_progress = None

    def run_query(self, query, noreply):
        self._socket.sendall(query.serialize())
        if noreply:
            return None

        # Get response
        res = self._read_response(query.token)

        if res.type == pResponse.SUCCESS_ATOM:
            return maybe_profile(convert_pseudo(res.data[0], query), res)
        elif res.type in (pResponse.SUCCESS_PARTIAL,
                          pResponse.SUCCESS_SEQUENCE):
            cursor = DefaultCursor(self, query)
            cursor._extend(res)
            return maybe_profile(cursor, res)
        elif res.type == pResponse.WAIT_COMPLETE:
            return None
        else:
            raise res.make_error(query)

    def _read_response(self, token, deadline=None):
        # We may get an async continue result, in which case we save
        # it and read the next response
        while True:
            try:
                # The first 8 bytes give the corresponding query token
                # of this response.  The next 4 bytes give the
                # expected length of this response.
                if self._header_in_progress is None:
                    self._header_in_progress \
                        = self._socket.recvall(12, deadline)
                (res_token, res_len,) \
                    = struct.unpack("<qL", self._header_in_progress)
                res_buf = self._socket.recvall(res_len, deadline)
                self._header_in_progress = None
            except KeyboardInterrupt as ex:
                # Cancel outstanding queries by dropping this connection,
                # then create a new connection for the user's convenience.
                self._parent.reconnect(noreply_wait=False)
                raise ex

            # Construct response
            res = Response(res_token, res_buf)

            cursor = self._cursor_cache.get(res.token)
            if cursor is not None:
                self._handle_cursor_response(cursor, res)

            if res.token == token:
                return res
            elif not self._closing and cursor is None:
                # This response is corrupted or not intended for us
                self.close(False, None)
                raise RqlDriverError("Unexpected response received.")

    def _handle_cursor_response(self, cursor, res):
        cursor._extend(res)


class Connection(object):
    """User-facing connection object; delegates I/O to a ConnectionInstance."""

    _r = None

    def __init__(self, conn_type, host, port, db, auth_key, timeout, ssl,
                 **kwargs):
        self.db = db
        self.auth_key = auth_key.encode('ascii')
        # V0_4 handshake: version, auth key length+bytes, wire protocol
        self.handshake = \
            struct.pack("<2L", p.VersionDummy.Version.V0_4,
                        len(self.auth_key)) + \
            self.auth_key + \
            struct.pack("<L", p.VersionDummy.Protocol.JSON)
        self.host = host
        self.port = port
        self.connect_timeout = timeout
        self.ssl = ssl
        self._conn_type = conn_type
        self._child_kwargs = kwargs
        self._instance = None
        self._next_token = 0

    def reconnect(self, noreply_wait=True, timeout=None):
        if timeout is None:
            timeout = self.connect_timeout

        self.close(noreply_wait)

        # Do this here rather than in the constructor so that we don't throw
        # in the constructor (which doesn't play well with Tornado)
        try:
            self.port = int(self.port)
        except ValueError:
            raise RqlDriverError("Could not convert port %s to an integer."
                                 % self.port)

        self._instance = self._conn_type(self, **self._child_kwargs)
        return self._instance.connect(timeout)

    # Not thread safe. Sets this connection as global state that will be used
    # by subsequence calls to `query.run`. Useful for trying out RethinkDB in
    # a Python repl environment.
    def repl(self):
        Repl.set(self)
        return self

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close(noreply_wait=False)

    def use(self, db):
        self.db = db

    def is_open(self):
        return self._instance is not None and self._instance.is_open()

    def check_open(self):
        if self._instance is None or not self._instance.is_open():
            raise RqlDriverError('Connection is closed.')

    def close(self, noreply_wait=True):
        if self._instance is not None:
            instance = self._instance
            noreply_wait_token = self._new_token()
            self._instance = None
            self._next_token = 0
            return instance.close(noreply_wait, noreply_wait_token)

    def noreply_wait(self):
        self.check_open()
        q = Query(pQuery.NOREPLY_WAIT, self._new_token(), None, None)
        return self._instance.run_query(q, False)

    def _new_token(self):
        res = self._next_token
        self._next_token += 1
        return res

    def _start(self, term, **global_optargs):
        self.check_open()
        if 'db' in global_optargs or self.db is not None:
            global_optargs['db'] = DB(global_optargs.get('db', self.db))
        q = Query(pQuery.START, self._new_token(), term, global_optargs)
        return self._instance.run_query(q, global_optargs.get('noreply',
                                                              False))

    def _continue(self, cursor):
        self.check_open()
        q = Query(pQuery.CONTINUE, cursor.query.token, None, None)
        return self._instance.run_query(q, True)

    def _stop(self, cursor):
        self.check_open()
        q = Query(pQuery.STOP, cursor.query.token, None, None)
        return self._instance.run_query(q, True)


class DefaultConnection(Connection):
    def __init__(self, *args, **kwargs):
        Connection.__init__(self, ConnectionInstance, *args, **kwargs)


connection_type = DefaultConnection


def connect(host='localhost', port=28015, db=None, auth_key="", timeout=20,
            ssl=None, **kwargs):
    """Create a connection and perform the handshake.

    FIX: the ssl default was a mutable ``dict()`` shared across calls;
    ``None`` is now normalized to a fresh empty dict (same observable
    behavior for all callers).
    """
    ssl = {} if ssl is None else ssl
    global connection_type
    conn = connection_type(host, port, db, auth_key, timeout, ssl, **kwargs)
    return conn.reconnect(timeout=timeout)


def set_loop_type(library):
    """Swap the Connection implementation (e.g. 'tornado', 'twisted')."""
    global connection_type
    mod = import_module('.net_%s' % library, package=__package__)
    connection_type = mod.Connection
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""Rename `hearing_section` to `alternative`"""

# revision identifiers, used by Alembic.
revision = '14051cff79e'
down_revision = '51051f5b195'

from alembic import op

# Tables carrying a `hearing_section_id` foreign-key column to rename.
_REFERRING_TABLES = ('comment', 'image', 'comment_version')
# Indexed columns on the versioning table (upgrade creation order).
_VERSION_INDEX_COLUMNS = ('end_transaction_id', 'operation_type',
                          'transaction_id')


def upgrade():
    """Rename the tables, their FK columns, and rebuild the indexes."""
    op.rename_table('hearing_section', 'alternative')
    op.rename_table('hearing_section_version', 'alternative_version')
    for table in _REFERRING_TABLES:
        op.alter_column(
            table,
            'hearing_section_id',
            new_column_name='alternative_id'
        )
    # Recreate each versioning index under its new name, dropping the old
    # one (which survived the table rename under its original name).
    for column in _VERSION_INDEX_COLUMNS:
        op.create_index(
            op.f('ix_alternative_version_%s' % column),
            'alternative_version',
            [column],
            unique=False
        )
        op.drop_index(
            'ix_hearing_section_version_%s' % column,
            table_name='alternative_version'
        )
    op.create_index(
        op.f('ix_image_alternative_id'),
        'image',
        ['alternative_id'],
        unique=False
    )
    op.drop_index(
        'ix_image_hearing_section_id',
        table_name='image'
    )


def downgrade():
    """Reverse `upgrade`: restore the original table/column/index names."""
    op.drop_index(
        op.f('ix_image_alternative_id'),
        table_name='image'
    )
    for column in reversed(_VERSION_INDEX_COLUMNS):
        op.drop_index(
            op.f('ix_alternative_version_%s' % column),
            table_name='alternative_version'
        )
    op.rename_table('alternative', 'hearing_section')
    op.rename_table('alternative_version', 'hearing_section_version')
    for table in _REFERRING_TABLES:
        op.alter_column(
            table,
            'alternative_id',
            new_column_name='hearing_section_id'
        )
    op.create_index(
        'ix_image_hearing_section_id',
        'image',
        ['hearing_section_id'],
        unique=False
    )
    for column in reversed(_VERSION_INDEX_COLUMNS):
        op.create_index(
            'ix_hearing_section_version_%s' % column,
            'hearing_section_version',
            [column],
            unique=False
        )
unknown
codeparrot/codeparrot-clean
#############################################################################
##                                                                         ##
##  This file is part of Owade : www.owade.org                             ##
##  Offline Windows Analyzer and Data Extractor                            ##
##                                                                         ##
##  Authors:                                                               ##
##  Elie Bursztein <owade@elie.im>                                         ##
##  Ivan Fontarensky <ivan.fontarensky@cassidian.com>                      ##
##  Matthieu Martin <matthieu.mar+owade@gmail.com>                         ##
##  Jean-Michel Picod <jean-michel.picod@cassidian.com>                    ##
##                                                                         ##
##  This program is distributed under GPLv3 licence (see LICENCE.txt)      ##
##                                                                         ##
#############################################################################

__author__ = "ashe"
__date__ = "$Aug 2, 2011 4:09:16 PM$"

import re

from owade.process import Process
from owade.tools.sizeList import SizeList


class Passwords(Process):
    """Aggregate credentials recovered by the per-program analyzers.

    Populates ``credentials_`` (list of dicts), plus deduplicated
    ``passwords_``, ``users_`` and ``mails_`` lists for one partition.
    """

    def __init__(self, internLog, terminalLog, partition):
        Process.__init__(self, internLog, terminalLog)
        self.partition_ = partition

    def credentials(self):
        """Walk the 'ProgramAnalyze' results and collect all credentials.

        Scans per-user browser/IM password stores and browser form history,
        then stores the aggregated results on the instance attributes.
        """
        credentials = []
        passwords = SizeList()
        users = SizeList()
        mails = SizeList()
        program = self.getDbGenericDic('ProgramAnalyze', self.partition_)
        for user in program:
            user = program[user]
            # (analyzer result key, human-readable software name)
            for creds in [('GetIE7Passwords', 'Internet Explorer'),
                          ('GetFirefoxPasswords', 'Firefox'),
                          ('GetChromePasswords', 'Chrome'),
                          ('GetSafariPasswords', 'Safari'),
                          ('GetTrillianAccounts', 'Trillian'),
                          ('GetGTalkAccounts', 'GTalk')]:
                software = creds[1]
                creds = user.get(creds[0], {})
                for cred in creds:
                    cred = creds[cred]
                    # skip non-dict entries (metadata keys mixed in)
                    if type(cred) is not dict:
                        continue
                    dic = {'password': cred['password'],
                           'login': cred['login'],
                           'domain': cred.get('domain', ''),
                           'software': software}
                    if dic not in credentials:
                        credentials.append(dic)
                    passwords.add(cred['password'])
                    login = cred['login']
                    users.add(login)
                    if isMail(login):
                        mails.add(login)
            # harvest usernames typed into web forms
            for forms in ['GetFirefoxHistory', 'GetChromeHistory']:
                forms = user.get(forms, {'forms': {}})
                forms = forms['forms']
                for form in forms:
                    form = forms[form]
                    value = form['value']
                    if form['fieldname'].lower() in ['username', 'user',
                                                     'login']:
                        users.add(value)
                        if isMail(value):
                            mails.add(value)
        self.credentials_ = credentials
        self.passwords_ = passwords.getList()
        self.users_ = users.getList()
        self.mails_ = mails.getList()

    def run(self):
        self.credentials()

    # Class-level defaults, filled in by __init__/credentials().
    partition_ = None
    credentials_ = None
    passwords_ = None
    users_ = None
    mails_ = None


def isMail(value):
    """Heuristic check: does `value` look like an email address?

    FIX: the dots in the pattern were previously unescaped, so `.`
    matched ANY character and dotless strings such as "user@hostnamex"
    were wrongly accepted. Note the TLD is still limited to 2-3 chars
    (plus an optional second label), matching the original intent.
    """
    if re.match(r'^[^@]+@[^.]+\.[^.]{2,3}(\.[^.]{2,3})?$', value):
        return True
    return False
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING
from uuid import UUID

import sqlalchemy as sa
from sqlalchemy import Boolean, ForeignKeyConstraint, Text, Uuid
from sqlalchemy.orm import Mapped, mapped_column, relationship

from airflow._shared.timezones import timezone
from airflow.models.base import Base
from airflow.models.hitl import HITLDetailPropertyMixin
from airflow.utils.sqlalchemy import UtcDateTime

if TYPE_CHECKING:
    from airflow.models.hitl import HITLDetail


class HITLDetailHistory(Base, HITLDetailPropertyMixin):
    """
    Store HITLDetail for old tries of TaskInstances.

    Column layout mirrors ``HITLDetail``; rows are built by copying an
    existing ``HITLDetail`` in ``__init__`` and re-keying it on the
    task-instance-history id.

    :meta private:
    """

    __tablename__ = "hitl_detail_history"
    # Primary key: the TaskInstanceHistory row this snapshot belongs to.
    ti_history_id: Mapped[UUID] = mapped_column(
        Uuid(),
        primary_key=True,
        nullable=False,
    )

    # User Request Detail
    options: Mapped[dict] = mapped_column(sa.JSON(), nullable=False)
    subject: Mapped[str] = mapped_column(Text, nullable=False)
    body: Mapped[str | None] = mapped_column(Text, nullable=True)
    defaults: Mapped[dict | None] = mapped_column(sa.JSON(), nullable=True)
    multiple: Mapped[bool | None] = mapped_column(Boolean, unique=False, default=False, nullable=True)
    params: Mapped[dict] = mapped_column(sa.JSON(), nullable=False, default={})
    assignees: Mapped[list[dict[str, str]] | None] = mapped_column(sa.JSON(), nullable=True)
    created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)

    # Response Content Detail
    responded_at: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
    responded_by: Mapped[dict | None] = mapped_column(sa.JSON(), nullable=True)
    chosen_options: Mapped[dict | None] = mapped_column(
        sa.JSON(),
        nullable=True,
        default=None,
    )
    params_input: Mapped[dict] = mapped_column(sa.JSON(), nullable=False, default={})

    task_instance = relationship(
        "TaskInstanceHistory",
        lazy="joined",
        back_populates="hitl_detail",
    )

    def __init__(self, hitl_detail: HITLDetail):
        """Snapshot `hitl_detail` column-by-column.

        The live row's ``ti_id`` becomes this row's ``ti_history_id``;
        every other mapped column is copied verbatim by name.
        """
        super().__init__()
        for column in self.__table__.columns:
            if column.name == "ti_history_id":
                # re-key on the history table's primary key
                setattr(self, column.name, hitl_detail.ti_id)
                continue
            setattr(self, column.name, getattr(hitl_detail, column.name))

    # Cascade with task_instance_history so snapshots follow their TI rows.
    __table_args__ = (
        ForeignKeyConstraint(
            (ti_history_id,),
            ["task_instance_history.task_instance_id"],
            name="hitl_detail_history_tih_fkey",
            ondelete="CASCADE",
            onupdate="CASCADE",
        ),
    )
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/models/hitl_history.py
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import errno import mock import operator import os import unittest import six.moves.cPickle as pickle from array import array from collections import Counter, defaultdict from math import ceil from tempfile import mkdtemp from shutil import rmtree import sys import random import uuid import itertools from six.moves import range from swift.common import exceptions from swift.common import ring from swift.common.ring import utils from swift.common.ring.builder import MAX_BALANCE def _partition_counts(builder, key='id'): """ Returns a dictionary mapping the given device key to (number of partitions assigned to that key). """ return Counter(builder.devs[dev_id][key] for part2dev_id in builder._replica2part2dev for dev_id in part2dev_id) class TestRingBuilder(unittest.TestCase): def setUp(self): self.testdir = mkdtemp() def tearDown(self): rmtree(self.testdir, ignore_errors=1) def _get_population_by_region(self, builder): """ Returns a dictionary mapping region to number of partitions in that region. 
""" return _partition_counts(builder, key='region') def test_init(self): rb = ring.RingBuilder(8, 3, 1) self.assertEqual(rb.part_power, 8) self.assertEqual(rb.replicas, 3) self.assertEqual(rb.min_part_hours, 1) self.assertEqual(rb.parts, 2 ** 8) self.assertEqual(rb.devs, []) self.assertFalse(rb.devs_changed) self.assertEqual(rb.version, 0) self.assertIsNotNone(rb._last_part_moves) def test_overlarge_part_powers(self): expected_msg = 'part_power must be at most 32 (was 33)' with self.assertRaises(ValueError) as ctx: ring.RingBuilder(33, 3, 1) self.assertEqual(str(ctx.exception), expected_msg) def test_insufficient_replicas(self): expected_msg = 'replicas must be at least 1 (was 0.999000)' with self.assertRaises(ValueError) as ctx: ring.RingBuilder(8, 0.999, 1) self.assertEqual(str(ctx.exception), expected_msg) def test_negative_min_part_hours(self): expected_msg = 'min_part_hours must be non-negative (was -1)' with self.assertRaises(ValueError) as ctx: ring.RingBuilder(8, 3, -1) self.assertEqual(str(ctx.exception), expected_msg) def test_deepcopy(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sdb1'}) # more devices in zone #1 rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sdc1'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 
'device': 'sdd1'}) rb.rebalance() rb_copy = copy.deepcopy(rb) self.assertEqual(rb.to_dict(), rb_copy.to_dict()) self.assertIsNot(rb.devs, rb_copy.devs) self.assertIsNot(rb._replica2part2dev, rb_copy._replica2part2dev) self.assertIsNot(rb._last_part_moves, rb_copy._last_part_moves) self.assertIsNot(rb._remove_devs, rb_copy._remove_devs) self.assertIsNot(rb._dispersion_graph, rb_copy._dispersion_graph) def test_get_ring(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.remove_dev(1) rb.rebalance() r = rb.get_ring() self.assertIsInstance(r, ring.RingData) r2 = rb.get_ring() self.assertIs(r, r2) rb.rebalance() r3 = rb.get_ring() self.assertIsNot(r3, r2) r4 = rb.get_ring() self.assertIs(r3, r4) def test_rebalance_with_seed(self): devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)] ring_builders = [] for n in range(3): rb = ring.RingBuilder(8, 3, 1) idx = 0 for zone, port in devs: for d in ('sda1', 'sdb1'): rb.add_dev({'id': idx, 'region': 0, 'zone': zone, 'ip': '127.0.0.1', 'port': port, 'device': d, 'weight': 1}) idx += 1 ring_builders.append(rb) rb0 = ring_builders[0] rb1 = ring_builders[1] rb2 = ring_builders[2] r0 = rb0.get_ring() self.assertIs(rb0.get_ring(), r0) rb0.rebalance() # NO SEED rb1.rebalance(seed=10) rb2.rebalance(seed=10) r1 = rb1.get_ring() r2 = rb2.get_ring() self.assertIsNot(rb0.get_ring(), r0) self.assertNotEqual(r0.to_dict(), r1.to_dict()) self.assertEqual(r1.to_dict(), r2.to_dict()) # check that random state is reset pre_state = random.getstate() rb2.rebalance(seed=10) self.assertEqual(pre_state, random.getstate(), 
"Random state was not reset") pre_state = random.getstate() with mock.patch.object(rb2, "_build_replica_plan", side_effect=Exception()): self.assertRaises(Exception, rb2.rebalance, seed=10) self.assertEqual(pre_state, random.getstate(), "Random state was not reset") def test_rebalance_part_on_deleted_other_part_on_drained(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.add_dev({'id': 5, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'}) rb.rebalance(seed=1) # We want a partition where 1 replica is on a removed device, 1 # replica is on a 0-weight device, and 1 on a normal device. To # guarantee we have one, we see where partition 123 is, then # manipulate its devices accordingly. 
zero_weight_dev_id = rb._replica2part2dev[1][123] delete_dev_id = rb._replica2part2dev[2][123] rb.set_dev_weight(zero_weight_dev_id, 0.0) rb.remove_dev(delete_dev_id) rb.rebalance() def test_set_replicas(self): rb = ring.RingBuilder(8, 3.2, 1) rb.devs_changed = False rb.set_replicas(3.25) self.assertTrue(rb.devs_changed) rb.devs_changed = False rb.set_replicas(3.2500001) self.assertFalse(rb.devs_changed) def test_add_dev(self): rb = ring.RingBuilder(8, 3, 1) dev = {'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000} dev_id = rb.add_dev(dev) self.assertRaises(exceptions.DuplicateDeviceError, rb.add_dev, dev) self.assertEqual(dev_id, 0) rb = ring.RingBuilder(8, 3, 1) # test add new dev with no id dev_id = rb.add_dev({'zone': 0, 'region': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 6200}) self.assertEqual(rb.devs[0]['id'], 0) self.assertEqual(dev_id, 0) # test add another dev with no id dev_id = rb.add_dev({'zone': 3, 'region': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 6200}) self.assertEqual(rb.devs[1]['id'], 1) self.assertEqual(dev_id, 1) # some keys are required self.assertRaises(ValueError, rb.add_dev, {}) stub_dev = {'weight': 1, 'ip': '127.0.0.1', 'port': 7000} for key in (stub_dev.keys()): dev = stub_dev.copy() dev.pop(key) self.assertRaises(ValueError, rb.add_dev, dev) def test_set_dev_weight(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.rebalance() r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 
self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256}) rb.set_dev_weight(0, 0.75) rb.set_dev_weight(1, 0.25) rb.pretend_min_part_hours_passed() rb.rebalance() r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 self.assertEqual(counts, {0: 192, 1: 64, 2: 256, 3: 256}) def test_remove_dev(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.rebalance() r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 self.assertEqual(counts, {0: 192, 1: 192, 2: 192, 3: 192}) rb.remove_dev(1) rb.pretend_min_part_hours_passed() rb.rebalance() r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 self.assertEqual(counts, {0: 256, 2: 256, 3: 256}) def test_round_off_error(self): # 3 nodes with 11 disks each is particularly problematic. Probably has # to do with the binary repr. of 1/33? Those ones look suspicious... 
# # >>> bin(int(struct.pack('!f', 1.0/(33)).encode('hex'), 16)) # '0b111100111110000011111000010000' rb = ring.RingBuilder(8, 3, 1) for dev_id, (region, zone) in enumerate( 11 * [(0, 0), (1, 10), (1, 11)]): rb.add_dev({'id': dev_id, 'region': region, 'zone': zone, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000 + region * 100 + zone, 'device': 'sda%d' % dev_id}) rb.rebalance() self.assertEqual(_partition_counts(rb, 'zone'), {0: 256, 10: 256, 11: 256}) wanted_by_zone = defaultdict(lambda: defaultdict(int)) for dev in rb._iter_devs(): wanted_by_zone[dev['zone']][dev['parts_wanted']] += 1 # We're nicely balanced, but parts_wanted is slightly lumpy # because reasons. self.assertEqual(wanted_by_zone, { 0: {0: 10, 1: 1}, 10: {0: 11}, 11: {0: 10, -1: 1}}) def test_remove_a_lot(self): rb = ring.RingBuilder(3, 3, 1) rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 4, 'device': 'd4', 'ip': '10.0.0.2', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 5, 'device': 'd5', 'ip': '10.0.0.3', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) rb.rebalance() rb.validate() # this has to put more than 1/3 of the partitions in the # cluster on removed devices in order to ensure that at least # one partition has multiple replicas that need to move. 
# # (for an N-replica ring, it's more than 1/N of the # partitions, of course) rb.remove_dev(3) rb.remove_dev(4) rb.remove_dev(5) rb.rebalance() rb.validate() def test_remove_zero_weighted(self): rb = ring.RingBuilder(8, 3, 0) rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2', 'port': 6202, 'weight': 0.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1', 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.rebalance() rb.remove_dev(1) parts, balance, removed = rb.rebalance() self.assertEqual(removed, 1) def test_shuffled_gather(self): if self._shuffled_gather_helper() and \ self._shuffled_gather_helper(): raise AssertionError('It is highly likely the ring is no ' 'longer shuffling the set of partitions ' 'to reassign on a rebalance.') def _shuffled_gather_helper(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.rebalance() rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) replica_plan = rb._build_replica_plan() rb._set_parts_wanted(replica_plan) for dev in rb._iter_devs(): dev['tiers'] = utils.tiers_for_dev(dev) assign_parts = defaultdict(list) rb._gather_parts_for_balance(assign_parts, replica_plan, False) max_run = 0 run = 0 last_part = 0 for part, _ in assign_parts.items(): if part > last_part: run += 1 else: if run > max_run: max_run = run run = 0 last_part = part if run > max_run: max_run = run return max_run > 
len(assign_parts) / 2 def test_initial_balance(self): # 2 boxes, 2 drives each in zone 1 # 1 box, 2 drives in zone 2 # # This is balanceable, but there used to be some nondeterminism in # rebalance() that would sometimes give you an imbalanced ring. rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0, 'ip': '10.1.1.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0, 'ip': '10.1.1.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0, 'ip': '10.1.1.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0, 'ip': '10.1.1.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0, 'ip': '10.1.1.3', 'port': 10000, 'device': 'sda'}) rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0, 'ip': '10.1.1.3', 'port': 10000, 'device': 'sdb'}) _, balance, _ = rb.rebalance(seed=2) # maybe not *perfect*, but should be close self.assertLessEqual(balance, 1) def test_multitier_partial(self): # Multitier test, nothing full rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 2, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 3, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.rebalance() rb.validate() for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['region'][dev['region']] += 1 counts['zone'][dev['zone']] += 1 if any(c > 1 for c in counts['region'].values()): raise AssertionError( "Partition %d not evenly region-distributed (got %r)" % (part, counts['region'])) if any(c > 1 for c in 
counts['zone'].values()): raise AssertionError( "Partition %d not evenly zone-distributed (got %r)" % (part, counts['zone'])) # Multitier test, zones full, nodes not full rb = ring.RingBuilder(8, 6, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sdg'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sdh'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sdi'}) rb.rebalance() rb.validate() for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 if counts['zone'] != {0: 2, 1: 2, 2: 2}: raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) for dev_id, replica_count in counts['dev_id'].items(): if replica_count > 1: raise AssertionError( "Partition %d is on device %d more than once (%r)" % (part, dev_id, counts['dev_id'])) def test_multitier_full(self): # Multitier test, #replicas == #devs rb = ring.RingBuilder(8, 6, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 
1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'}) rb.rebalance() rb.validate() for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 if counts['zone'] != {0: 2, 1: 2, 2: 2}: raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) for dev_id, replica_count in counts['dev_id'].items(): if replica_count != 1: raise AssertionError( "Partition %d is on device %d %d times, not 1 (%r)" % (part, dev_id, replica_count, counts['dev_id'])) def test_multitier_overfull(self): # Multitier test, #replicas > #zones (to prove even distribution) rb = ring.RingBuilder(8, 8, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 
'port': 10001, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'}) rb.rebalance() rb.validate() for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 self.assertEqual(8, sum(counts['zone'].values())) for zone, replica_count in counts['zone'].items(): if replica_count not in (2, 3): raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) for dev_id, replica_count in counts['dev_id'].items(): if replica_count not in (1, 2): raise AssertionError( "Partition %d is on device %d %d times, " "not 1 or 2 (%r)" % (part, dev_id, replica_count, counts['dev_id'])) def test_multitier_expansion_more_devices(self): rb = ring.RingBuilder(8, 6, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.rebalance() rb.validate() rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 
'zone': 2, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) for _ in range(5): rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate() for part in range(rb.parts): counts = dict(zone=defaultdict(int), dev_id=defaultdict(int)) for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 self.assertEqual({0: 2, 1: 2, 2: 2}, dict(counts['zone'])) # each part is assigned once to six unique devices self.assertEqual(list(counts['dev_id'].values()), [1] * 6) self.assertEqual(len(set(counts['dev_id'].keys())), 6) def test_multitier_part_moves_with_0_min_part_hours(self): rb = ring.RingBuilder(8, 3, 0) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'}) rb.rebalance() rb.validate() # min_part_hours is 0, so we're clear to move 2 replicas to # new devs rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'}) rb.rebalance() rb.validate() for part in range(rb.parts): devs = set() for replica in range(rb.replicas): devs.add(rb._replica2part2dev[replica][part]) if len(devs) != 3: raise AssertionError( "Partition %d not on 3 devs (got %r)" % (part, devs)) def 
test_multitier_part_moves_with_positive_min_part_hours(self): rb = ring.RingBuilder(8, 3, 99) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'}) rb.rebalance() rb.validate() # min_part_hours is >0, so we'll only be able to move 1 # replica to a new home rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'}) rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate() for part in range(rb.parts): devs = set() for replica in range(rb.replicas): devs.add(rb._replica2part2dev[replica][part]) if not any(rb.devs[dev_id]['zone'] == 1 for dev_id in devs): raise AssertionError( "Partition %d did not move (got %r)" % (part, devs)) def test_multitier_dont_move_too_many_replicas(self): rb = ring.RingBuilder(8, 3, 1) # there'll be at least one replica in z0 and z1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.rebalance() rb.validate() # only 1 replica should move rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 
'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'}) rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate() for part in range(rb.parts): zones = set() for replica in range(rb.replicas): zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone']) if len(zones) != 3: raise AssertionError( "Partition %d not in 3 zones (got %r)" % (part, zones)) if 0 not in zones or 1 not in zones: raise AssertionError( "Partition %d not in zones 0 and 1 (got %r)" % (part, zones)) def test_min_part_hours_zero_will_move_one_replica(self): rb = ring.RingBuilder(8, 3, 0) # there'll be at least one replica in z0 and z1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.rebalance(seed=1) rb.validate() rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'}) rb.rebalance(seed=3) rb.validate() self.assertEqual(0, rb.dispersion) # Only one replica could move, so some zones are quite unbalanced self.assertAlmostEqual(rb.get_balance(), 66.66, delta=0.5) # There was only zone 0 and 1 before adding more devices. 
Only one # replica should have been moved, therefore we expect 256 parts in zone # 0 and 1, and a total of 256 in zone 2,3, and 4 expected = defaultdict(int, {0: 256, 1: 256, 2: 86, 3: 85, 4: 85}) self.assertEqual(expected, _partition_counts(rb, key='zone')) zone_histogram = defaultdict(int) for part in range(rb.parts): zones = [ rb.devs[rb._replica2part2dev[replica][part]]['zone'] for replica in range(rb.replicas)] zone_histogram[tuple(sorted(zones))] += 1 # We expect that every partition moved exactly one replica expected = { (0, 1, 2): 86, (0, 1, 3): 85, (0, 1, 4): 85, } self.assertEqual(zone_histogram, expected) # After rebalancing one more times, we expect that everything is in a # good state rb.rebalance(seed=3) self.assertEqual(0, rb.dispersion) # a balance of w/i a 1% isn't too bad for 3 replicas on 7 # devices when part power is only 8 self.assertAlmostEqual(rb.get_balance(), 0, delta=0.5) # every zone has either 153 or 154 parts for zone, count in _partition_counts( rb, key='zone').items(): self.assertAlmostEqual(153.5, count, delta=1) parts_with_moved_count = defaultdict(int) for part in range(rb.parts): zones = set() for replica in range(rb.replicas): zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone']) moved_replicas = len(zones - {0, 1}) parts_with_moved_count[moved_replicas] += 1 # as usual, the real numbers depend on the seed, but we want to # validate a few things here: # # 1) every part had to move one replica to hit dispersion (so no # one can have a moved count 0) # # 2) it's quite reasonable that some small percent of parts will # have a replica in {0, 1, X} (meaning only one replica of the # part moved) # # 3) when min_part_hours is 0, more than one replica of a part # can move in a rebalance, and since that movement would get to # better dispersion faster we expect to observe most parts in # {[0,1], X, X} (meaning *two* replicas of the part moved) # # 4) there's plenty of weight in z0 & z1 to hold a whole # replicanth, so there is 
no reason for any part to have to move # all three replicas out of those zones (meaning no one can have # a moved count 3) # expected = { 1: 52, 2: 204, } self.assertEqual(parts_with_moved_count, expected) def test_ever_rebalanced(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) self.assertFalse(rb.ever_rebalanced) builder_file = os.path.join(self.testdir, 'test.buider') rb.save(builder_file) rb = ring.RingBuilder.load(builder_file) self.assertFalse(rb.ever_rebalanced) rb.rebalance() self.assertTrue(rb.ever_rebalanced) rb.save(builder_file) rb = ring.RingBuilder.load(builder_file) self.assertTrue(rb.ever_rebalanced) def test_rerebalance(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) self.assertFalse(rb.ever_rebalanced) rb.rebalance() self.assertTrue(rb.ever_rebalanced) counts = _partition_counts(rb) self.assertEqual(counts, {0: 256, 1: 256, 2: 256}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.pretend_min_part_hours_passed() rb.rebalance() self.assertTrue(rb.ever_rebalanced) counts = _partition_counts(rb) self.assertEqual(counts, {0: 192, 1: 192, 2: 192, 3: 192}) rb.set_dev_weight(3, 100) rb.rebalance() counts = _partition_counts(rb) self.assertEqual(counts[3], 256) def test_add_rebalance_add_rebalance_delete_rebalance(self): # Test for 
https://bugs.launchpad.net/swift/+bug/845952 # min_part of 0 to allow for rapid rebalancing rb = ring.RingBuilder(8, 3, 0) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.rebalance() rb.validate() rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'}) rb.rebalance() rb.validate() rb.remove_dev(1) # well now we have only one device in z0 rb.set_overload(0.5) rb.rebalance() rb.validate() def test_remove_last_partition_from_zero_weight(self): rb = ring.RingBuilder(4, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 1, 'weight': 1.0, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 3, 'weight': 1.0, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 3, 'weight': 1.0, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 1.0, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 0.4, 'ip': '127.0.0.3', 'port': 10001, 'device': 'zero'}) zero_weight_dev = 3 rb.rebalance(seed=1) # We want at least one partition with replicas only in zone 2 and 3 # due to device weights. 
It would *like* to spread out into zone 1, # but can't, due to device weight. # # Also, we want such a partition to have a replica on device 3, # which we will then reduce to zero weight. This should cause the # removal of the replica from device 3. # # Getting this to happen by chance is hard, so let's just set up a # builder so that it's in the state we want. This is a synthetic # example; while the bug has happened on a real cluster, that # builder file had a part_power of 16, so its contents are much too # big to include here. rb._replica2part2dev = [ # these are the relevant ones # | | | # v v v array('H', [2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2]), array('H', [1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4]), array('H', [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 5, 6, 2, 5, 6])] # fix up bookkeeping new_dev_parts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: new_dev_parts[dev_id] += 1 for dev in rb._iter_devs(): dev['parts'] = new_dev_parts[dev['id']] rb.set_dev_weight(zero_weight_dev, 0.0) rb.pretend_min_part_hours_passed() rb.rebalance(seed=1) node_counts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: node_counts[dev_id] += 1 self.assertEqual(node_counts[zero_weight_dev], 0) # it's as balanced as it gets, so nothing moves anymore rb.pretend_min_part_hours_passed() parts_moved, _balance, _removed = rb.rebalance(seed=1) new_node_counts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: new_node_counts[dev_id] += 1 del node_counts[zero_weight_dev] self.assertEqual(node_counts, new_node_counts) self.assertEqual(parts_moved, 0) def test_part_swapping_problem(self): rb = ring.RingBuilder(4, 3, 1) # 127.0.0.1 (2 devs) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) # 127.0.0.2 
(3 devs) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'}) expected = { '127.0.0.1': 1.2, '127.0.0.2': 1.7999999999999998, } for wr in (rb._build_weighted_replicas_by_tier(), rb._build_wanted_replicas_by_tier(), rb._build_target_replicas_by_tier()): self.assertEqual(expected, {t[-1]: r for (t, r) in wr.items() if len(t) == 3}) self.assertEqual(rb.get_required_overload(), 0) rb.rebalance(seed=3) # so 127.0.0.1 ended up with... tier = (0, 0, '127.0.0.1') # ... 6 parts with 1 replicas self.assertEqual(rb._dispersion_graph[tier][1], 12) # ... 4 parts with 2 replicas self.assertEqual(rb._dispersion_graph[tier][2], 4) # but since we only have two tiers, this is *totally* dispersed self.assertEqual(0, rb.dispersion) # small rings are hard to balance... 
expected = {0: 10, 1: 10, 2: 10, 3: 9, 4: 9} self.assertEqual(expected, {d['id']: d['parts'] for d in rb._iter_devs()}) # everyone wants 9.6 parts expected = { 0: 4.166666666666671, 1: 4.166666666666671, 2: 4.166666666666671, 3: -6.25, 4: -6.25, } self.assertEqual(expected, rb._build_balance_per_dev()) # original sorted _replica2part2dev """ rb._replica2part2dev = [ array('H', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]), array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]), array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])] """ # now imagine if we came along this _replica2part2dev through no # fault of our own; if instead of the 12 parts with only one # replica on 127.0.0.1 being split evenly (6 and 6) on device's # 0 and 1 - device 1 inexplicitly had 3 extra parts rb._replica2part2dev = [ # these are the relevant one's here # | | | # v v v array('H', [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]), array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])] # fix up bookkeeping new_dev_parts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: new_dev_parts[dev_id] += 1 for dev in rb._iter_devs(): dev['parts'] = new_dev_parts[dev['id']] # reset the _last_part_gather_start otherwise # there is a chance it'll unluckly wrap and try and # move one of the device 1's from replica 2 # causing the intermitant failure in bug 1724356 rb._last_part_gather_start = 0 rb.pretend_min_part_hours_passed() rb.rebalance() expected = { 0: 4.166666666666671, 1: 4.166666666666671, 2: 4.166666666666671, 3: -6.25, 4: -6.25, } self.assertEqual(expected, rb._build_balance_per_dev()) self.assertEqual(rb.get_balance(), 6.25) def test_wrong_tier_with_no_where_to_go(self): rb = ring.RingBuilder(4, 3, 1) # 127.0.0.1 (even devices) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 2, 'region': 
0, 'zone': 0, 'weight': 900, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 900, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 900, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) # 127.0.0.2 (odd devices) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 500, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 500, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdd'}) expected = { '127.0.0.1': 1.75, '127.0.0.2': 1.25, } for wr in (rb._build_weighted_replicas_by_tier(), rb._build_wanted_replicas_by_tier(), rb._build_target_replicas_by_tier()): self.assertEqual(expected, {t[-1]: r for (t, r) in wr.items() if len(t) == 3}) self.assertEqual(rb.get_required_overload(), 0) rb.rebalance(seed=3) # so 127.0.0.1 ended up with... tier = (0, 0, '127.0.0.1') # ... 4 parts with 1 replicas self.assertEqual(rb._dispersion_graph[tier][1], 4) # ... 12 parts with 2 replicas self.assertEqual(rb._dispersion_graph[tier][2], 12) # ... 
and of course 0 parts with 3 replicas self.assertEqual(rb._dispersion_graph[tier][3], 0) # but since we only have two tiers, this is *totally* dispersed self.assertEqual(0, rb.dispersion) # small rings are hard to balance, but it's possible when # part-replicas (3 * 2 ** 4) can go evenly into device weights # (4800) like we've done here expected = { 0: 1, 2: 9, 4: 9, 6: 9, 1: 5, 3: 5, 5: 5, 7: 5, } self.assertEqual(expected, {d['id']: d['parts'] for d in rb._iter_devs()}) expected = { 0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0, 7: 0.0, } self.assertEqual(expected, rb._build_balance_per_dev()) # all devices have exactly the # of parts they want expected = { 0: 0, 2: 0, 4: 0, 6: 0, 1: 0, 3: 0, 5: 0, 7: 0, } self.assertEqual(expected, {d['id']: d['parts_wanted'] for d in rb._iter_devs()}) # original sorted _replica2part2dev """ rb._replica2part2dev = [ array('H', [0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, ]), array('H', [4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1, ]), array('H', [1, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, ])] """ # now imagine if we came along this _replica2part2dev through no # fault of our own; and device 0 had extra parts, but both # copies of the other replicas were already in the other tier! 
rb._replica2part2dev = [ # these are the relevant one's here # | | # v v array('H', [2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0]), array('H', [4, 4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1]), array('H', [1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7])] # fix up bookkeeping new_dev_parts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: new_dev_parts[dev_id] += 1 for dev in rb._iter_devs(): dev['parts'] = new_dev_parts[dev['id']] replica_plan = rb._build_replica_plan() rb._set_parts_wanted(replica_plan) expected = { 0: -1, # this device wants to shed 2: 0, 4: 0, 6: 0, 1: 0, 3: 1, # there's devices with room on the other server 5: 0, 7: 0, } self.assertEqual(expected, {d['id']: d['parts_wanted'] for d in rb._iter_devs()}) self.assertEqual(rb.get_balance(), 100) rb.pretend_min_part_hours_passed() # There's something like a 11% chance that we won't be able to get to # a balance of 0 (and a 6% chance that we won't change anything at all) # Pick a seed to make this pass. 
rb.rebalance(seed=123) self.assertEqual(rb.get_balance(), 0) def test_multiple_duplicate_device_assignment(self): rb = ring.RingBuilder(4, 4, 1) devs = [ 'r1z1-127.0.0.1:6200/d1', 'r1z1-127.0.0.1:6201/d2', 'r1z1-127.0.0.1:6202/d3', 'r1z1-127.0.0.1:33443/d4', 'r1z1-127.0.0.2:6200/d5', 'r1z1-127.0.0.2:6201/d6', 'r1z1-127.0.0.2:6202/d7', 'r1z1-127.0.0.2:6202/d8', ] for add_value in devs: dev = utils.parse_add_value(add_value) dev['weight'] = 1.0 rb.add_dev(dev) rb.rebalance() rb._replica2part2dev = [ # these are the relevant one's here # | | | | | # v v v v v array('H', [0, 1, 2, 3, 3, 0, 0, 0, 4, 6, 4, 4, 4, 4, 4, 4]), array('H', [0, 1, 3, 1, 1, 1, 1, 1, 5, 7, 5, 5, 5, 5, 5, 5]), array('H', [0, 1, 2, 2, 2, 2, 2, 2, 4, 6, 6, 6, 6, 6, 6, 6]), array('H', [0, 3, 2, 3, 3, 3, 3, 3, 5, 7, 7, 7, 7, 7, 7, 7]) # ^ # | # this sort of thing worked already ] # fix up bookkeeping new_dev_parts = defaultdict(int) for part2dev_id in rb._replica2part2dev: for dev_id in part2dev_id: new_dev_parts[dev_id] += 1 for dev in rb._iter_devs(): dev['parts'] = new_dev_parts[dev['id']] rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate() def test_region_fullness_with_balanceable_ring(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 2, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'}) rb.add_dev({'id': 5, 'region': 2, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10006, 'device': 'sda1'}) rb.add_dev({'id': 6, 'region': 3, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10007, 'device': 'sda1'}) 
rb.add_dev({'id': 7, 'region': 3, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10008, 'device': 'sda1'}) rb.rebalance(seed=2) population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 192, 1: 192, 2: 192, 3: 192}) def test_region_fullness_with_unbalanceable_ring(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 2, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.rebalance(seed=2) population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 512, 1: 256}) def test_adding_region_slowly_with_unbalanceable_ring(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 0.5, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'}) rb.rebalance(seed=2) rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 
'weight': 0.25, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 0.25, 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'}) rb.pretend_min_part_hours_passed() changed_parts, _balance, _removed = rb.rebalance(seed=2) # there's not enough room in r1 for every partition to have a replica # in it, so only 86 assignments occur in r1 (that's ~1/5 of the total, # since r1 has 1/5 of the weight). population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 682, 1: 86}) # really 86 parts *should* move (to the new region) but to avoid # accidentally picking up too many and causing some parts to randomly # flop around devices in the original region - our gather algorithm # is conservative when picking up only from devices that are for sure # holding more parts than they want (math.ceil() of the replica_plan) # which guarantees any parts picked up will have new homes in a better # tier or failure_domain. 
self.assertEqual(86, changed_parts) # and since there's not enough room, subsequent rebalances will not # cause additional assignments to r1 rb.pretend_min_part_hours_passed() rb.rebalance(seed=2) rb.validate() population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 682, 1: 86}) # after you add more weight, more partition assignments move rb.set_dev_weight(2, 0.5) rb.set_dev_weight(3, 0.5) rb.pretend_min_part_hours_passed() rb.rebalance(seed=2) rb.validate() population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 614, 1: 154}) rb.set_dev_weight(2, 1.0) rb.set_dev_weight(3, 1.0) rb.pretend_min_part_hours_passed() rb.rebalance(seed=2) rb.validate() population_by_region = self._get_population_by_region(rb) self.assertEqual(population_by_region, {0: 512, 1: 256}) def test_avoid_tier_change_new_region(self): rb = ring.RingBuilder(8, 3, 1) for i in range(5): rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.1', 'port': i, 'device': 'sda1'}) rb.rebalance(seed=2) # Add a new device in new region to a balanced ring rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 0, 'ip': '127.0.0.5', 'port': 10000, 'device': 'sda1'}) # Increase the weight of region 1 slowly moved_partitions = [] errors = [] for weight in range(0, 101, 10): rb.set_dev_weight(5, weight) rb.pretend_min_part_hours_passed() changed_parts, _balance, _removed = rb.rebalance(seed=2) rb.validate() moved_partitions.append(changed_parts) # Ensure that the second region has enough partitions # Otherwise there will be replicas at risk min_parts_for_r1 = ceil(weight / (500.0 + weight) * 768) parts_for_r1 = self._get_population_by_region(rb).get(1, 0) try: self.assertEqual(min_parts_for_r1, parts_for_r1) except AssertionError: errors.append('weight %s got %s parts but expected %s' % ( weight, parts_for_r1, min_parts_for_r1)) self.assertFalse(errors) # Number of partitions moved on each rebalance # 
10/510 * 768 ~ 15.06 -> move at least 15 partitions in first step
        ref = [0, 16, 14, 14, 13, 13, 13, 12, 11, 12, 10]
        self.assertEqual(ref, moved_partitions)

    def test_set_replicas_increase(self):
        """Raising the (fractional) replica count grows the last
        replica row incrementally."""
        rb = ring.RingBuilder(8, 2, 0)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.rebalance()
        rb.validate()

        # 2.1 replicas -> the third replica row covers 10% of 256 parts
        rb.replicas = 2.1
        rb.rebalance()
        rb.validate()
        self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
                         [256, 256, 25])

        # 2.2 replicas -> the partial row grows to 20% of 256 parts
        rb.replicas = 2.2
        rb.rebalance()
        rb.validate()
        self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
                         [256, 256, 51])

    def test_set_replicas_decrease(self):
        """Lowering the replica count shrinks/drops whole replica rows."""
        rb = ring.RingBuilder(4, 5, 0)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.rebalance()
        rb.validate()

        rb.replicas = 4.9
        rb.rebalance()
        rb.validate()
        self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
                         [16, 16, 16, 16, 14])

        # cross a couple of integer thresholds (4 and 3)
        rb.replicas = 2.5
        rb.rebalance()
        rb.validate()
        self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
                         [16, 16, 8])

    def test_fractional_replicas_rebalance(self):
        """A builder created with a fractional replica count rebalances
        and validates cleanly."""
        rb = ring.RingBuilder(8, 2.5, 0)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.rebalance()  # passes by not crashing
        rb.validate()   # also passes by not crashing
        # 2.5 replicas -> the third replica row covers half the parts
        self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
                         [256, 256, 128])

    def test_create_add_dev_add_replica_rebalance(self):
        """Raising replicas before the first rebalance must not crash."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.set_replicas(4)
        rb.rebalance()  # this would crash since parts_wanted was not set
        rb.validate()

    def test_reduce_replicas_after_remove_device(self):
        """After removing a device, rebalance fails until the replica
        count is reduced to something the remaining devices can hold."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.rebalance()
        rb.remove_dev(0)
        self.assertRaises(exceptions.RingValidationError, rb.rebalance)
        rb.set_replicas(2)
        rb.rebalance()
        rb.validate()

    def test_rebalance_post_upgrade(self):
        rb = ring.RingBuilder(8, 3, 1)
        # 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
        # which forces some rounding to happen.
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.rebalance() rb.validate() # Older versions of the ring builder code would round down when # computing parts_wanted, while the new code rounds up. Make sure we # can handle a ring built by the old method. # # This code mimics the old _set_parts_wanted. weight_of_one_part = rb.weight_of_one_part() for dev in rb._iter_devs(): if not dev['weight']: dev['parts_wanted'] = -rb.parts * rb.replicas else: dev['parts_wanted'] = ( int(weight_of_one_part * dev['weight']) - dev['parts']) rb.pretend_min_part_hours_passed() rb.rebalance() # this crashes unless rebalance resets parts_wanted rb.validate() def test_add_replicas_then_rebalance_respects_weight(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 
'device': 'sdg'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'}) rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'}) rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'}) rb.rebalance(seed=1) r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 self.assertEqual(counts, {0: 96, 1: 96, 2: 32, 3: 32, 4: 96, 5: 96, 6: 32, 7: 32, 8: 96, 9: 96, 10: 32, 11: 32}) rb.replicas *= 2 rb.rebalance(seed=1) r = rb.get_ring() counts = {} for part2dev_id in r._replica2part2dev_id: for dev_id in part2dev_id: counts[dev_id] = counts.get(dev_id, 0) + 1 self.assertEqual(counts, {0: 192, 1: 192, 2: 64, 3: 64, 4: 192, 5: 192, 6: 64, 7: 64, 8: 192, 9: 192, 10: 64, 11: 64}) def test_overload(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'}) rb.add_dev({'id': 8, 
'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'}) rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'}) rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'}) rb.rebalance(seed=12345) rb.validate() # sanity check: balance respects weights, so default part_counts = _partition_counts(rb, key='zone') self.assertEqual(part_counts[0], 192) self.assertEqual(part_counts[1], 192) self.assertEqual(part_counts[2], 384) # Devices 0 and 1 take 10% more than their fair shares by weight since # overload is 10% (0.1). rb.set_overload(0.1) rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) part_counts = _partition_counts(rb, key='zone') self.assertEqual({0: 212, 1: 211, 2: 345}, part_counts) # Now, devices 0 and 1 take 50% more than their fair shares by # weight. rb.set_overload(0.5) for _ in range(3): rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) part_counts = _partition_counts(rb, key='zone') self.assertEqual({0: 256, 1: 256, 2: 256}, part_counts) # Devices 0 and 1 may take up to 75% over their fair share, but the # placement algorithm only wants to spread things out evenly between # all drives, so the devices stay at 50% more. rb.set_overload(0.75) for _ in range(3): rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) part_counts = _partition_counts(rb, key='zone') self.assertEqual(part_counts[0], 256) self.assertEqual(part_counts[1], 256) self.assertEqual(part_counts[2], 256) def test_unoverload(self): # Start off needing overload to balance, then add capacity until we # don't need overload any more and see that things still balance. # Overload doesn't prevent optimal balancing. 
rb = ring.RingBuilder(8, 3, 1) rb.set_overload(0.125) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.rebalance(seed=12345) # sanity check: our overload is big enough to balance things part_counts = _partition_counts(rb, key='ip') self.assertEqual(part_counts['127.0.0.1'], 216) self.assertEqual(part_counts['127.0.0.2'], 216) self.assertEqual(part_counts['127.0.0.3'], 336) # Add some weight: balance improves for dev in rb.devs: if dev['ip'] in ('127.0.0.1', '127.0.0.2'): rb.set_dev_weight(dev['id'], 1.22) rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) part_counts = _partition_counts(rb, key='ip') self.assertEqual({ '127.0.0.1': 237, '127.0.0.2': 237, '127.0.0.3': 294, }, part_counts) # Even out the 
weights: balance becomes perfect for dev in rb.devs: if dev['ip'] in ('127.0.0.1', '127.0.0.2'): rb.set_dev_weight(dev['id'], 2) rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) part_counts = _partition_counts(rb, key='ip') self.assertEqual(part_counts['127.0.0.1'], 256) self.assertEqual(part_counts['127.0.0.2'], 256) self.assertEqual(part_counts['127.0.0.3'], 256) # Add a new server: balance stays optimal rb.add_dev({'id': 12, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'}) rb.add_dev({'id': 13, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'}) rb.add_dev({'id': 14, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'}) rb.add_dev({'id': 15, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'}) # we're moving more than 1/3 of the replicas but fewer than 2/3, so # we have to do this twice rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) rb.pretend_min_part_hours_passed() rb.rebalance(seed=12345) expected = { '127.0.0.1': 192, '127.0.0.2': 192, '127.0.0.3': 192, '127.0.0.4': 192, } part_counts = _partition_counts(rb, key='ip') self.assertEqual(part_counts, expected) def test_overload_keeps_balanceable_things_balanced_initially(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 
'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
        # a huge overload must not disturb a placement that weights alone
        # can already balance
        rb.set_overload(99999)
        rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb)
        self.assertEqual(part_counts, {
            0: 128,
            1: 128,
            2: 64,
            3: 64,
            4: 64,
            5: 64,
            6: 64,
            7: 64,
            8: 64,
            9: 64,
        })

    def test_overload_keeps_balanceable_things_balanced_on_rebalance(self):
        """As above, but the weights change after an initial rebalance."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
                    'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
                    'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
        rb.set_overload(99999)
        rb.rebalance(seed=123)
        part_counts = _partition_counts(rb)
        self.assertEqual(part_counts, {
            0: 128,
            1: 128,
            2: 64,
            3: 64,
            4: 64,
            5: 64,
            6: 64,
            7: 64,
            8: 64,
            9: 64,
        })

        # swap weights between 10.0.0.1 and 10.0.0.2
        rb.set_dev_weight(0, 4)
        rb.set_dev_weight(1, 4)
        rb.set_dev_weight(2, 8)
        # NOTE(review): dev 1 is set twice (first to 4, then to 8) while
        # dev 3 keeps weight 4 — the comment above says the weights are
        # being swapped, so this looks like a typo for
        # rb.set_dev_weight(3, 8). The seed-dependent assertion below
        # passes as written, so confirm the intended weights before
        # changing anything.
        rb.set_dev_weight(1, 8)
        rb.rebalance(seed=456)
        part_counts = _partition_counts(rb)
        self.assertEqual(part_counts, {
            0: 128,
            1: 128,
            2: 64,
            3: 64,
            4: 64,
            5: 64,
            6: 64,
            7: 64,
            8: 64,
            9: 64,
        })

    def test_server_per_port(self):
        """Disks that share an IP but use distinct ports still disperse
        replicas across distinct servers (IPs)."""
        # 3 servers, 3 disks each, with each disk on its own port
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'})

        rb.rebalance(seed=1)

        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'})

        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=1)

        # every part should have its replicas on 3 distinct IPs
        poorly_dispersed = []
        for part in range(rb.parts):
            on_nodes = set()
            for replica in range(rb.replicas):
                dev_id = rb._replica2part2dev[replica][part]
                on_nodes.add(rb.devs[dev_id]['ip'])
            if len(on_nodes) < rb.replicas:
                poorly_dispersed.append(part)
        self.assertEqual(poorly_dispersed, [])

    def test_load(self):
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
                 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1,
'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1', 'meta': 'meta1'}, {'id': 2, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1', 'meta': 'meta2'}, {'id': 3, 'region': 0, 'zone': 3, 'weight': 2, 'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}] for d in devs: rb.add_dev(d) rb.rebalance() real_pickle = pickle.load fake_open = mock.mock_open() io_error_not_found = IOError() io_error_not_found.errno = errno.ENOENT io_error_no_perm = IOError() io_error_no_perm.errno = errno.EPERM io_error_generic = IOError() io_error_generic.errno = errno.EOPNOTSUPP try: # test a legit builder fake_pickle = mock.Mock(return_value=rb) pickle.load = fake_pickle builder = ring.RingBuilder.load('fake.builder', open=fake_open) self.assertEqual(fake_pickle.call_count, 1) fake_open.assert_has_calls([mock.call('fake.builder', 'rb')]) self.assertEqual(builder, rb) fake_pickle.reset_mock() # test old style builder fake_pickle.return_value = rb.to_dict() pickle.load = fake_pickle builder = ring.RingBuilder.load('fake.builder', open=fake_open) fake_open.assert_has_calls([mock.call('fake.builder', 'rb')]) self.assertEqual(builder.devs, rb.devs) fake_pickle.reset_mock() # test old devs but no meta no_meta_builder = rb for dev in no_meta_builder.devs: del(dev['meta']) fake_pickle.return_value = no_meta_builder pickle.load = fake_pickle builder = ring.RingBuilder.load('fake.builder', open=fake_open) fake_open.assert_has_calls([mock.call('fake.builder', 'rb')]) self.assertEqual(builder.devs, rb.devs) # test an empty builder fake_pickle.side_effect = EOFError pickle.load = fake_pickle self.assertRaises(exceptions.UnPicklingError, ring.RingBuilder.load, 'fake.builder', open=fake_open) # test a corrupted builder fake_pickle.side_effect = pickle.UnpicklingError pickle.load = fake_pickle self.assertRaises(exceptions.UnPicklingError, ring.RingBuilder.load, 'fake.builder', open=fake_open) # test some error fake_pickle.side_effect = AttributeError 
            # (tail of test_load) any other unexpected failure inside
            # pickle.load is also reported as UnPicklingError
            pickle.load = fake_pickle
            self.assertRaises(exceptions.UnPicklingError,
                              ring.RingBuilder.load, 'fake.builder',
                              open=fake_open)
        finally:
            # always restore the real pickle.load for later tests
            pickle.load = real_pickle

        # test non existent builder file
        fake_open.side_effect = io_error_not_found
        self.assertRaises(exceptions.FileNotFoundError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)

        # test non accessible builder file
        fake_open.side_effect = io_error_no_perm
        self.assertRaises(exceptions.PermissionError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)

        # test an error other than ENOENT and EPERM: propagated unchanged
        fake_open.side_effect = io_error_generic
        self.assertRaises(IOError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)

    def test_save_load(self):
        """Round-trip a rebalanced builder through save()/load().

        Asserts the loaded builder's dict form equals the original's and
        that a non-default overload value survives the round trip.
        """
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000,
                 'replication_ip': '127.0.0.0', 'replication_port': 10000,
                 'device': 'sda1', 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001,
                 'replication_ip': '127.0.0.1', 'replication_port': 10001,
                 'device': 'sdb1', 'meta': 'meta1'},
                {'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002,
                 'replication_ip': '127.0.0.2', 'replication_port': 10002,
                 'device': 'sdc1', 'meta': 'meta2'},
                {'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003,
                 'replication_ip': '127.0.0.3', 'replication_port': 10003,
                 'device': 'sdd1', 'meta': ''}]
        # non-default overload must be persisted and restored too
        rb.set_overload(3.14159)
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        builder_file = os.path.join(self.testdir, 'test_save.builder')
        rb.save(builder_file)
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.maxDiff = None
        self.assertEqual(loaded_rb.to_dict(), rb.to_dict())
        self.assertEqual(loaded_rb.overload, 3.14159)

    @mock.patch('six.moves.builtins.open', autospec=True)
    @mock.patch('swift.common.ring.builder.pickle.dump', autospec=True)
    def test_save(self, mock_pickle_dump, mock_open):
        # open and pickle.dump are mocked so we can assert exactly how
        # save() writes the builder out (see assertions below)
        mock_open.return_value = mock_fh = mock.MagicMock()
        rb =
        ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000,
                 'device': 'sda1', 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001,
                 'device': 'sdb1', 'meta': 'meta1'},
                {'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002,
                 'device': 'sdc1', 'meta': 'meta2'},
                {'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003,
                 'device': 'sdd1'}]
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        rb.save('some.builder')
        # save() must open the target in binary write mode and pickle the
        # builder's *dict* representation (not the instance) with protocol 2
        mock_open.assert_called_once_with('some.builder', 'wb')
        mock_pickle_dump.assert_called_once_with(rb.to_dict(),
                                                 mock_fh.__enter__(),
                                                 protocol=2)

    def test_id(self):
        """Builder id lifecycle: assigned on first successful save(), then
        stable across further saves, loads, and failed saves."""
        rb = ring.RingBuilder(8, 3, 1)
        # check id is assigned after save
        builder_file = os.path.join(self.testdir, 'test_save.builder')
        rb.save(builder_file)
        assigned_id = rb.id
        # check id doesn't change when builder is saved again
        rb.save(builder_file)
        self.assertEqual(assigned_id, rb.id)
        # check same id after loading
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.assertEqual(assigned_id, loaded_rb.id)
        # check id doesn't change when loaded builder is saved
        rb.save(builder_file)
        self.assertEqual(assigned_id, rb.id)
        # check same id after loading again
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.assertEqual(assigned_id, loaded_rb.id)
        # check id remains once assigned, even when save fails
        with self.assertRaises(IOError):
            rb.save(os.path.join(
                self.testdir, 'non_existent_dir', 'test_save.file'))
        self.assertEqual(assigned_id, rb.id)
        # sanity check that different builders get different id's
        other_rb = ring.RingBuilder(8, 3, 1)
        other_builder_file = os.path.join(self.testdir, 'test_save_2.builder')
        other_rb.save(other_builder_file)
        self.assertNotEqual(assigned_id, other_rb.id)

    def test_id_copy_from(self):
        # copy_from preserves the same id
        orig_rb = ring.RingBuilder(8, 3, 1)
        copy_rb = ring.RingBuilder(8, 3, 1)
        copy_rb.copy_from(orig_rb)
        # neither builder has been saved yet, so neither has an id
        for rb in(orig_rb, copy_rb):
with self.assertRaises(AttributeError) as cm: rb.id self.assertIn('id attribute has not been initialised', cm.exception.args[0]) builder_file = os.path.join(self.testdir, 'test_save.builder') orig_rb.save(builder_file) copy_rb = ring.RingBuilder(8, 3, 1) copy_rb.copy_from(orig_rb) self.assertEqual(orig_rb.id, copy_rb.id) def test_id_legacy_builder_file(self): builder_file = os.path.join(self.testdir, 'legacy.builder') def do_test(): # load legacy file loaded_rb = ring.RingBuilder.load(builder_file) with self.assertRaises(AttributeError) as cm: loaded_rb.id self.assertIn('id attribute has not been initialised', cm.exception.args[0]) # check saving assigns an id, and that it is persisted loaded_rb.save(builder_file) assigned_id = loaded_rb.id self.assertIsNotNone(assigned_id) loaded_rb = ring.RingBuilder.load(builder_file) self.assertEqual(assigned_id, loaded_rb.id) # older builders had no id so the pickled builder dict had no id key rb = ring.RingBuilder(8, 3, 1) orig_to_dict = rb.to_dict def mock_to_dict(): result = orig_to_dict() result.pop('id') return result with mock.patch.object(rb, 'to_dict', mock_to_dict): rb.save(builder_file) do_test() # even older builders pickled the class instance, which would have had # no _id attribute rb = ring.RingBuilder(8, 3, 1) del rb.logger # logger type cannot be pickled del rb._id builder_file = os.path.join(self.testdir, 'legacy.builder') with open(builder_file, 'wb') as f: pickle.dump(rb, f, protocol=2) do_test() def test_id_not_initialised_errors(self): rb = ring.RingBuilder(8, 3, 1) # id is not set until builder has been saved with self.assertRaises(AttributeError) as cm: rb.id self.assertIn('id attribute has not been initialised', cm.exception.args[0]) # save must succeed for id to be assigned with self.assertRaises(IOError): rb.save(os.path.join( self.testdir, 'non-existent-dir', 'foo.builder')) with self.assertRaises(AttributeError) as cm: rb.id self.assertIn('id attribute has not been initialised', 
cm.exception.args[0]) def test_search_devs(self): rb = ring.RingBuilder(8, 3, 1) devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1', 'meta': 'meta0'}, {'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1', 'meta': 'meta1'}, {'id': 2, 'region': 1, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1', 'meta': 'meta2'}, {'id': 3, 'region': 1, 'zone': 3, 'weight': 2, 'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1', 'meta': 'meta3'}, {'id': 4, 'region': 2, 'zone': 4, 'weight': 1, 'ip': '127.0.0.4', 'port': 10004, 'device': 'sde1', 'meta': 'meta4', 'replication_ip': '127.0.0.10', 'replication_port': 20000}, {'id': 5, 'region': 2, 'zone': 5, 'weight': 2, 'ip': '127.0.0.5', 'port': 10005, 'device': 'sdf1', 'meta': 'meta5', 'replication_ip': '127.0.0.11', 'replication_port': 20001}, {'id': 6, 'region': 2, 'zone': 6, 'weight': 2, 'ip': '127.0.0.6', 'port': 10006, 'device': 'sdg1', 'meta': 'meta6', 'replication_ip': '127.0.0.12', 'replication_port': 20002}] for d in devs: rb.add_dev(d) rb.rebalance() res = rb.search_devs({'region': 0}) self.assertEqual(res, [devs[0], devs[1]]) res = rb.search_devs({'region': 1}) self.assertEqual(res, [devs[2], devs[3]]) res = rb.search_devs({'region': 1, 'zone': 2}) self.assertEqual(res, [devs[2]]) res = rb.search_devs({'id': 1}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'zone': 1}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'ip': '127.0.0.1'}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'ip': '127.0.0.1', 'port': 10001}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'port': 10001}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'replication_ip': '127.0.0.10'}) self.assertEqual(res, [devs[4]]) res = rb.search_devs({'replication_ip': '127.0.0.10', 'replication_port': 20000}) self.assertEqual(res, [devs[4]]) res = rb.search_devs({'replication_port': 20000}) 
self.assertEqual(res, [devs[4]]) res = rb.search_devs({'device': 'sdb1'}) self.assertEqual(res, [devs[1]]) res = rb.search_devs({'meta': 'meta1'}) self.assertEqual(res, [devs[1]]) def test_validate(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 12, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 2, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 13, 'region': 0, 'zone': 3, 'weight': 2, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 14, 'region': 0, 'zone': 3, 'weight': 2, 'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'}) rb.add_dev({'id': 15, 'region': 0, 'zone': 3, 'weight': 2, 'ip': '127.0.0.1', 'port': 10003, 
'device': 'sda1'}) # Degenerate case: devices added but not rebalanced yet self.assertRaises(exceptions.RingValidationError, rb.validate) rb.rebalance() counts = _partition_counts(rb, key='zone') self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256}) dev_usage, worst = rb.validate() self.assertIsNone(dev_usage) self.assertIsNone(worst) dev_usage, worst = rb.validate(stats=True) self.assertEqual(list(dev_usage), [32, 32, 64, 64, 32, 32, 32, # added zone0 32, 32, 32, # added zone1 64, 64, 64, # added zone2 64, 64, 64, # added zone3 ]) self.assertEqual(int(worst), 0) # min part hours should pin all the parts assigned to this zero # weight device onto it such that the balance will look horrible rb.set_dev_weight(2, 0) rb.rebalance() self.assertEqual(rb.validate(stats=True)[1], MAX_BALANCE) # Test not all partitions doubly accounted for rb.devs[1]['parts'] -= 1 self.assertRaises(exceptions.RingValidationError, rb.validate) rb.devs[1]['parts'] += 1 # Test non-numeric port rb.devs[1]['port'] = '10001' self.assertRaises(exceptions.RingValidationError, rb.validate) rb.devs[1]['port'] = 10001 # Test partition on nonexistent device rb.pretend_min_part_hours_passed() orig_dev_id = rb._replica2part2dev[0][0] rb._replica2part2dev[0][0] = len(rb.devs) self.assertRaises(exceptions.RingValidationError, rb.validate) rb._replica2part2dev[0][0] = orig_dev_id # Tests that validate can handle 'holes' in .devs rb.remove_dev(2) rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate(stats=True) # Test partition assigned to a hole if rb.devs[2]: rb.remove_dev(2) rb.pretend_min_part_hours_passed() orig_dev_id = rb._replica2part2dev[0][0] rb._replica2part2dev[0][0] = 2 self.assertRaises(exceptions.RingValidationError, rb.validate) rb._replica2part2dev[0][0] = orig_dev_id # Validate that zero weight devices with no partitions don't count on # the 'worst' value. 
        self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)
        # a brand-new zero-weight device with no parts assigned must not
        # drag the 'worst' balance up to MAX_BALANCE either
        rb.add_dev({'id': 16, 'region': 0, 'zone': 0, 'weight': 0,
                    'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
        rb.pretend_min_part_hours_passed()
        rb.rebalance()
        self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)

    def test_validate_partial_replica(self):
        """validate() must reject replica rows in the wrong length order.

        With 2.5 replicas the third row only covers half the partitions
        (128 of 256); swapping it ahead of a full row is invalid.
        """
        rb = ring.RingBuilder(8, 2.5, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
        rb.rebalance()
        rb.validate()  # sanity
        self.assertEqual(len(rb._replica2part2dev[0]), 256)
        self.assertEqual(len(rb._replica2part2dev[1]), 256)
        self.assertEqual(len(rb._replica2part2dev[2]), 128)

        # now swap partial replica part maps
        rb._replica2part2dev[1], rb._replica2part2dev[2] = \
            rb._replica2part2dev[2], rb._replica2part2dev[1]
        self.assertRaises(exceptions.RingValidationError, rb.validate)

    def test_validate_duplicate_part_assignment(self):
        """validate() must reject two replicas of one part on one device."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
        rb.rebalance()
        rb.validate()  # sanity
        # now double up a device assignment
        rb._replica2part2dev[1][200] = rb._replica2part2dev[2][200]
        with self.assertRaises(exceptions.RingValidationError) as e:
            rb.validate()
        expected = 'The partition 200 has been assigned to duplicate devices'
        self.assertIn(expected, str(e.exception))

    def test_get_part_devices(self):
        rb = ring.RingBuilder(8, 3, 1)
        # no devices yet: every partition resolves to an empty device list
        self.assertEqual(rb.get_part_devices(0), [])
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip':
'127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.rebalance() part_devs = sorted(rb.get_part_devices(0), key=operator.itemgetter('id')) self.assertEqual(part_devs, [rb.devs[0], rb.devs[1], rb.devs[2]]) def test_get_part_devices_partial_replicas(self): rb = ring.RingBuilder(8, 2.5, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.rebalance(seed=4) # note: partition 255 will only have 2 replicas part_devs = sorted(rb.get_part_devices(255), key=operator.itemgetter('id')) self.assertEqual(part_devs, [rb.devs[1], rb.devs[2]]) def test_dispersion_with_zero_weight_devices(self): rb = ring.RingBuilder(8, 3.0, 0) # add two devices to a single server in a single zone rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) # and a zero weight device rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.rebalance() self.assertEqual(rb.dispersion, 0.0) self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], (0, 0, '127.0.0.1'): [0, 0, 0, 256], (0, 0, '127.0.0.1', 0): [0, 256, 0, 0], (0, 0, '127.0.0.1', 1): [0, 256, 0, 0], (0, 0, '127.0.0.1', 2): [0, 256, 0, 0], }) def 
test_dispersion_with_zero_weight_devices_with_parts(self): rb = ring.RingBuilder(8, 3.0, 1) # add four devices to a single server in a single zone rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) rb.rebalance(seed=1) self.assertEqual(rb.dispersion, 0.0) self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], (0, 0, '127.0.0.1'): [0, 0, 0, 256], (0, 0, '127.0.0.1', 0): [64, 192, 0, 0], (0, 0, '127.0.0.1', 1): [64, 192, 0, 0], (0, 0, '127.0.0.1', 2): [64, 192, 0, 0], (0, 0, '127.0.0.1', 3): [64, 192, 0, 0], }) # now mark a device 2 for decom rb.set_dev_weight(2, 0.0) # we'll rebalance but can't move any parts rb.rebalance(seed=1) # zero weight tier has one copy of 1/4 part-replica self.assertEqual(rb.dispersion, 25.0) self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], (0, 0, '127.0.0.1'): [0, 0, 0, 256], (0, 0, '127.0.0.1', 0): [64, 192, 0, 0], (0, 0, '127.0.0.1', 1): [64, 192, 0, 0], (0, 0, '127.0.0.1', 2): [64, 192, 0, 0], (0, 0, '127.0.0.1', 3): [64, 192, 0, 0], }) # unlock the stuck parts rb.pretend_min_part_hours_passed() rb.rebalance(seed=3) self.assertEqual(rb.dispersion, 0.0) self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], (0, 0, '127.0.0.1'): [0, 0, 0, 256], (0, 0, '127.0.0.1', 0): [0, 256, 0, 0], (0, 0, '127.0.0.1', 1): [0, 256, 0, 0], (0, 0, '127.0.0.1', 3): [0, 256, 0, 0], }) @unittest.skipIf(sys.version_info >= (3,), "Seed-specific tests don't work well on py3") def test_undispersable_zone_converge_on_balance(self): rb = ring.RingBuilder(8, 6, 0) dev_id = 0 # 3 
regions, 2 zone for each region, 1 server with only *one* device in # each zone (this is an absolutely pathological case) for r in range(3): for z in range(2): ip = '127.%s.%s.1' % (r, z) dev_id += 1 rb.add_dev({'id': dev_id, 'region': r, 'zone': z, 'weight': 1000, 'ip': ip, 'port': 10000, 'device': 'd%s' % dev_id}) rb.rebalance(seed=7) # sanity, all balanced and 0 dispersion self.assertEqual(rb.get_balance(), 0) self.assertEqual(rb.dispersion, 0) # add one device to the server in z1 for each region, N.B. when we # *balance* this topology we will have very bad dispersion (too much # weight in z1 compared to z2!) for r in range(3): z = 0 ip = '127.%s.%s.1' % (r, z) dev_id += 1 rb.add_dev({'id': dev_id, 'region': r, 'zone': z, 'weight': 1000, 'ip': ip, 'port': 10000, 'device': 'd%s' % dev_id}) changed_part, _, _ = rb.rebalance(seed=7) # sanity, all part but only one replica moved to new devices self.assertEqual(changed_part, 2 ** 8) # so the first time, rings are still unbalanced becase we'll only move # one replica of each part. self.assertEqual(rb.get_balance(), 50.1953125) self.assertEqual(rb.dispersion, 16.6015625) # N.B. since we mostly end up grabbing parts by "weight forced" some # seeds given some specific ring state will randomly pick bad # part-replicas that end up going back down onto the same devices changed_part, _, _ = rb.rebalance(seed=7) self.assertEqual(changed_part, 14) # ... 
this isn't a really "desirable" behavior, but even with bad luck, # things do get better self.assertEqual(rb.get_balance(), 47.265625) self.assertEqual(rb.dispersion, 16.6015625) # but if you stick with it, eventually the next rebalance, will get to # move "the right" part-replicas, resulting in near optimal balance changed_part, _, _ = rb.rebalance(seed=7) self.assertEqual(changed_part, 240) self.assertEqual(rb.get_balance(), 0.390625) self.assertEqual(rb.dispersion, 16.6015625) @unittest.skipIf(sys.version_info >= (3,), "Seed-specific tests don't work well on py3") def test_undispersable_server_converge_on_balance(self): rb = ring.RingBuilder(8, 6, 0) dev_id = 0 # 3 zones, 2 server for each zone, 2 device for each server for z in range(3): for i in range(2): ip = '127.0.%s.%s' % (z, i + 1) for d in range(2): dev_id += 1 rb.add_dev({'id': dev_id, 'region': 1, 'zone': z, 'weight': 1000, 'ip': ip, 'port': 10000, 'device': 'd%s' % dev_id}) rb.rebalance(seed=7) # sanity, all balanced and 0 dispersion self.assertEqual(rb.get_balance(), 0) self.assertEqual(rb.dispersion, 0) # add one device for first server for each zone for z in range(3): ip = '127.0.%s.1' % z dev_id += 1 rb.add_dev({'id': dev_id, 'region': 1, 'zone': z, 'weight': 1000, 'ip': ip, 'port': 10000, 'device': 'd%s' % dev_id}) changed_part, _, _ = rb.rebalance(seed=7) # sanity, all part but only one replica moved to new devices self.assertEqual(changed_part, 2 ** 8) # but the first time, those are still unbalance becase ring builder # can move only one replica for each part self.assertEqual(rb.get_balance(), 16.9921875) self.assertEqual(rb.dispersion, 9.9609375) rb.rebalance(seed=7) # converge into around 0~1 self.assertGreaterEqual(rb.get_balance(), 0) self.assertLess(rb.get_balance(), 1) # dispersion doesn't get any worse self.assertEqual(rb.dispersion, 9.9609375) def test_effective_overload(self): rb = ring.RingBuilder(8, 3, 1) # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, 'ip': 
'127.0.0.0', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100, 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) # z1 rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 100, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) # z2 rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) # this ring requires overload required = rb.get_required_overload() self.assertGreater(required, 0.1) # and we'll use a little bit rb.set_overload(0.1) rb.rebalance(seed=7) rb.validate() # but with-out enough overload we're not dispersed self.assertGreater(rb.dispersion, 0) # add the other dev to z2 rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'}) # but also fail another device in the same! rb.remove_dev(6) # we still require overload required = rb.get_required_overload() self.assertGreater(required, 0.1) rb.pretend_min_part_hours_passed() rb.rebalance(seed=7) rb.validate() # ... and without enough we're full dispersed self.assertGreater(rb.dispersion, 0) # ok, let's fix z2's weight for real rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) # ... technically, we no longer require overload self.assertEqual(rb.get_required_overload(), 0.0) # so let's rebalance w/o resetting min_part_hours rb.rebalance(seed=7) rb.validate() # ... and that got it in one pass boo-yah! 
        self.assertEqual(rb.dispersion, 0)

    # NOTE(review): this method lacks the 'test_' prefix, so unittest never
    # collects or runs it — confirm whether that is intentional; renaming it
    # to test_zone_weights_over_device_count would activate its assertions.
    def zone_weights_over_device_count(self):
        rb = ring.RingBuilder(8, 3, 1)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        # z1
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        # z2
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})

        rb.rebalance(seed=7)
        rb.validate()
        self.assertEqual(rb.dispersion, 0)
        self.assertAlmostEqual(rb.get_balance(), (1.0 / 3.0) * 100)

    def test_more_devices_than_replicas_validation_when_removed_dev(self):
        """After remove_dev() a rebalance with fewer devices than replicas
        must fail validation, and the removed dev's weight is frozen."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'weight': 1.0, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'weight': 1.0, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'weight': 1.0, 'device': 'sdc'})
        rb.rebalance()
        rb.remove_dev(2)
        # a device marked for removal can no longer have its weight changed
        with self.assertRaises(ValueError) as e:
            rb.set_dev_weight(2, 1)
        msg = "Can not set weight of dev_id 2 because it is marked " \
            "for removal"
        self.assertIn(msg, str(e.exception))
        # only 2 devices remain for 3 replicas: rebalance must refuse
        with self.assertRaises(exceptions.RingValidationError) as e:
            rb.rebalance()
        msg = 'Replica count of 3 requires more than 2 devices'
        self.assertIn(msg, str(e.exception))

    def _add_dev_delete_first_n(self, add_dev_count, n):
        """Helper: build and rebalance a ring with add_dev_count devices,
        then remove the first n of them (leaving id 'holes') and rebalance.

        Returns the resulting RingBuilder.
        """
        rb = ring.RingBuilder(8, 3, 1)
        dev_names = ['sda', 'sdb', 'sdc', 'sdd', 'sde', 'sdf']
        for i in range(add_dev_count):
            # fall back to 'sda' once the canned names run out
            if i < len(dev_names):
                dev_name = dev_names[i]
            else:
                dev_name = 'sda'
            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                        'port': 6200, 'weight': 1.0, 'device': dev_name})
        rb.rebalance()
        if (n > 0):
            rb.pretend_min_part_hours_passed()
            # remove first n
            for i in range(n):
                rb.remove_dev(i)
            rb.pretend_min_part_hours_passed()
            rb.rebalance()
        return rb

    def test_reuse_of_dev_holes_without_id(self):
        # try with
contiguous holes at beginning add_dev_count = 6 rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3) new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) self.assertLess(new_dev_id, add_dev_count) # try with non-contiguous holes # [0, 1, None, 3, 4, None] rb2 = ring.RingBuilder(8, 3, 1) for i in range(6): rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) rb2.rebalance() rb2.pretend_min_part_hours_passed() rb2.remove_dev(2) rb2.remove_dev(5) rb2.pretend_min_part_hours_passed() rb2.rebalance() first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) # add a new one (without reusing a hole) third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) self.assertEqual(first, 2) self.assertEqual(second, 5) self.assertEqual(third, 6) def test_reuse_of_dev_holes_with_id(self): add_dev_count = 6 rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3) # add specifying id exp_new_dev_id = 2 # [dev, dev, None, dev, dev, None] try: new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) self.assertEqual(new_dev_id, exp_new_dev_id) except exceptions.DuplicateDeviceError: self.fail("device hole not reused") def test_prepare_increase_partition_power(self): ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz') rb = ring.RingBuilder(8, 3.0, 1) self.assertEqual(rb.part_power, 8) # add more devices than replicas to the ring for i in range(10): dev = "sdx%s" % i rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': dev}) rb.rebalance(seed=1) self.assertFalse(rb.cancel_increase_partition_power()) 
self.assertEqual(rb.part_power, 8) self.assertIsNone(rb.next_part_power) self.assertFalse(rb.finish_increase_partition_power()) self.assertEqual(rb.part_power, 8) self.assertIsNone(rb.next_part_power) self.assertTrue(rb.prepare_increase_partition_power()) self.assertEqual(rb.part_power, 8) self.assertEqual(rb.next_part_power, 9) # Save .ring.gz, and load ring from it to ensure prev/next is set rd = rb.get_ring() rd.save(ring_file) r = ring.Ring(ring_file) expected_part_shift = 32 - 8 self.assertEqual(expected_part_shift, r._part_shift) self.assertEqual(9, r.next_part_power) def test_increase_partition_power(self): rb = ring.RingBuilder(8, 3.0, 1) self.assertEqual(rb.part_power, 8) # add more devices than replicas to the ring for i in range(10): dev = "sdx%s" % i rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': dev}) rb.rebalance(seed=1) # Let's save the ring, and get the nodes for an object ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz') rd = rb.get_ring() rd.save(ring_file) r = ring.Ring(ring_file) old_part, old_nodes = r.get_nodes("acc", "cont", "obj") old_version = rb.version self.assertTrue(rb.prepare_increase_partition_power()) self.assertTrue(rb.increase_partition_power()) rb.validate() changed_parts, _balance, removed_devs = rb.rebalance() self.assertEqual(changed_parts, 0) self.assertEqual(removed_devs, 0) # Make sure cancellation is not possible # after increasing the partition power self.assertFalse(rb.cancel_increase_partition_power()) old_ring = r rd = rb.get_ring() rd.save(ring_file) r = ring.Ring(ring_file) new_part, new_nodes = r.get_nodes("acc", "cont", "obj") # sanity checks self.assertEqual(9, rb.part_power) self.assertEqual(9, rb.next_part_power) self.assertEqual(rb.version, old_version + 3) # make sure there is always the same device assigned to every pair of # partitions for replica in rb._replica2part2dev: for part in range(0, len(replica), 2): dev = replica[part] next_dev = 
replica[part + 1] self.assertEqual(dev, next_dev) # same for last_part moves for part in range(0, rb.parts, 2): this_last_moved = rb._last_part_moves[part] next_last_moved = rb._last_part_moves[part + 1] self.assertEqual(this_last_moved, next_last_moved) for i in range(100): suffix = uuid.uuid4() account = 'account_%s' % suffix container = 'container_%s' % suffix obj = 'obj_%s' % suffix old_part, old_nodes = old_ring.get_nodes(account, container, obj) new_part, new_nodes = r.get_nodes(account, container, obj) # Due to the increased partition power, the partition each object # is assigned to has changed. If the old partition was X, it will # now be either located in 2*X or 2*X+1 self.assertIn(new_part, [old_part * 2, old_part * 2 + 1]) # Importantly, we expect the objects to be placed on the same # nodes after increasing the partition power self.assertEqual(old_nodes, new_nodes) def test_finalize_increase_partition_power(self): ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz') rb = ring.RingBuilder(8, 3.0, 1) self.assertEqual(rb.part_power, 8) # add more devices than replicas to the ring for i in range(10): dev = "sdx%s" % i rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': dev}) rb.rebalance(seed=1) self.assertTrue(rb.prepare_increase_partition_power()) # Make sure this doesn't do any harm before actually increasing the # partition power self.assertFalse(rb.finish_increase_partition_power()) self.assertEqual(rb.next_part_power, 9) self.assertTrue(rb.increase_partition_power()) self.assertFalse(rb.prepare_increase_partition_power()) self.assertEqual(rb.part_power, 9) self.assertEqual(rb.next_part_power, 9) self.assertTrue(rb.finish_increase_partition_power()) self.assertEqual(rb.part_power, 9) self.assertIsNone(rb.next_part_power) # Save .ring.gz, and load ring from it to ensure prev/next is set rd = rb.get_ring() rd.save(ring_file) r = ring.Ring(ring_file) expected_part_shift = 32 - 9 
        self.assertEqual(expected_part_shift, r._part_shift)
        self.assertIsNone(r.next_part_power)

    def test_prepare_increase_partition_power_failed(self):
        """A second prepare is refused while an increase is already
        pending; next_part_power stays at the first prepared value."""
        rb = ring.RingBuilder(8, 3.0, 1)
        self.assertEqual(rb.part_power, 8)

        self.assertTrue(rb.prepare_increase_partition_power())
        self.assertEqual(rb.next_part_power, 9)

        # next_part_power is still set, do not increase again
        self.assertFalse(rb.prepare_increase_partition_power())
        self.assertEqual(rb.next_part_power, 9)

    def test_increase_partition_power_failed(self):
        """increase_partition_power() fails without a prior prepare step
        and cannot be applied a second time."""
        rb = ring.RingBuilder(8, 3.0, 1)
        self.assertEqual(rb.part_power, 8)

        # add more devices than replicas to the ring
        for i in range(10):
            dev = "sdx%s" % i
            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                        'ip': '127.0.0.1', 'port': 10000, 'device': dev})

        rb.rebalance(seed=1)

        # next_part_power not set, can't increase the part power
        self.assertFalse(rb.increase_partition_power())
        self.assertEqual(rb.part_power, 8)

        self.assertTrue(rb.prepare_increase_partition_power())
        self.assertTrue(rb.increase_partition_power())
        self.assertEqual(rb.part_power, 9)

        # part_power already increased
        self.assertFalse(rb.increase_partition_power())
        self.assertEqual(rb.part_power, 9)

    def test_cancel_increase_partition_power(self):
        """Cancelling a prepared (not yet applied) power increase resets
        next_part_power back to part_power and bumps the version."""
        rb = ring.RingBuilder(8, 3.0, 1)
        self.assertEqual(rb.part_power, 8)

        # add more devices than replicas to the ring
        for i in range(10):
            dev = "sdx%s" % i
            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                        'ip': '127.0.0.1', 'port': 10000, 'device': dev})

        rb.rebalance(seed=1)

        old_version = rb.version
        self.assertTrue(rb.prepare_increase_partition_power())

        # sanity checks
        self.assertEqual(8, rb.part_power)
        self.assertEqual(9, rb.next_part_power)
        self.assertEqual(rb.version, old_version + 1)

        self.assertTrue(rb.cancel_increase_partition_power())
        rb.validate()

        self.assertEqual(8, rb.part_power)
        self.assertEqual(8, rb.next_part_power)
        self.assertEqual(rb.version, old_version + 2)


class TestGetRequiredOverload(unittest.TestCase):
    """Tests for RingBuilder.get_required_overload() and the
    weighted / wanted / target replica plans it is derived from."""

    # show full diffs for the large expected-dict comparisons below
    maxDiff = None

    def test_none_needed(self):
        """Equal-weight devices on a single server: no overload is ever
        needed, so weighted == wanted == target at any overload."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})

        # 4 equal-weight devs and 3 replicas: this can be balanced without
        # resorting to overload at all
        self.assertAlmostEqual(rb.get_required_overload(), 0)

        expected = {
            (0, 0, '127.0.0.1', 0): 0.75,
            (0, 0, '127.0.0.1', 1): 0.75,
            (0, 0, '127.0.0.1', 2): 0.75,
            (0, 0, '127.0.0.1', 3): 0.75,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {
            tier: weighted
            for (tier, weighted) in weighted_replicas.items()
            if len(tier) == 4})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})

        # since no overload is needed, target_replicas is the same
        rb.set_overload(0.10)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # ... no matter how high you go!
        rb.set_overload(100.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # 3 equal-weight devs and 3 replicas: this can also be balanced
        rb.remove_dev(3)
        self.assertAlmostEqual(rb.get_required_overload(), 0)
        expected = {
            (0, 0, '127.0.0.1', 0): 1.0,
            (0, 0, '127.0.0.1', 1): 1.0,
            (0, 0, '127.0.0.1', 2): 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})

        # ... still no overload
        rb.set_overload(100.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

    def test_equal_replica_and_devices_count_ignore_weights(self):
        """With exactly as many devices as replicas, every device holds
        one full replica regardless of its individual weight."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 7.47,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 5.91,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 6.44,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        expected = {
            0: 1.0,
            1: 1.0,
            2: 1.0,
        }
        # simplicity itself
        self.assertEqual(expected, {
            t[-1]: r for (t, r) in
            rb._build_weighted_replicas_by_tier().items()
            if len(t) == 4})
        self.assertEqual(expected, {
            t[-1]: r for (t, r) in
            rb._build_wanted_replicas_by_tier().items()
            if len(t) == 4})
        self.assertEqual(expected, {
            t[-1]: r for (t, r) in
            rb._build_target_replicas_by_tier().items()
            if len(t) == 4})
        # ... no overload required!
        self.assertEqual(0, rb.get_required_overload())

        rb.rebalance()
        expected = {
            0: 256,
            1: 256,
            2: 256,
        }
        self.assertEqual(expected, {d['id']: d['parts']
                                    for d in rb._iter_devs()})

    def test_small_zone(self):
        """One zone slightly lighter than the rest: dispersion still wants
        a whole replica there, which requires a small overload."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        expected = {
            (0, 0): 1.0434782608695652,
            (0, 1): 1.0434782608695652,
            (0, 2): 0.9130434782608695,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 1.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})
        # the device tier is interesting because one of the devices in zone
        # two has a different weight
        expected = {
            0: 0.5217391304347826,
            1: 0.5217391304347826,
            2: 0.5217391304347826,
            3: 0.5217391304347826,
            4: 0.5217391304347826,
            5: 0.3913043478260869,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})
        # ... but, each pair of devices still needs to hold a whole
        # replicanth; which we'll try distribute fairly among devices in
        # zone 2, so that they can share the burden and ultimately the
        # required overload will be as small as possible.
        # (continuation of test_small_zone: the wanted plan splits the
        # third replica across zone 2's unequal devices)
        expected = {
            0: 0.5,
            1: 0.5,
            2: 0.5,
            3: 0.5,
            4: 0.5714285714285715,
            5: 0.42857142857142855,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})
        # full dispersion requires zone two's devices to eat more than
        # they're weighted for
        self.assertAlmostEqual(rb.get_required_overload(), 0.095238,
                               delta=1e-5)
        # so... if we give it enough overload it we should get full dispersion
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

    def test_multiple_small_zones(self):
        """Several zones much smaller than zone 0: full dispersion needs a
        large overload; a partial overload lands in between weight and
        dispersion."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        expected = {
            (0, 0): 2.1052631578947367,
            (0, 1): 0.47368421052631576,
            (0, 2): 0.21052631578947367,
            (0, 3): 0.21052631578947367,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})

        # without any overload, we get weight
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})

        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.49999999999999994,
            (0, 3): 0.49999999999999994,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {t: r
                          for (t, r) in wanted_replicas.items()
                          if len(t) == 2})

        self.assertEqual(1.3750000000000002, rb.get_required_overload())

        # with enough overload we get the full dispersion
        rb.set_overload(1.5)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})

        # with not enough overload, we get somewhere in the middle
        rb.set_overload(1.0)
        expected = {
            (0, 0): 1.3014354066985647,
            (0, 1): 0.8564593301435406,
            (0, 2): 0.4210526315789473,
            (0, 3): 0.4210526315789473,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})

    def test_big_zone(self):
        """One zone heavier than the three others; the required overload
        equals the relative shortfall of any evenly-weighted small zone."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
        expected = {
            (0, 0): 1.0714285714285714,
            (0, 1): 0.6428571428571429,
            (0, 2): 0.6428571428571429,
            (0, 3): 0.6428571428571429,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        expected = {
            (0, 0): 1.0,
            (0, 1): 0.6666666666666667,
            (0, 2): 0.6666666666666667,
            (0, 3): 0.6666666666666667,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        # when all the devices and servers in a zone are evenly weighted
        # it will accurately proxy their required overload, all the
        # zones besides 0 require the same overload
        t = random.choice([t for t in weighted_replicas
                           if len(t) == 2
                           and t[1] != 0])
        expected_overload = ((wanted_replicas[t] - weighted_replicas[t])
                             / weighted_replicas[t])
        self.assertAlmostEqual(rb.get_required_overload(),
                               expected_overload)

        # but if you only give it out half of that
        rb.set_overload(expected_overload / 2.0)

        # ... you can expect it's not going to full disperse
        expected = {
            (0, 0): 1.0357142857142856,
            (0, 1): 0.6547619047619049,
            (0, 2): 0.6547619047619049,
            (0, 3): 0.6547619047619049,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})

    def test_enormous_zone(self):
        """Zone 0 dwarfs three tiny zones: dispersion wants the tiny zones
        to hold far more than their weight."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        expected = {
            (0, 0): 2.542372881355932,
            (0, 1): 0.15254237288135591,
            (0, 2): 0.15254237288135591,
            (0, 3): 0.15254237288135591,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        expected = {
            (0, 0): 1.0,
            (0, 1): 0.6666666666666667,
            (0, 2): 0.6666666666666667,
            (0, 3): 0.6666666666666667,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        # (continuation of test_enormous_zone: compare the wanted plan,
        # then see how far overload=1 gets us)
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        # ouch, those "tiny" devices need to hold 3x more than their
        # weighted for!
        self.assertAlmostEqual(rb.get_required_overload(), 3.370370,
                               delta=1e-5)

        # let's get a little crazy, and let devices eat up to 1x more than
        # their capacity is weighted for - see how far that gets us...
        rb.set_overload(1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 2.084745762711864,
            (0, 1): 0.30508474576271183,
            (0, 2): 0.30508474576271183,
            (0, 3): 0.30508474576271183,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})

    def test_two_big_two_small(self):
        """Two full-replica zones plus two smaller zones splitting the
        third replica; ~1/6 overload is required, 20% suffices."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 45,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 45,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 35,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 35,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        expected = {
            (0, 0): 1.0714285714285714,
            (0, 1): 1.0714285714285714,
            (0, 2): 0.48214285714285715,
            (0, 3): 0.375,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.5625,
            (0, 3): 0.43749999999999994,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        # I'm not sure it's significant or coincidental that the devices
        # in zone 2 & 3 who end up splitting the 3rd replica turn out to
        # need to eat ~1/6th extra replicanths
        self.assertAlmostEqual(rb.get_required_overload(), 1.0 / 6.0)

        # ... *so* 10% isn't *quite* enough
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 1.0285714285714285,
            (0, 1): 1.0285714285714285,
            (0, 2): 0.5303571428571429,
            (0, 3): 0.4125,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})

        # ... but 20% will do the trick!
        rb.set_overload(0.2)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.5625,
            (0, 3): 0.43749999999999994,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})

    def test_multiple_replicas_each(self):
        """Seven replicas across two zones: wanted rounds the weighted
        plan to whole replicas per zone; a small overload is enough."""
        rb = ring.RingBuilder(8, 7, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 80,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 80,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 80,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 80,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 80,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sde'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 70,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 70,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 70,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 70,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        expected = {
            (0, 0): 4.117647058823529,
            (0, 1): 2.8823529411764706,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        expected = {
            (0, 0): 4.0,
            (0, 1): 3.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        # I guess 2.88 => 3.0 is about a 4% increase
        self.assertAlmostEqual(rb.get_required_overload(),
                               0.040816326530612256)

        # ... 10% is plenty enough here
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})

    def test_small_extra_server_in_zone_with_multiple_replicas(self):
        """A tiny extra server inside a multi-replica zone forces a huge
        required overload to reach full dispersion."""
        rb = ring.RingBuilder(8, 5, 1)

        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 1000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 1000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdc', 'weight': 1000})

        # z1
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 1000})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 1000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdc', 'weight': 1000})

        # z1 - extra small server
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 50})

        expected = {
            (0, 0): 2.479338842975207,
            (0, 1): 2.5206611570247937,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        # (continuation of test_small_extra_server_in_zone_with_multiple_
        # replicas: zone tier is fine, the tiny server is not)
        self.assertEqual(expected,
                         {t: r
                          for (t, r) in weighted_replicas.items()
                          if len(t) == 2})

        # dispersion is fine with this at the zone tier
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {t: r
                          for (t, r) in wanted_replicas.items()
                          if len(t) == 2})

        # ... but not ok with that tiny server
        expected = {
            '127.0.0.1': 2.479338842975207,
            '127.0.0.2': 1.5206611570247937,
            '127.0.0.3': 1.0,
        }
        self.assertEqual(expected,
                         {t[-1]: r
                          for (t, r) in wanted_replicas.items()
                          if len(t) == 3})
        self.assertAlmostEqual(23.2, rb.get_required_overload())

    def test_multiple_replicas_in_zone_with_single_device(self):
        """A single device in z0 caps every device at one replica, so no
        amount of overload can change the outcome."""
        rb = ring.RingBuilder(8, 5, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z1
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sdb', 'weight': 100})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sdc', 'weight': 100})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sdd', 'weight': 100})

        # first things first, make sure we do this right
        rb.rebalance()

        # each device gets a single replica of every part
        expected = {
            0: 256,
            1: 256,
            2: 256,
            3: 256,
            4: 256,
        }
        self.assertEqual(expected, {d['id']: d['parts']
                                    for d in rb._iter_devs()})

        # but let's make sure we're thinking about it right too
        expected = {
            0: 1.0,
            1: 1.0,
            2: 1.0,
            3: 1.0,
            4: 1.0,
        }

        # by weight everyone is equal
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in weighted_replicas.items()
                                    if len(t) == 4})

        # wanted might have liked to have fewer replicas in z1, but the
        # single device in z0 limits us one replica per device
        with rb.debug():
            wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in wanted_replicas.items()
                                    if len(t) == 4})

        # even with some overload - still one replica per device
        rb.set_overload(1.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})

        # when overload can not change the outcome none is required
        self.assertEqual(0.0, rb.get_required_overload())

        # even though dispersion is terrible (in z1 particularly)
        self.assertEqual(20.0, rb.dispersion)

    def test_one_big_guy_does_not_spoil_his_buddy(self):
        """One hugely-weighted device: with overload, its small same-tier
        buddy (d4) sheds parts to it rather than to other tiers."""
        rb = ring.RingBuilder(8, 3, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z1
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})

        # obviously d5 gets one whole replica; the other two replicas
        # are split evenly among the five other devices
        # (i.e. ~0.4 replicanths for each 100 units of weight)
        expected = {
            0: 0.39999999999999997,
            1: 0.39999999999999997,
            2: 0.39999999999999997,
            3: 0.39999999999999997,
            4: 0.39999999999999997,
            5: 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in weighted_replicas.items()
                                    if len(t) == 4})

        # with no overload we get the "balanced" placement
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})

        # but in reality, these devices having such disparate weights
        # leads to a *terrible* balance even w/o overload!
        rb.rebalance(seed=9)
        self.assertEqual(rb.get_balance(), 1308.2031249999998)

        # even though part assignment is pretty reasonable
        expected = {
            0: 103,
            1: 102,
            2: 103,
            3: 102,
            4: 102,
            5: 256,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})

        # so whats happening is the small devices are holding *way* more
        # *real* parts than their *relative* portion of the weight would
        # like them too!
        expected = {
            0: 1308.2031249999998,
            1: 1294.5312499999998,
            2: 1308.2031249999998,
            3: 1294.5312499999998,
            4: 1294.5312499999998,
            5: -65.0,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())

        # increasing overload moves towards one replica in each tier
        rb.set_overload(0.20)
        expected = {
            0: 0.48,
            1: 0.48,
            2: 0.48,
            3: 0.48,
            4: 0.30857142857142855,
            5: 0.7714285714285714,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})

        # ... and as always increasing overload makes balance *worse*
        rb.rebalance(seed=17)
        self.assertEqual(rb.get_balance(), 1581.6406249999998)

        # but despite the overall trend toward imbalance, in the tier with the
        # huge device, we want to see the small device (d4) try to shed parts
        # as effectively as it can to the huge device in the same tier (d5)
        # this is a useful behavior anytime when for whatever reason a device
        # w/i a tier wants parts from another device already in the same tier
        # another example is `test_one_small_guy_does_not_spoil_his_buddy`
        expected = {
            0: 123,
            1: 123,
            2: 123,
            3: 123,
            4: 79,
            5: 197,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})

        # *see*, at least *someones* balance is getting better!
        expected = {
            0: 1581.6406249999998,
            1: 1581.6406249999998,
            2: 1581.6406249999998,
            3: 1581.6406249999998,
            4: 980.078125,
            5: -73.06640625,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())

    def test_one_small_guy_does_not_spoil_his_buddy(self):
        """Mirror case: one tiny device among huge ones; overload makes
        overall balance worse without piling parts onto the small guy."""
        rb = ring.RingBuilder(8, 3, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        # z1
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        # z2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})

        # it's almost like 3.0 / 5 ~= 0.6, but that one little guy get's
        # his fair share
        expected = {
            0: 0.5988023952095808,
            1: 0.5988023952095808,
            2: 0.5988023952095808,
            3: 0.5988023952095808,
            4: 0.5988023952095808,
            5: 0.005988023952095809,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in weighted_replicas.items()
                                    if len(t) == 4})

        # with no overload we get a nice balanced placement
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})
        rb.rebalance(seed=9)

        # part placement looks goods
        expected = {
            0: 154,
            1: 153,
            2: 153,
            3: 153,
            4: 153,
            5: 2,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})

        # ... balance is a little lumpy on the small guy since he wants
        # one and a half parts :\
        expected = {
            0: 0.4609375000000142,
            1: -0.1914062499999858,
            2: -0.1914062499999858,
            3: -0.1914062499999858,
            4: -0.1914062499999858,
            5: 30.46875,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        self.assertEqual(rb.get_balance(), 30.46875)

        # increasing overload moves towards one replica in each tier
        rb.set_overload(0.3)
        expected = {
            0: 0.553443113772455,
            1: 0.553443113772455,
            2: 0.553443113772455,
            3: 0.553443113772455,
            4: 0.778443113772455,
            5: 0.007784431137724551,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})

        # ... and as always increasing overload makes balance *worse*
        rb.rebalance(seed=12)
        self.assertEqual(rb.get_balance(), 30.46875)

        # the little guy it really struggling to take his share tho
        expected = {
            0: 142,
            1: 141,
            2: 142,
            3: 141,
            4: 200,
            5: 2,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # ... and you can see it in the balance!
        # (continuation of test_one_small_guy_does_not_spoil_his_buddy)
        expected = {
            0: -7.367187499999986,
            1: -8.019531249999986,
            2: -7.367187499999986,
            3: -8.019531249999986,
            4: 30.46875,
            5: 30.46875,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())

        rb.set_overload(0.5)
        expected = {
            0: 0.5232035928143712,
            1: 0.5232035928143712,
            2: 0.5232035928143712,
            3: 0.5232035928143712,
            4: 0.8982035928143712,
            5: 0.008982035928143714,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r
                                    for (t, r) in target_replicas.items()
                                    if len(t) == 4})

        # because the device is so small, balance get's bad quick
        rb.rebalance(seed=17)
        self.assertEqual(rb.get_balance(), 95.703125)

        # but despite the overall trend toward imbalance, the little guy
        # isn't really taking on many new parts!
        expected = {
            0: 134,
            1: 134,
            2: 134,
            3: 133,
            4: 230,
            5: 3,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})

        # *see*, at everyone's balance is getting worse *together*!
        expected = {
            0: -12.585937499999986,
            1: -12.585937499999986,
            2: -12.585937499999986,
            3: -13.238281249999986,
            4: 50.0390625,
            5: 95.703125,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())

    def test_two_servers_with_more_than_one_replica(self):
        """After an overloaded rebalance each device's part count should
        track its target replica plan to within one part."""
        rb = ring.RingBuilder(8, 3, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 60})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 60})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 60})
        # z1
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 80})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 128})
        # z2
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
                    'port': 6200, 'device': 'sda', 'weight': 80})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
                    'port': 6200, 'device': 'sda', 'weight': 240})

        rb.set_overload(0.1)
        rb.rebalance()
        self.assertEqual(12.161458333333343, rb.get_balance())

        replica_plan = rb._build_target_replicas_by_tier()
        for dev in rb._iter_devs():
            tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
            expected_parts = replica_plan[tier] * rb.parts
            self.assertAlmostEqual(dev['parts'], expected_parts,
                                   delta=1)

    def test_multi_zone_with_failed_device(self):
        """Removing one of two devices in a zone means the survivor must
        hold both of that zone's device-shares; only a very large
        overload approaches that placement."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})

        # sanity, balanced and dispersed
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        self.assertEqual(rb.get_required_overload(), 0.0)

        # fail a device in zone 2
        rb.remove_dev(4)

        expected = {
            0: 0.6,
            1: 0.6,
            2: 0.6,
            3: 0.6,
            5: 0.6,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})

        expected = {
            0: 0.5,
            1: 0.5,
            2: 0.5,
            3: 0.5,
            5: 1.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})

        # does this make sense? every zone was holding 1/3rd of the
        # replicas, so each device was 1/6th, remove a device and
        # suddenly it's holding *both* sixths which is 2/3rds?
        self.assertAlmostEqual(rb.get_required_overload(), 2.0 / 3.0)

        # 10% isn't nearly enough
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.585,
            1: 0.585,
            2: 0.585,
            3: 0.585,
            5: 0.6599999999999999,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # 50% isn't even enough
        rb.set_overload(0.5)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.525,
            1: 0.525,
            2: 0.525,
            3: 0.525,
            5: 0.8999999999999999,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # even 65% isn't enough (but it's getting closer)
        rb.set_overload(0.65)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.5025000000000001,
            1: 0.5025000000000001,
            2: 0.5025000000000001,
            3: 0.5025000000000001,
            5: 0.99,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

    def test_balanced_zones_unbalanced_servers(self):
        # NOTE(review): this test continues beyond the end of this chunk;
        # only its device setup is visible here.
        rb = ring.RingBuilder(8, 3, 1)
        # zone 0 server 127.0.0.1
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 3000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 3000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 3000})
        # zone 1 server 127.0.0.2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 4000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 4000})
        # zone 1 (again) server 127.0.0.3
        rb.add_dev({'id': 6,
'region': 0, 'zone': 1, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sda', 'weight': 1000}) weighted_replicas = rb._build_weighted_replicas_by_tier() # zones are evenly weighted expected = { (0, 0): 1.5, (0, 1): 1.5, } self.assertEqual(expected, {tier: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 2}) # ... but servers are not expected = { '127.0.0.1': 1.5, '127.0.0.2': 1.3333333333333333, '127.0.0.3': 0.16666666666666666, } self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) # make sure wanted will even it out expected = { '127.0.0.1': 1.5, '127.0.0.2': 1.0, '127.0.0.3': 0.4999999999999999, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) # so it wants 1/6th and eats 1/2 - that's 2/6ths more than it # wants which is a 200% increase self.assertAlmostEqual(rb.get_required_overload(), 2.0) # the overload doesn't effect the tiers that are already dispersed rb.set_overload(1) target_replicas = rb._build_target_replicas_by_tier() expected = { '127.0.0.1': 1.5, # notice with half the overload 1/6th replicanth swapped servers '127.0.0.2': 1.1666666666666665, '127.0.0.3': 0.3333333333333333, } self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 3}) def test_adding_second_zone(self): rb = ring.RingBuilder(3, 3, 1) # zone 0 server 127.0.0.1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # zone 0 server 127.0.0.2 rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # 
zone 0 server 127.0.0.3 rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # sanity, balanced and dispersed expected = { '127.0.0.1': 1.0, '127.0.0.2': 1.0, '127.0.0.3': 1.0, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) self.assertEqual(rb.get_required_overload(), 0) # start adding a second zone # zone 1 server 127.0.1.1 rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.2 rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.3 rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 11, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', 'port': 6200, 'device': 'sdb', 'weight': 100}) # this messes things up pretty royally expected = { '127.0.0.1': 0.9523809523809523, '127.0.0.2': 0.9523809523809523, '127.0.0.3': 0.9523809523809523, '127.0.1.1': 0.047619047619047616, '127.0.1.2': 0.047619047619047616, '127.0.1.3': 0.047619047619047616, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) expected = { '127.0.0.1': 0.6666666666666667, 
'127.0.0.2': 0.6666666666666667, '127.0.0.3': 0.6666666666666667, '127.0.1.1': 0.3333333333333333, '127.0.1.2': 0.3333333333333333, '127.0.1.3': 0.3333333333333333, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) # so dispersion would require these devices hold 6x more than # prescribed by weight, defeating any attempt at gradually # anything self.assertAlmostEqual(rb.get_required_overload(), 6.0) # so let's suppose we only allow for 10% overload rb.set_overload(0.10) target_replicas = rb._build_target_replicas_by_tier() expected = { # we expect servers in zone 0 to be between 0.952 and 0.666 '127.0.0.1': 0.9476190476190476, '127.0.0.2': 0.9476190476190476, '127.0.0.3': 0.9476190476190476, # we expect servers in zone 1 to be between 0.0476 and 0.333 # and in fact its ~10% increase (very little compared to 6x!) '127.0.1.1': 0.052380952380952375, '127.0.1.2': 0.052380952380952375, '127.0.1.3': 0.052380952380952375, } self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 3}) def test_gradual_replica_count(self): rb = ring.RingBuilder(3, 2.5, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sdb', 'weight': 2000}) expected = { 0: 0.625, 1: 0.625, 2: 0.625, 3: 0.625, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, { tier[3]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 4}) wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, { 
tier[3]: wanted for (tier, wanted) in wanted_replicas.items() if len(tier) == 4}) self.assertEqual(rb.get_required_overload(), 0) # server 127.0.0.2 will have only one device rb.remove_dev(2) # server 127.0.0.1 has twice the capacity of 127.0.0.2 expected = { '127.0.0.1': 1.6666666666666667, '127.0.0.2': 0.8333333333333334, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, { tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) # dispersion requirements extend only to whole replicas expected = { '127.0.0.1': 1.4999999999999998, '127.0.0.2': 1.0, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, { tier[2]: wanted for (tier, wanted) in wanted_replicas.items() if len(tier) == 3}) # 5/6ths to a whole replicanth is a 20% increase self.assertAlmostEqual(rb.get_required_overload(), 0.2) # so let's suppose we only allow for 10% overload rb.set_overload(0.1) target_replicas = rb._build_target_replicas_by_tier() expected = { '127.0.0.1': 1.5833333333333333, '127.0.0.2': 0.9166666666666667, } self.assertEqual(expected, { tier[2]: wanted for (tier, wanted) in target_replicas.items() if len(tier) == 3}) def test_perfect_four_zone_four_replica_bad_placement(self): rb = ring.RingBuilder(4, 4, 1) # this weight is sorta nuts, but it's really just to help the # weight_of_one_part hit a magic number where floats mess up # like they would on ring with a part power of 19 and 100's of # 1000's of units of weight. 
weight = 21739130434795e-11 # r0z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': weight, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': weight, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) # r0z1 rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': weight, 'ip': '127.0.1.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': weight, 'ip': '127.0.1.2', 'port': 10000, 'device': 'sdb'}) # r1z0 rb.add_dev({'id': 4, 'region': 1, 'zone': 0, 'weight': weight, 'ip': '127.1.0.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': weight, 'ip': '127.1.0.2', 'port': 10000, 'device': 'sdb'}) # r1z1 rb.add_dev({'id': 6, 'region': 1, 'zone': 1, 'weight': weight, 'ip': '127.1.1.1', 'port': 10000, 'device': 'sda'}) rb.add_dev({'id': 7, 'region': 1, 'zone': 1, 'weight': weight, 'ip': '127.1.1.2', 'port': 10000, 'device': 'sdb'}) # the replica plan is sound expectations = { # tier_len => expected replicas 1: { (0,): 2.0, (1,): 2.0, }, 2: { (0, 0): 1.0, (0, 1): 1.0, (1, 0): 1.0, (1, 1): 1.0, } } wr = rb._build_replica_plan() for tier_len, expected in expectations.items(): self.assertEqual(expected, {t: r['max'] for (t, r) in wr.items() if len(t) == tier_len}) # even thought a naive ceil of weights is surprisingly wrong expectations = { # tier_len => expected replicas 1: { (0,): 3.0, (1,): 3.0, }, 2: { (0, 0): 2.0, (0, 1): 2.0, (1, 0): 2.0, (1, 1): 2.0, } } wr = rb._build_weighted_replicas_by_tier() for tier_len, expected in expectations.items(): self.assertEqual(expected, {t: ceil(r) for (t, r) in wr.items() if len(t) == tier_len}) class TestRingBuilderDispersion(unittest.TestCase): def setUp(self): self.devs = ('d%s' % i for i in itertools.count()) def assertAlmostPartCount(self, counts, expected, delta=3): msgs = [] failed = False for k, p in sorted(expected.items()): try: self.assertAlmostEqual(counts[k], p, delta=delta) except 
KeyError: self.fail('%r is missing the key %r' % (counts, k)) except AssertionError: failed = True state = '!=' else: state = 'ok' msgs.append('parts in %s was %s expected %s (%s)' % ( k, counts[k], p, state)) if failed: self.fail('some part counts not close enough ' 'to expected:\n' + '\n'.join(msgs)) def test_rebalance_dispersion(self): rb = ring.RingBuilder(8, 6, 0) for i in range(6): rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6000, 'weight': 1.0, 'device': next(self.devs)}) rb.rebalance() self.assertEqual(0, rb.dispersion) for z in range(2): for i in range(6): rb.add_dev({'region': 0, 'zone': z + 1, 'ip': '127.0.1.1', 'port': 6000, 'weight': 1.0, 'device': next(self.devs)}) self.assertAlmostPartCount(_partition_counts(rb, 'zone'), {0: 1536, 1: 0, 2: 0}) rb.rebalance() self.assertEqual(rb.dispersion, 50.0) expected = {0: 1280, 1: 128, 2: 128} self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected) report = dict(utils.dispersion_report( rb, r'r\d+z\d+$', verbose=True)['graph']) counts = {int(k.split('z')[1]): d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected) rb.rebalance() self.assertEqual(rb.dispersion, 33.333333333333336) expected = {0: 1024, 1: 256, 2: 256} self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected) report = dict(utils.dispersion_report( rb, r'r\d+z\d+$', verbose=True)['graph']) counts = {int(k.split('z')[1]): d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected) rb.rebalance() self.assertEqual(rb.dispersion, 16.666666666666668) expected = {0: 768, 1: 384, 2: 384} self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected) report = dict(utils.dispersion_report( rb, r'r\d+z\d+$', verbose=True)['graph']) counts = {int(k.split('z')[1]): d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected) rb.rebalance() self.assertEqual(0, rb.dispersion) expected = {0: 512, 1: 512, 2: 512} 
self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected) report = dict(utils.dispersion_report( rb, r'r\d+z\d+$', verbose=True)['graph']) counts = {int(k.split('z')[1]): d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected) def test_weight_dispersion(self): rb = ring.RingBuilder(8, 3, 0) for i in range(2): for d in range(3): rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.%s.1' % i, 'port': 6000, 'weight': 1.0, 'device': next(self.devs)}) for d in range(3): rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.2.1', 'port': 6000, 'weight': 10.0, 'device': next(self.devs)}) rb.rebalance() # each tier should only have 1 replicanth, but the big server has 2 # replicas of every part and 3 replicas another 1/2 - so our total # dispersion is greater than one replicanth, it's 1.5 self.assertEqual(50.0, rb.dispersion) expected = { '127.0.0.1': 64, '127.0.1.1': 64, '127.0.2.1': 640, } self.assertAlmostPartCount(_partition_counts(rb, 'ip'), expected) report = dict(utils.dispersion_report( rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph']) counts = {k.split('-')[1]: d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected) def test_multiple_tier_dispersion(self): rb = ring.RingBuilder(10, 8, 0) r_z_to_ip_count = { (0, 0): 2, (1, 1): 1, (1, 2): 2, } ip_index = 0 for (r, z), ip_count in sorted(r_z_to_ip_count.items()): for i in range(ip_count): ip_index += 1 for d in range(3): rb.add_dev({'region': r, 'zone': z, 'ip': '127.%s.%s.%s' % (r, z, ip_index), 'port': 6000, 'weight': 1.0, 'device': next(self.devs)}) for i in range(3): # it might take a few rebalances for all the right part replicas to # balance from r1z2 into r1z1 rb.rebalance() self.assertAlmostEqual(15.52734375, rb.dispersion, delta=5.0) self.assertAlmostEqual(0.0, rb.get_balance(), delta=0.5) expected = { '127.0.0.1': 1638, '127.0.0.2': 1638, '127.1.1.3': 1638, '127.1.2.4': 1638, '127.1.2.5': 1638, } delta = 10 
self.assertAlmostPartCount(_partition_counts(rb, 'ip'), expected, delta=delta) report = dict(utils.dispersion_report( rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph']) counts = {k.split('-')[1]: d['placed_parts'] for k, d in report.items()} self.assertAlmostPartCount(counts, expected, delta=delta) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, __cpu_features__, ) from numpy.testing import assert_equal def test_dispatcher(): """ Testing the utilities of the CPU dispatcher """ targets = ( "X86_V2", "X86_V3", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", "VX", "VXE", "LSX", "RVV" ) highest_sfx = "" # no suffix for the baseline all_sfx = [] for feature in reversed(targets): # skip baseline features, by the default `CCompilerOpt` do not generate # separated objects for the baseline, just one object combined all of them # via 'baseline' option within the configuration statements. if feature in __cpu_baseline__: continue # check compiler and running machine support if feature not in __cpu_dispatch__ or not __cpu_features__[feature]: continue if not highest_sfx: highest_sfx = "_" + feature all_sfx.append("func" + "_" + feature) test = _umath_tests.test_dispatch() assert_equal(test["func"], "func" + highest_sfx) assert_equal(test["var"], "var" + highest_sfx) if highest_sfx: assert_equal(test["func_xb"], "func" + highest_sfx) assert_equal(test["var_xb"], "var" + highest_sfx) else: assert_equal(test["func_xb"], "nobase") assert_equal(test["var_xb"], "nobase") all_sfx.append("func") # add the baseline assert_equal(test["all"], all_sfx)
python
github
https://github.com/numpy/numpy
numpy/_core/tests/test_cpu_dispatcher.py
/*------------------------------------------------------------------------- * * compression.c * * Shared code for compression methods and specifications. * * A compression specification specifies the parameters that should be used * when performing compression with a specific algorithm. The simplest * possible compression specification is an integer, which sets the * compression level. * * Otherwise, a compression specification is a comma-separated list of items, * each having the form keyword or keyword=value. * * Currently, the supported keywords are "level", "long", and "workers". * * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/compression.c *------------------------------------------------------------------------- */ #ifndef FRONTEND #include "postgres.h" #else #include "postgres_fe.h" #endif #ifdef USE_ZSTD #include <zstd.h> #endif #ifdef HAVE_LIBZ #include <zlib.h> #endif #include "common/compression.h" static int expect_integer_value(char *keyword, char *value, pg_compress_specification *result); static bool expect_boolean_value(char *keyword, char *value, pg_compress_specification *result); /* * Look up a compression algorithm by name. Returns true and sets *algorithm * if the name is recognized. Otherwise returns false. */ bool parse_compress_algorithm(char *name, pg_compress_algorithm *algorithm) { if (strcmp(name, "none") == 0) *algorithm = PG_COMPRESSION_NONE; else if (strcmp(name, "gzip") == 0) *algorithm = PG_COMPRESSION_GZIP; else if (strcmp(name, "lz4") == 0) *algorithm = PG_COMPRESSION_LZ4; else if (strcmp(name, "zstd") == 0) *algorithm = PG_COMPRESSION_ZSTD; else return false; return true; } /* * Get the human-readable name corresponding to a particular compression * algorithm. 
*/ const char * get_compress_algorithm_name(pg_compress_algorithm algorithm) { switch (algorithm) { case PG_COMPRESSION_NONE: return "none"; case PG_COMPRESSION_GZIP: return "gzip"; case PG_COMPRESSION_LZ4: return "lz4"; case PG_COMPRESSION_ZSTD: return "zstd"; /* no default, to provoke compiler warnings if values are added */ } Assert(false); return "???"; /* placate compiler */ } /* * Parse a compression specification for a specified algorithm. * * See the file header comments for a brief description of what a compression * specification is expected to look like. * * On return, all fields of the result object will be initialized. * In particular, result->parse_error will be NULL if no errors occurred * during parsing, and will otherwise contain an appropriate error message. * The caller may free this error message string using pfree, if desired. * Note, however, even if there's no parse error, the string might not make * sense: e.g. for gzip, level=12 is not sensible, but it does parse OK. * * The compression level is assigned by default if not directly specified * by the specification. * * Use validate_compress_specification() to find out whether a compression * specification is semantically sensible. */ void parse_compress_specification(pg_compress_algorithm algorithm, char *specification, pg_compress_specification *result) { int bare_level; char *bare_level_endp; /* Initial setup of result object. */ result->algorithm = algorithm; result->options = 0; result->parse_error = NULL; /* * Assign a default level depending on the compression method. This may * be enforced later. 
*/ switch (result->algorithm) { case PG_COMPRESSION_NONE: result->level = 0; break; case PG_COMPRESSION_LZ4: #ifdef USE_LZ4 result->level = 0; /* fast compression mode */ #else result->parse_error = psprintf(_("this build does not support compression with %s"), "LZ4"); #endif break; case PG_COMPRESSION_ZSTD: #ifdef USE_ZSTD result->level = ZSTD_CLEVEL_DEFAULT; #else result->parse_error = psprintf(_("this build does not support compression with %s"), "ZSTD"); #endif break; case PG_COMPRESSION_GZIP: #ifdef HAVE_LIBZ result->level = Z_DEFAULT_COMPRESSION; #else result->parse_error = psprintf(_("this build does not support compression with %s"), "gzip"); #endif break; } /* If there is no specification, we're done already. */ if (specification == NULL) return; /* As a special case, the specification can be a bare integer. */ bare_level = strtol(specification, &bare_level_endp, 10); if (specification != bare_level_endp && *bare_level_endp == '\0') { result->level = bare_level; return; } /* Look for comma-separated keyword or keyword=value entries. */ while (1) { char *kwstart; char *kwend; char *vstart; char *vend; int kwlen; int vlen; bool has_value; char *keyword; char *value; /* Figure start, end, and length of next keyword and any value. */ kwstart = kwend = specification; while (*kwend != '\0' && *kwend != ',' && *kwend != '=') ++kwend; kwlen = kwend - kwstart; if (*kwend != '=') { vstart = vend = NULL; vlen = 0; has_value = false; } else { vstart = vend = kwend + 1; while (*vend != '\0' && *vend != ',') ++vend; vlen = vend - vstart; has_value = true; } /* Reject empty keyword. */ if (kwlen == 0) { result->parse_error = pstrdup(_("found empty string where a compression option was expected")); break; } /* Extract keyword and value as separate C strings. 
*/ keyword = palloc(kwlen + 1); memcpy(keyword, kwstart, kwlen); keyword[kwlen] = '\0'; if (!has_value) value = NULL; else { value = palloc(vlen + 1); memcpy(value, vstart, vlen); value[vlen] = '\0'; } /* Handle whatever keyword we found. */ if (strcmp(keyword, "level") == 0) { result->level = expect_integer_value(keyword, value, result); /* * No need to set a flag in "options", there is a default level * set at least thanks to the logic above. */ } else if (strcmp(keyword, "workers") == 0) { result->workers = expect_integer_value(keyword, value, result); result->options |= PG_COMPRESSION_OPTION_WORKERS; } else if (strcmp(keyword, "long") == 0) { result->long_distance = expect_boolean_value(keyword, value, result); result->options |= PG_COMPRESSION_OPTION_LONG_DISTANCE; } else result->parse_error = psprintf(_("unrecognized compression option: \"%s\""), keyword); /* Release memory, just to be tidy. */ pfree(keyword); if (value != NULL) pfree(value); /* * If we got an error or have reached the end of the string, stop. * * If there is no value, then the end of the keyword might have been * the end of the string. If there is a value, then the end of the * keyword cannot have been the end of the string, but the end of the * value might have been. */ if (result->parse_error != NULL || (vend == NULL ? *kwend == '\0' : *vend == '\0')) break; /* Advance to next entry and loop around. */ specification = vend == NULL ? kwend + 1 : vend + 1; } } /* * Parse 'value' as an integer and return the result. * * If parsing fails, set result->parse_error to an appropriate message * and return -1. 
*/ static int expect_integer_value(char *keyword, char *value, pg_compress_specification *result) { int ivalue; char *ivalue_endp; if (value == NULL) { result->parse_error = psprintf(_("compression option \"%s\" requires a value"), keyword); return -1; } ivalue = strtol(value, &ivalue_endp, 10); if (ivalue_endp == value || *ivalue_endp != '\0') { result->parse_error = psprintf(_("value for compression option \"%s\" must be an integer"), keyword); return -1; } return ivalue; } /* * Parse 'value' as a boolean and return the result. * * If parsing fails, set result->parse_error to an appropriate message * and return -1. The caller must check result->parse_error to determine if * the call was successful. * * Valid values are: yes, no, on, off, 1, 0. * * Inspired by ParseVariableBool(). */ static bool expect_boolean_value(char *keyword, char *value, pg_compress_specification *result) { if (value == NULL) return true; if (pg_strcasecmp(value, "yes") == 0) return true; if (pg_strcasecmp(value, "on") == 0) return true; if (pg_strcasecmp(value, "1") == 0) return true; if (pg_strcasecmp(value, "no") == 0) return false; if (pg_strcasecmp(value, "off") == 0) return false; if (pg_strcasecmp(value, "0") == 0) return false; result->parse_error = psprintf(_("value for compression option \"%s\" must be a Boolean value"), keyword); return false; } /* * Returns NULL if the compression specification string was syntactically * valid and semantically sensible. Otherwise, returns an error message. * * Does not test whether this build of PostgreSQL supports the requested * compression method. */ char * validate_compress_specification(pg_compress_specification *spec) { int min_level = 1; int max_level = 1; int default_level = 0; /* If it didn't even parse OK, it's definitely no good. */ if (spec->parse_error != NULL) return spec->parse_error; /* * Check that the algorithm expects a compression level and it is within * the legal range for the algorithm. 
*/ switch (spec->algorithm) { case PG_COMPRESSION_GZIP: max_level = 9; #ifdef HAVE_LIBZ default_level = Z_DEFAULT_COMPRESSION; #endif break; case PG_COMPRESSION_LZ4: max_level = 12; default_level = 0; /* fast mode */ break; case PG_COMPRESSION_ZSTD: #ifdef USE_ZSTD max_level = ZSTD_maxCLevel(); min_level = ZSTD_minCLevel(); default_level = ZSTD_CLEVEL_DEFAULT; #endif break; case PG_COMPRESSION_NONE: if (spec->level != 0) return psprintf(_("compression algorithm \"%s\" does not accept a compression level"), get_compress_algorithm_name(spec->algorithm)); break; } if ((spec->level < min_level || spec->level > max_level) && spec->level != default_level) return psprintf(_("compression algorithm \"%s\" expects a compression level between %d and %d (default at %d)"), get_compress_algorithm_name(spec->algorithm), min_level, max_level, default_level); /* * Of the compression algorithms that we currently support, only zstd * allows parallel workers. */ if ((spec->options & PG_COMPRESSION_OPTION_WORKERS) != 0 && (spec->algorithm != PG_COMPRESSION_ZSTD)) { return psprintf(_("compression algorithm \"%s\" does not accept a worker count"), get_compress_algorithm_name(spec->algorithm)); } /* * Of the compression algorithms that we currently support, only zstd * supports long-distance mode. */ if ((spec->options & PG_COMPRESSION_OPTION_LONG_DISTANCE) != 0 && (spec->algorithm != PG_COMPRESSION_ZSTD)) { return psprintf(_("compression algorithm \"%s\" does not support long-distance mode"), get_compress_algorithm_name(spec->algorithm)); } return NULL; } #ifdef FRONTEND /* * Basic parsing of a value specified through a command-line option, commonly * -Z/--compress. * * The parsing consists of a METHOD:DETAIL string fed later to * parse_compress_specification(). This only extracts METHOD and DETAIL. * If only an integer is found, the method is implied by the value specified. 
*/ void parse_compress_options(const char *option, char **algorithm, char **detail) { const char *sep; char *endp; long result; /* * Check whether the compression specification consists of a bare integer. * * For backward-compatibility, assume "none" if the integer found is zero * and "gzip" otherwise. */ result = strtol(option, &endp, 10); if (*endp == '\0') { if (result == 0) { *algorithm = pstrdup("none"); *detail = NULL; } else { *algorithm = pstrdup("gzip"); *detail = pstrdup(option); } return; } /* * Check whether there is a compression detail following the algorithm * name. */ sep = strchr(option, ':'); if (sep == NULL) { *algorithm = pstrdup(option); *detail = NULL; } else { char *alg; alg = palloc((sep - option) + 1); memcpy(alg, option, sep - option); alg[sep - option] = '\0'; *algorithm = alg; *detail = pstrdup(sep + 1); } } #endif /* FRONTEND */
c
github
https://github.com/postgres/postgres
src/common/compression.c
/*! * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import { Ellipsis } from "./Ellipsis"; import { Item } from "./Item"; import { Items } from "./Items"; import { NextTrigger } from "./NextTrigger"; import { PageText } from "./PageText"; import { PrevTrigger } from "./PrevTrigger"; import { Root } from "./Root"; export const Pagination = { Ellipsis, Item, Items, NextTrigger, PageText, PrevTrigger, Root, };
typescript
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/src/components/ui/Pagination/index.ts
""" .. module:: dj-stripe.tests.test_contrib.test_views :synopsis: dj-stripe Rest views for Subscription Tests. .. moduleauthor:: Philippe Luickx (@philippeluickx) """ from __future__ import unicode_literals from decimal import Decimal from django.utils import timezone from django.conf import settings from django.contrib.auth import get_user_model from django.core.urlresolvers import reverse from mock import patch, PropertyMock from rest_framework import status from rest_framework.test import APITestCase from djstripe.models import CurrentSubscription, Customer from djstripe import settings as djstripe_settings if settings.STRIPE_PUBLIC_KEY and settings.STRIPE_SECRET_KEY: import stripe stripe.api_key = settings.STRIPE_SECRET_KEY class RestSubscriptionTest(APITestCase): """ Test the REST api for subscriptions. """ def setUp(self): self.url = reverse("rest_djstripe:subscription") self.user = get_user_model().objects.create_user( username="testuser", email="test@example.com", password="123" ) self.assertTrue(self.client.login(username="testuser", password="123")) @patch("djstripe.models.Customer.subscribe", autospec=True) @patch("djstripe.models.Customer.update_card", autospec=True) @patch("stripe.Customer.create", return_value=PropertyMock(id="cus_xxx1234567890")) def test_create_subscription(self, stripe_customer_mock, update_card_mock, subscribe_mock): self.assertEqual(0, Customer.objects.count()) data = { "plan": "test0", "stripe_token": "cake", } response = self.client.post(self.url, data) self.assertEqual(1, Customer.objects.count()) update_card_mock.assert_called_once_with(self.user.customer, "cake") subscribe_mock.assert_called_once_with(self.user.customer, "test0") self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.data, data) @patch("djstripe.models.Customer.subscribe", autospec=True) @patch("djstripe.models.Customer.update_card", autospec=True) @patch("stripe.Customer.create", 
return_value=PropertyMock(id="cus_xxx1234567890")) def test_create_subscription_exception(self, stripe_customer_mock, update_card_mock, subscribe_mock): e = Exception subscribe_mock.side_effect = e data = { "plan": "test0", "stripe_token": "cake", } response = self.client.post(self.url, data) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_get_no_content_for_subscription(self): response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) def test_get_subscription(self): fake_customer = Customer.objects.create( stripe_id="cus_xxx1234567890", subscriber=self.user ) CurrentSubscription.objects.create( customer=fake_customer, plan="test", quantity=1, start=timezone.now(), amount=Decimal(25.00), status="active", ) response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data["plan"], "test") self.assertEqual(response.data['status'], 'active') self.assertEqual(response.data['cancel_at_period_end'], False) @patch("djstripe.models.Customer.cancel_subscription", return_value=CurrentSubscription(status=CurrentSubscription.STATUS_ACTIVE)) @patch("djstripe.models.Customer.current_subscription", new_callable=PropertyMock, return_value=CurrentSubscription(plan="test", amount=Decimal(25.00), status="active")) @patch("djstripe.models.Customer.subscribe", autospec=True) def test_cancel_subscription(self, subscribe_mock, stripe_create_customer_mock, cancel_subscription_mock): fake_customer = Customer.objects.create( stripe_id="cus_xxx1234567890", subscriber=self.user ) CurrentSubscription.objects.create( customer=fake_customer, plan="test", quantity=1, start=timezone.now(), amount=Decimal(25.00), status="active", ) self.assertEqual(1, CurrentSubscription.objects.count()) response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) # Cancelled means flagged as cancelled, so it should still be there 
self.assertEqual(1, CurrentSubscription.objects.count()) cancel_subscription_mock.assert_called_once_with( at_period_end=djstripe_settings.CANCELLATION_AT_PERIOD_END ) self.assertTrue(self.user.is_authenticated()) def test_cancel_subscription_exception(self): response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_create_subscription_incorrect_data(self): self.assertEqual(0, Customer.objects.count()) data = { "foo": "bar", } response = self.client.post(self.url, data) self.assertEqual(0, Customer.objects.count()) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) class RestSubscriptionNotLoggedInTest(APITestCase): """ Test the exceptions thrown by the subscription rest views. """ def setUp(self): self.url = reverse("rest_djstripe:subscription") def test_create_subscription_not_logged_in(self): self.assertEqual(0, Customer.objects.count()) data = { "plan": "test0", "stripe_token": "cake", } response = self.client.post(self.url, data) self.assertEqual(0, Customer.objects.count()) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
unknown
codeparrot/codeparrot-clean
""" Tests the interface.renew_vessels and interface.renew_all_vessels calls. """ #pragma out #pragma error OK # The clearinghouse testlib must be imported first. from clearinghouse.tests import testlib from clearinghouse.tests import mocklib from clearinghouse.common.api import maindb from clearinghouse.common.exceptions import * from clearinghouse.website.control import interface from clearinghouse.website.tests import testutil import datetime import unittest mocklib.mock_lockserver_calls() class SeattleGeniTestCase(unittest.TestCase): def setUp(self): # Setup a fresh database for each test. testlib.setup_test_db() def tearDown(self): # Cleanup the test database. testlib.teardown_test_db() def test_renew_vessels_insufficient_vessel_credits(self): # Create a user who will be doing the acquiring. user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4") userport = user.usable_vessel_port vesselcount = 4 # Have every vessel acquisition to the backend request succeed. calls_results = [True] * vesselcount mocklib.mock_backend_acquire_vessel(calls_results) testutil.create_nodes_on_different_subnets(vesselcount, [userport]) # Acquire all of the vessels the user can acquire. vessel_list = interface.acquire_vessels(user, vesselcount, 'rand') # Decrease the user's vessel credits to one less than the number of vessels # they have acquired. user.free_vessel_credits = 0 user.save() func = interface.renew_vessels args = (user, vessel_list) self.assertRaises(InsufficientUserResourcesError, func, *args) func = interface.renew_all_vessels args = (user,) self.assertRaises(InsufficientUserResourcesError, func, *args) def test_renew_some_of_users_vessel(self): # Create a user who will be doing the acquiring. user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4") userport = user.usable_vessel_port vesselcount = 4 # Have every vessel acquisition to the backend request succeed. 
calls_results = [True] * vesselcount mocklib.mock_backend_acquire_vessel(calls_results) testutil.create_nodes_on_different_subnets(vesselcount, [userport]) # Acquire all of the vessels the user can acquire. vessel_list = interface.acquire_vessels(user, vesselcount, 'rand') renew_vessels_list = vessel_list[:2] not_renewed_vessels_list = vessel_list[2:] interface.renew_vessels(user, renew_vessels_list) now = datetime.datetime.now() timedelta_oneday = datetime.timedelta(days=1) for vessel in renew_vessels_list: self.assertTrue(vessel.date_expires - now > timedelta_oneday) for vessel in not_renewed_vessels_list: self.assertTrue(vessel.date_expires - now < timedelta_oneday) def test_renew_vessels_dont_belong_to_user(self): # Create a user who will be doing the acquiring. user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4") userport = user.usable_vessel_port # Create a second user. user2 = maindb.create_user("user2", "password", "user2@example.com", "affiliation", "1 2", "2 2 2", "3 4") vesselcount = 4 # Have every vessel acquisition to the backend request succeed. calls_results = [True] * vesselcount mocklib.mock_backend_acquire_vessel(calls_results) testutil.create_nodes_on_different_subnets(vesselcount, [userport]) # Acquire all of the vessels the user can acquire. vessel_list = interface.acquire_vessels(user, vesselcount, 'rand') release_vessel = vessel_list[0] interface.release_vessels(user, [release_vessel]) # Manually fiddle with one of the vessels to make it owned by user2. user2_vessel = vessel_list[1] user2_vessel.acquired_by_user = user2 user2_vessel.save() # Try to renew all of the originally acquired vessels, including the ones # that were released. We expect these to just be ignored. interface.renew_vessels(user, vessel_list) # Get fresh vessel objects that reflect the renewal. 
remaining_vessels = interface.get_acquired_vessels(user) release_vessel = maindb.get_vessel(release_vessel.node.node_identifier, release_vessel.name) user2_vessel = maindb.get_vessel(user2_vessel.node.node_identifier, user2_vessel.name) now = datetime.datetime.now() timedelta_oneday = datetime.timedelta(days=1) # Ensure that the vessels the user still has were renewed but that the ones # the user released were ignored (not renewed). for vessel in remaining_vessels: self.assertTrue(vessel.date_expires - now > timedelta_oneday) self.assertTrue(user2_vessel.date_expires - now < timedelta_oneday) self.assertEqual(release_vessel.date_expires, None) def run_test(): unittest.main() if __name__ == "__main__": run_test()
unknown
codeparrot/codeparrot-clean
# Copyright (C) 2007 Adriano Monteiro Marques # # Authors: Tianwei Liu <liutianweidlut@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA """ TimeLine Part in Dashboard Window """ import gtk from umit.icm.agent.I18N import _ from umit.icm.agent.gui.dashboard.timeline.TimeLineGraph import InteractiveGraph from umit.icm.agent.gui.dashboard.timeline.TimeLineGraphToolbar import TimeLineGraphToolbar from umit.icm.agent.gui.dashboard.timeline.TimeLineGraphBase import TimeLineBase from deps.higwidgets.higboxes import HIGHBox, HIGVBox,hig_box_space_holder class TLHoder(gtk.VBox): def __init__(self,dashboard,connector=None): #maybe import some kinds(report,task,connection,Throttled,Service) """ Load timeline for every report(sent or unsent), test successful or failed (website or service) task (done or not), Throttled details(different charts) """ gtk.VBox.__init__(self) self.connector = connector self.dashboard = dashboard self.base = TimeLineBase(self.connector,self.dashboard) #Maybe add some items self.__create_widgets() self.__packed_widgets() self.__connect_widgets() def __create_widgets(self): """ """ # startup data line_filter, start, evts = self.base.grab_data() xlabel = self.base.xlabel glabel = self.base.title_by_graphmode() dlabel = self.base.descr_by_graphmode() #Box self.box = HIGVBox() #graph self.graph_box = 
gtk.HBox() self.graph = InteractiveGraph(evts, start, x_label=xlabel, y_label=_('Number of events'), graph_label=glabel, descr_label=dlabel, vdiv_labels=self.base.labels, line_filter=line_filter, connector=self.connector) #graph toolbar self.graphtb = TimeLineGraphToolbar(self.graph, self.connector, self.base.graph_mode,self.base.graph_kind, self.base) #TODO: Add Display Bar in the further def __packed_widgets(self): """ """ self.graph_box.add(self.graph) self.box._pack_noexpand_nofill(self.graphtb) self.box._pack_expand_fill(self.graph_box) self.add(self.box) self.show_all() def __connect_widgets(self): """ Handle the connector signals """ self.connector.connect('data_update',self._update_graph) #TODO: we should add signals for the changes of left treeview def _update_graph(self,obj,*args): """ New graph data arrived """ line_filter, start, evts, labels, xlabel, glabel, dlabel = args # new graph data self.graph.start_pts_data = start self.graph.graph_data = evts # find new max value self.graph.find_max_value() # update graph labels self.graph.xlabel = xlabel self.graph.graph_label = glabel self.graph.descr_label = dlabel self.graph.vdiv_labels = labels # do graph animation with new data self.graph.do_animation()
unknown
codeparrot/codeparrot-clean
twig: cache: false
unknown
github
https://github.com/symfony/symfony
src/Symfony/Bundle/TwigBundle/Tests/DependencyInjection/Fixtures/yml/no-cache.yml
/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package app import ( "context" "fmt" "os" "strings" "time" "github.com/fsnotify/fsnotify" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apiserver/pkg/server/flagz" utilfeature "k8s.io/apiserver/pkg/util/feature" cliflag "k8s.io/component-base/cli/flag" logsapi "k8s.io/component-base/logs/api/v1" zpagesfeatures "k8s.io/component-base/zpages/features" "k8s.io/klog/v2" "k8s.io/kube-proxy/config/v1alpha1" "k8s.io/kubernetes/pkg/cluster/ports" "k8s.io/kubernetes/pkg/kubelet/qos" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1" "k8s.io/kubernetes/pkg/proxy/apis/config/validation" proxyutil "k8s.io/kubernetes/pkg/proxy/util" "k8s.io/kubernetes/pkg/util/filesystem" utilflag "k8s.io/kubernetes/pkg/util/flag" "k8s.io/utils/ptr" ) // Options contains everything necessary to create and run a proxy server. type Options struct { // ConfigFile is the location of the proxy server's configuration file. ConfigFile string // WriteConfigTo is the path where the default configuration will be written. WriteConfigTo string // CleanupAndExit, when true, makes the proxy server clean up iptables and ipvs rules, then exit. 
CleanupAndExit bool // InitAndExit, when true, makes the proxy server makes configurations that need privileged access, then exit. InitAndExit bool // config is the proxy server's configuration object. config *kubeproxyconfig.KubeProxyConfiguration // watcher is used to watch on the update change of ConfigFile watcher filesystem.FSWatcher // proxyServer is the interface to run the proxy server proxyServer proxyRun // errCh is the channel that errors will be sent errCh chan error // flagz is the Reader interface to get flags for the flagz page. flagz flagz.Reader // The fields below here are placeholders for flags that can't be directly mapped into // config.KubeProxyConfiguration. // // TODO remove these fields once the deprecated flags are removed. // master is used to override the kubeconfig's URL to the apiserver. master string // healthzPort is the port to be used by the healthz server. healthzPort int32 // metricsPort is the port to be used by the metrics server. metricsPort int32 // hostnameOverride, if set from the command line flag, takes precedence over the `HostnameOverride` value from the config file hostnameOverride string logger klog.Logger // The fields below here are placeholders for flags that can't be directly mapped into // config.KubeProxyConfiguration. iptablesSyncPeriod time.Duration iptablesMinSyncPeriod time.Duration ipvsSyncPeriod time.Duration ipvsMinSyncPeriod time.Duration clusterCIDRs string } // AddFlags adds flags to fs and binds them to options. 
func (o *Options) AddFlags(fs *pflag.FlagSet) { o.addOSFlags(fs) fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.") fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the default configuration values to this file and exit.") fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+ "This parameter is ignored if a config file is specified by --config.") fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).") fs.StringVar(&o.master, "master", o.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will be used as the name of the Node that kube-proxy is running on. If unset, the node name is assumed to be the same as the node's hostname.") fs.Var(&utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "Overrides kube-proxy's idea of what its node's primary IP is. 
Note that the name is a historical artifact, and kube-proxy does not actually bind any sockets to this IP. This parameter is ignored if a config file is specified by --config.") fs.Var(&utilflag.IPPortVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on, defaulting to \"0.0.0.0:10256\". This parameter is ignored if a config file is specified by --config.") fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on, defaulting to \"127.0.0.1:10249\". (Set to \"0.0.0.0:10249\" / \"[::]:10249\" to bind on all interfaces.) Set empty to disable. This parameter is ignored if a config file is specified by --config.") fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit") fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "If true enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.") fs.StringVar(&o.config.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.config.ShowHiddenMetricsForVersion, "The previous version for which you want to show hidden metrics. "+ "Only the previous minor version is meaningful, other values will not be allowed. "+ "The format is <major>.<minor>, e.g.: '1.16'. "+ "The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+ "rather than being surprised when they are permanently removed in the release after that. "+ "This parameter is ignored if a config file is specified by --config.") fs.BoolVar(&o.InitAndExit, "init-only", o.InitAndExit, "If true, perform any initialization steps that must be done with full root privileges, and then exit. 
After doing this, you can run kube-proxy again with only the CAP_NET_ADMIN capability.") fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default), 'ipvs', or 'nftables'. On Windows the only supported value is 'kernelspace'. "+ "This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", ptr.Deref(o.config.IPTables.MasqueradeBit, 14), "If using the iptables or ipvs proxy mode, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") fs.BoolVar(&o.config.Linux.MasqueradeAll, "masquerade-all", o.config.Linux.MasqueradeAll, "SNAT all traffic sent via Service cluster IPs. This may be required with some CNI plugins. Only supported on Linux.") fs.BoolVar(o.config.IPTables.LocalhostNodePorts, "iptables-localhost-nodeports", ptr.Deref(o.config.IPTables.LocalhostNodePorts, true), "If false, kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost. (Applies only to iptables mode and IPv4; localhost NodePorts are never allowed with other proxy modes or with IPv6.)") fs.DurationVar(&o.iptablesSyncPeriod, "iptables-sync-period", o.config.SyncPeriod.Duration, "An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.") fs.DurationVar(&o.iptablesMinSyncPeriod, "iptables-min-sync-period", o.config.MinSyncPeriod.Duration, "The minimum period between iptables rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate iptables resync.") fs.DurationVar(&o.ipvsSyncPeriod, "ipvs-sync-period", o.config.SyncPeriod.Duration, "An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. 
Must be greater than 0.") fs.DurationVar(&o.ipvsMinSyncPeriod, "ipvs-min-sync-period", o.config.MinSyncPeriod.Duration, "The minimum period between IPVS rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate IPVS resync.") fs.StringVar(&o.config.IPVS.Scheduler, "ipvs-scheduler", o.config.IPVS.Scheduler, "The ipvs scheduler type when proxy mode is ipvs") fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.") fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2") fs.DurationVar(&o.config.IPVS.TCPTimeout.Duration, "ipvs-tcp-timeout", o.config.IPVS.TCPTimeout.Duration, "The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") fs.DurationVar(&o.config.IPVS.TCPFinTimeout.Duration, "ipvs-tcpfin-timeout", o.config.IPVS.TCPFinTimeout.Duration, "The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") fs.DurationVar(&o.config.IPVS.UDPTimeout.Duration, "ipvs-udp-timeout", o.config.IPVS.UDPTimeout.Duration, "The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") fs.Var(&o.config.DetectLocalMode, "detect-local-mode", "Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.") fs.StringVar(&o.config.DetectLocal.BridgeInterface, "pod-bridge-interface", o.config.DetectLocal.BridgeInterface, "A bridge interface name. When --detect-local-mode is set to BridgeInterface, kube-proxy will consider traffic to be local if it originates from this bridge.") fs.StringVar(&o.config.DetectLocal.InterfaceNamePrefix, "pod-interface-name-prefix", o.config.DetectLocal.InterfaceNamePrefix, "An interface name prefix. 
When --detect-local-mode is set to InterfaceNamePrefix, kube-proxy will consider traffic to be local if it originates from any interface whose name begins with this prefix.") fs.StringVar(&o.clusterCIDRs, "cluster-cidr", strings.Join(o.config.DetectLocal.ClusterCIDRs, ","), "The CIDR range of the pods in the cluster. (For dual-stack clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When --detect-local-mode is set to ClusterCIDR, kube-proxy will consider traffic to be local if its source IP is in this range. (Otherwise it is not used.) "+ "This parameter is ignored if a config file is specified by --config.") fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses, "A list of CIDR ranges that contain valid node IPs, or alternatively, the single string 'primary'. If set to a list of CIDRs, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If set to 'primary', NodePort services will only be accepted on the node's primary IP(s) according to the Node object. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.Linux.OOMScoreAdj, "oom-score-adj", ptr.Deref(o.config.Linux.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. 
This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.Linux.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Linux.Conntrack.MaxPerCore, "Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).") fs.Int32Var(o.config.Linux.Conntrack.Min, "conntrack-min", *o.config.Linux.Conntrack.Min, "Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).") fs.DurationVar(&o.config.Linux.Conntrack.TCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", o.config.Linux.Conntrack.TCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)") fs.DurationVar( &o.config.Linux.Conntrack.TCPCloseWaitTimeout.Duration, "conntrack-tcp-timeout-close-wait", o.config.Linux.Conntrack.TCPCloseWaitTimeout.Duration, "NAT timeout for TCP connections in the CLOSE_WAIT state") fs.BoolVar(&o.config.Linux.Conntrack.TCPBeLiberal, "conntrack-tcp-be-liberal", o.config.Linux.Conntrack.TCPBeLiberal, "Enable liberal mode for tracking TCP packets by setting nf_conntrack_tcp_be_liberal to 1") fs.DurationVar(&o.config.Linux.Conntrack.UDPTimeout.Duration, "conntrack-udp-timeout", o.config.Linux.Conntrack.UDPTimeout.Duration, "Idle timeout for UNREPLIED UDP connections (0 to leave as-is)") fs.DurationVar(&o.config.Linux.Conntrack.UDPStreamTimeout.Duration, "conntrack-udp-timeout-stream", o.config.Linux.Conntrack.UDPStreamTimeout.Duration, "Idle timeout for ASSURED UDP connections (0 to leave as-is)") fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.") fs.Int32Var(&o.healthzPort, "healthz-port", o.healthzPort, "The port to bind the health check server. 
Use 0 to disable.") _ = fs.MarkDeprecated("healthz-port", "This flag is deprecated and will be removed in a future release. Please use --healthz-bind-address instead.") fs.Int32Var(&o.metricsPort, "metrics-port", o.metricsPort, "The port to bind the metrics server. Use 0 to disable.") _ = fs.MarkDeprecated("metrics-port", "This flag is deprecated and will be removed in a future release. Please use --metrics-bind-address instead.") logsapi.AddFlags(&o.config.Logging, fs) } // newKubeProxyConfiguration returns a KubeProxyConfiguration with default values func newKubeProxyConfiguration() *kubeproxyconfig.KubeProxyConfiguration { versionedConfig := &v1alpha1.KubeProxyConfiguration{} proxyconfigscheme.Scheme.Default(versionedConfig) internalConfig, err := proxyconfigscheme.Scheme.ConvertToVersion(versionedConfig, kubeproxyconfig.SchemeGroupVersion) if err != nil { panic(fmt.Sprintf("Unable to create default config: %v", err)) } return internalConfig.(*kubeproxyconfig.KubeProxyConfiguration) } // NewOptions returns initialized Options func NewOptions() *Options { return &Options{ config: newKubeProxyConfiguration(), healthzPort: ports.ProxyHealthzPort, metricsPort: ports.ProxyStatusPort, errCh: make(chan error), logger: klog.FromContext(context.Background()), } } // Complete completes all the required options. func (o *Options) Complete(fs *pflag.FlagSet) error { if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 { o.config.HealthzBindAddress = addressFromDeprecatedFlags(o.config.HealthzBindAddress, o.healthzPort) o.config.MetricsBindAddress = addressFromDeprecatedFlags(o.config.MetricsBindAddress, o.metricsPort) } // Load the config file here in Complete, so that Validate validates the fully-resolved config. 
if len(o.ConfigFile) > 0 { c, err := o.loadConfigFromFile(o.ConfigFile) if err != nil { return err } // Before we overwrite the config which holds the parsed // command line parameters, we need to copy all modified // logging settings over to the loaded config (i.e. logging // command line flags have priority). Otherwise `--config // ... -v=5` doesn't work (config resets verbosity even // when it contains no logging settings). _ = copyLogsFromFlags(fs, &c.Logging) o.config = c if err := o.initWatcher(); err != nil { return err } } else { o.processV1Alpha1Flags(fs) } o.platformApplyDefaults(o.config) if err := o.processHostnameOverrideFlag(); err != nil { return err } if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(o.config.FeatureGates); err != nil { return err } if utilfeature.DefaultFeatureGate.Enabled(zpagesfeatures.ComponentFlagz) { nfs := cliflag.NamedFlagSets{ FlagSets: make(map[string]*pflag.FlagSet), } nfs.FlagSets["generic"] = fs o.flagz = flagz.NamedFlagSetsReader{ FlagSets: nfs, } } return nil } // copyLogsFromFlags applies the logging flags from the given flag set to the given // configuration. Fields for which the corresponding flag was not used are left // unmodified. For fields that have multiple values (like vmodule), the values from // the flags get joined so that the command line flags have priority. // // TODO (pohly): move this to logsapi func copyLogsFromFlags(from *pflag.FlagSet, to *logsapi.LoggingConfiguration) error { var cloneFS pflag.FlagSet logsapi.AddFlags(to, &cloneFS) vmodule := to.VModule to.VModule = nil var err error cloneFS.VisitAll(func(f *pflag.Flag) { if err != nil { return } fsFlag := from.Lookup(f.Name) if fsFlag == nil { err = fmt.Errorf("logging flag %s not found in flag set", f.Name) return } if !fsFlag.Changed { return } if setErr := f.Value.Set(fsFlag.Value.String()); setErr != nil { err = fmt.Errorf("copying flag %s value: %w", f.Name, setErr) return } }) to.VModule = append(to.VModule, vmodule...) 
return err } // Creates a new filesystem watcher and adds watches for the config file. func (o *Options) initWatcher() error { fswatcher := filesystem.NewFsnotifyWatcher() err := fswatcher.Init(o.eventHandler, o.errorHandler) if err != nil { return err } err = fswatcher.AddWatch(o.ConfigFile) if err != nil { return err } o.watcher = fswatcher return nil } func (o *Options) eventHandler(ent fsnotify.Event) { if ent.Has(fsnotify.Write) || ent.Has(fsnotify.Rename) { // error out when ConfigFile is updated o.errCh <- fmt.Errorf("content of the proxy server's configuration file was updated") return } o.errCh <- nil } func (o *Options) errorHandler(err error) { o.errCh <- err } // processHostnameOverrideFlag processes hostname-override flag func (o *Options) processHostnameOverrideFlag() error { // Check if hostname-override flag is set and use value since configFile always overrides if len(o.hostnameOverride) > 0 { hostName := strings.TrimSpace(o.hostnameOverride) if len(hostName) == 0 { return fmt.Errorf("empty hostname-override is invalid") } o.config.HostnameOverride = strings.ToLower(hostName) } return nil } // processV1Alpha1Flags processes v1alpha1 flags which can't be directly mapped to internal config. 
func (o *Options) processV1Alpha1Flags(fs *pflag.FlagSet) { if fs.Changed("iptables-sync-period") && o.config.Mode != kubeproxyconfig.ProxyModeIPVS { o.config.SyncPeriod.Duration = o.iptablesSyncPeriod } if fs.Changed("iptables-min-sync-period") && o.config.Mode != kubeproxyconfig.ProxyModeIPVS { o.config.MinSyncPeriod.Duration = o.iptablesMinSyncPeriod } if fs.Changed("ipvs-sync-period") && o.config.Mode == kubeproxyconfig.ProxyModeIPVS { o.config.SyncPeriod.Duration = o.ipvsSyncPeriod } if fs.Changed("ipvs-min-sync-period") && o.config.Mode == kubeproxyconfig.ProxyModeIPVS { o.config.MinSyncPeriod.Duration = o.ipvsMinSyncPeriod } if fs.Changed("cluster-cidr") { o.config.DetectLocal.ClusterCIDRs = strings.Split(o.clusterCIDRs, ",") } } // Validate validates all the required options. func (o *Options) Validate() error { if errs := validation.Validate(o.config); len(errs) != 0 { return errs.ToAggregate() } return nil } // Run runs the specified ProxyServer. func (o *Options) Run(ctx context.Context) error { defer close(o.errCh) if len(o.WriteConfigTo) > 0 { return o.writeConfigFile() } err := platformCleanup(ctx, o.config.Mode, o.CleanupAndExit) if o.CleanupAndExit { return err } // We ignore err otherwise; the cleanup is best-effort, and the backends will have // logged messages if they failed in interesting ways. proxyServer, err := newProxyServer(ctx, o.config, o.master, o.InitAndExit, o.flagz) if err != nil { return err } if o.InitAndExit { return nil } o.proxyServer = proxyServer return o.runLoop(ctx) } // runLoop will watch on the update change of the proxy server's configuration file. 
// Return an error when updated func (o *Options) runLoop(ctx context.Context) error { if o.watcher != nil { o.watcher.Run() } // run the proxy in goroutine go func() { err := o.proxyServer.Run(ctx) o.errCh <- err }() for { err := <-o.errCh if err != nil { return err } } } func (o *Options) writeConfigFile() (err error) { const mediaType = runtime.ContentTypeYAML info, ok := runtime.SerializerInfoForMediaType(proxyconfigscheme.Codecs.SupportedMediaTypes(), mediaType) if !ok { return fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) } encoder := proxyconfigscheme.Codecs.EncoderForVersion(info.Serializer, v1alpha1.SchemeGroupVersion) configFile, err := os.Create(o.WriteConfigTo) if err != nil { return err } defer func() { ferr := configFile.Close() if ferr != nil && err == nil { err = ferr } }() if err = encoder.Encode(o.config, configFile); err != nil { return err } o.logger.Info("Wrote configuration", "file", o.WriteConfigTo) return nil } // addressFromDeprecatedFlags returns server address from flags // passed on the command line based on the following rules: // 1. If port is 0, disable the server (e.g. set address to empty). // 2. Otherwise, set the port portion of the config accordingly. func addressFromDeprecatedFlags(addr string, port int32) string { if port == 0 { return "" } return proxyutil.AppendPortIfNeeded(addr, port) } // newLenientSchemeAndCodecs returns a scheme that has only v1alpha1 registered into // it and a CodecFactory with strict decoding disabled. 
func newLenientSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) { lenientScheme := runtime.NewScheme() if err := kubeproxyconfig.AddToScheme(lenientScheme); err != nil { return nil, nil, fmt.Errorf("failed to add kube-proxy config API to lenient scheme: %w", err) } if err := kubeproxyconfigv1alpha1.AddToScheme(lenientScheme); err != nil { return nil, nil, fmt.Errorf("failed to add kube-proxy config v1alpha1 API to lenient scheme: %w", err) } lenientCodecs := serializer.NewCodecFactory(lenientScheme, serializer.DisableStrict) return lenientScheme, &lenientCodecs, nil } // loadConfigFromFile loads the contents of file and decodes it as a // KubeProxyConfiguration object. func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyConfiguration, error) { data, err := os.ReadFile(file) if err != nil { return nil, err } return o.loadConfig(data) } // loadConfig decodes a serialized KubeProxyConfiguration to the internal type. func (o *Options) loadConfig(data []byte) (*kubeproxyconfig.KubeProxyConfiguration, error) { configObj, gvk, err := proxyconfigscheme.Codecs.UniversalDecoder().Decode(data, nil, nil) if err != nil { // Try strict decoding first. If that fails decode with a lenient // decoder, which has only v1alpha1 registered, and log a warning. // The lenient path is to be dropped when support for v1alpha1 is dropped. if !runtime.IsStrictDecodingError(err) { return nil, fmt.Errorf("failed to decode: %w", err) } _, lenientCodecs, lenientErr := newLenientSchemeAndCodecs() if lenientErr != nil { return nil, lenientErr } configObj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil) if lenientErr != nil { // Lenient decoding failed with the current version, return the // original strict error. return nil, fmt.Errorf("failed lenient decoding: %w", err) } // Continue with the v1alpha1 object that was decoded leniently, but emit a warning. 
o.logger.Info("Using lenient decoding as strict decoding failed", "err", err) } proxyConfig, ok := configObj.(*kubeproxyconfig.KubeProxyConfiguration) if !ok { return nil, fmt.Errorf("got unexpected config type: %v", gvk) } return proxyConfig, nil }
go
github
https://github.com/kubernetes/kubernetes
cmd/kube-proxy/app/options.go
from __future__ import absolute_import from .packages.six.moves.http_client import ( IncompleteRead as httplib_IncompleteRead ) # Base Exceptions class HTTPError(Exception): "Base exception used by this module." pass class HTTPWarning(Warning): "Base warning used by this module." pass class PoolError(HTTPError): "Base exception for errors caused within a pool." def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) def __reduce__(self): # For pickling purposes. return self.__class__, (None, None) class RequestError(PoolError): "Base exception for PoolErrors that have associated URLs." def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) def __reduce__(self): # For pickling purposes. return self.__class__, (None, self.url, None) class SSLError(HTTPError): "Raised when SSL certificate fails in an HTTPS connection." pass class ProxyError(HTTPError): "Raised when the connection to a proxy fails." pass class DecodeError(HTTPError): "Raised when automatic decoding based on Content-Type fails." pass class ProtocolError(HTTPError): "Raised when something unexpected happens mid-request/response." pass #: Renamed to ProtocolError but aliased for backwards compatibility. ConnectionError = ProtocolError # Leaf Exceptions class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. :param pool: The connection pool :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` :param string url: The requested Url :param exceptions.Exception reason: The underlying error """ def __init__(self, pool, url, reason=None): self.reason = reason message = "Max retries exceeded with url: %s (Caused by %r)" % ( url, reason) RequestError.__init__(self, pool, url, message) class HostChangedError(RequestError): "Raised when an existing pool gets a request for a foreign host." 
def __init__(self, pool, url, retries=3): message = "Tried to open a foreign host with url: %s" % url RequestError.__init__(self, pool, url, message) self.retries = retries class TimeoutStateError(HTTPError): """ Raised when passing an invalid state to a timeout """ pass class TimeoutError(HTTPError): """ Raised when a socket timeout error occurs. Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ pass class ReadTimeoutError(TimeoutError, RequestError): "Raised when a socket timeout occurs while receiving data from a server" pass # This timeout error does not have a URL attached and needs to inherit from the # base HTTPError class ConnectTimeoutError(TimeoutError): "Raised when a socket timeout occurs while connecting to a server" pass class NewConnectionError(ConnectTimeoutError, PoolError): "Raised when we fail to establish a new connection. Usually ECONNREFUSED." pass class EmptyPoolError(PoolError): "Raised when a pool runs out of connections and no more are allowed." pass class ClosedPoolError(PoolError): "Raised when a request enters a pool after the pool has been closed." pass class LocationValueError(ValueError, HTTPError): "Raised when there is something wrong with a given URL input." pass class LocationParseError(LocationValueError): "Raised when get_host or similar fails to parse the URL input." def __init__(self, location): message = "Failed to parse: %s" % location HTTPError.__init__(self, message) self.location = location class ResponseError(HTTPError): "Used as a container for an error reason supplied in a MaxRetryError." GENERIC_ERROR = 'too many error responses' SPECIFIC_ERROR = 'too many {status_code} error responses' class SecurityWarning(HTTPWarning): "Warned when performing security reducing actions" pass class SubjectAltNameWarning(SecurityWarning): "Warned when connecting to a host with a certificate missing a SAN." 
pass class InsecureRequestWarning(SecurityWarning): "Warned when making an unverified HTTPS request." pass class SystemTimeWarning(SecurityWarning): "Warned when system time is suspected to be wrong" pass class InsecurePlatformWarning(SecurityWarning): "Warned when certain SSL configuration is not available on a platform." pass class SNIMissingWarning(HTTPWarning): "Warned when making a HTTPS request without SNI available." pass class DependencyWarning(HTTPWarning): """ Warned when an attempt is made to import a module with missing optional dependencies. """ pass class ResponseNotChunked(ProtocolError, ValueError): "Response needs to be chunked in order to read it as chunks." pass class BodyNotHttplibCompatible(HTTPError): """ Body should be httplib.HTTPResponse like (have an fp attribute which returns raw chunks) for read_chunked(). """ pass class IncompleteRead(HTTPError, httplib_IncompleteRead): """ Response length doesn't match expected Content-Length Subclass of http_client.IncompleteRead to allow int value for `partial` to avoid creating large objects on streamed reads. """ def __init__(self, partial, expected): super(IncompleteRead, self).__init__(partial, expected) def __repr__(self): return ('IncompleteRead(%i bytes read, ' '%i more expected)' % (self.partial, self.expected)) class InvalidHeader(HTTPError): "The header provided was somehow invalid." pass class ProxySchemeUnknown(AssertionError, ValueError): "ProxyManager does not support the supplied scheme" # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. def __init__(self, scheme): message = "Not supported proxy scheme %s" % scheme super(ProxySchemeUnknown, self).__init__(message) class HeaderParsingError(HTTPError): "Raised by assert_header_parsing, but we convert it to a log.warning statement." 
def __init__(self, defects, unparsed_data): message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) super(HeaderParsingError, self).__init__(message) class UnrewindableBodyError(HTTPError): "urllib3 encountered an error when trying to rewind a body" pass
unknown
codeparrot/codeparrot-clean
import { A, B, C } from "./inner"; function x(type) { switch (type) { case "a": return withA("b"); case "b": return withB("c"); case "c": return "ok"; } } function y(v) { return withA(v); } function withA(v) { const value = x(v); return A(value); } function withB(v) { const value = x(v); return B(value); } function withC(v) { const value = x(v); return C(value); } export { x, y };
javascript
github
https://github.com/webpack/webpack
test/cases/inner-graph/circular/module.js
//@flow const foo = undefined; component C(...{scope = foo ?? null}: any) { return scope; } export const FIXTURE_ENTRYPOINT = { fn: C, params: [{scope: undefined}], };
javascript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/logical-reorder.flow.js
<table border="1" class="dataframe"> <thead> <tr> <th></th> <th></th> <th></th> <th colspan="2" halign="left">a</th> <th>...</th> <th colspan="2" halign="left">b</th> </tr> <tr> <th></th> <th></th> <th></th> <th colspan="2" halign="left">c</th> <th>...</th> <th colspan="2" halign="left">d</th> </tr> <tr> <th></th> <th></th> <th></th> <th>e</th> <th>f</th> <th>...</th> <th>e</th> <th>f</th> </tr> <tr> <th>foo</th> <th></th> <th>baz</th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th rowspan="2" valign="top">a</th> <th rowspan="2" valign="top">c</th> <th>e</th> <td>0</td> <td>1</td> <td>...</td> <td>6</td> <td>7</td> </tr> <tr> <th>f</th> <td>8</td> <td>9</td> <td>...</td> <td>14</td> <td>15</td> </tr> <tr> <th>...</th> <th>...</th> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th rowspan="2" valign="top">b</th> <th rowspan="2" valign="top">d</th> <th>e</th> <td>48</td> <td>49</td> <td>...</td> <td>54</td> <td>55</td> </tr> <tr> <th>f</th> <td>56</td> <td>57</td> <td>...</td> <td>62</td> <td>63</td> </tr> </tbody> </table>
html
github
https://github.com/pandas-dev/pandas
pandas/tests/io/formats/data/html/trunc_df_index_named_multi_columns_unnamed_multi.html
// #docplaster import {Component, signal} from '@angular/core'; @Component({ selector: 'app-leave-binding', templateUrl: 'leave-binding.html', styleUrls: ['leave-binding.css'], }) export class LeaveBinding { isShown = signal(false); toggle() { this.isShown.update((isShown) => !isShown); } farewell = signal('leaving'); }
typescript
github
https://github.com/angular/angular
adev/src/content/examples/animations/src/app/enter-and-leave/leave-binding.ts
// Copyright 2021 The Abseil Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_STRINGS_CORDZ_TEST_HELPERS_H_ #define ABSL_STRINGS_CORDZ_TEST_HELPERS_H_ #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" #include "absl/base/macros.h" #include "absl/strings/cord.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cordz_info.h" #include "absl/strings/internal/cordz_sample_token.h" #include "absl/strings/internal/cordz_statistics.h" #include "absl/strings/internal/cordz_update_tracker.h" #include "absl/strings/str_cat.h" namespace absl { ABSL_NAMESPACE_BEGIN // Returns the CordzInfo for the cord, or nullptr if the cord is not sampled. inline const cord_internal::CordzInfo* GetCordzInfoForTesting( const Cord& cord) { if (!cord.contents_.is_tree()) return nullptr; return cord.contents_.cordz_info(); } // Returns true if the provided cordz_info is in the list of sampled cords. inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info, cord_internal::CordzSampleToken token = {}) { for (const cord_internal::CordzInfo& info : token) { if (cordz_info == &info) return true; } return false; } // Matcher on Cord that verifies all of: // - the cord is sampled // - the CordzInfo of the cord is listed / discoverable. 
// - the reported CordzStatistics match the cord's actual properties // - the cord has an (initial) UpdateTracker count of 1 for `method` MATCHER_P(HasValidCordzInfoOf, method, "CordzInfo matches cord") { const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg); if (cord_info == nullptr) { *result_listener << "cord is not sampled"; return false; } if (!CordzInfoIsListed(cord_info)) { *result_listener << "cord is sampled, but not listed"; return false; } cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics(); if (stat.size != arg.size()) { *result_listener << "cordz size " << stat.size << " does not match cord size " << arg.size(); return false; } if (stat.update_tracker.Value(method) != 1) { *result_listener << "Expected method count 1 for " << method << ", found " << stat.update_tracker.Value(method); return false; } return true; } // Matcher on Cord that verifies that the cord is sampled and that the CordzInfo // update tracker has 'method' with a call count of 'n' MATCHER_P2(CordzMethodCountEq, method, n, absl::StrCat("CordzInfo method count equals ", n)) { const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg); if (cord_info == nullptr) { *result_listener << "cord is not sampled"; return false; } cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics(); if (stat.update_tracker.Value(method) != n) { *result_listener << "Expected method count " << n << " for " << method << ", found " << stat.update_tracker.Value(method); return false; } return true; } // Cordz will only update with a new rate once the previously scheduled event // has fired. When we disable Cordz, a long delay takes place where we won't // consider profiling new Cords. CordzSampleIntervalHelper will burn through // that interval and allow for testing that assumes that the average sampling // interval is a particular value. 
class CordzSamplingIntervalHelper { public: explicit CordzSamplingIntervalHelper(int32_t interval) : orig_mean_interval_(absl::cord_internal::get_cordz_mean_interval()) { absl::cord_internal::set_cordz_mean_interval(interval); absl::cord_internal::cordz_set_next_sample_for_testing(interval); } ~CordzSamplingIntervalHelper() { absl::cord_internal::set_cordz_mean_interval(orig_mean_interval_); absl::cord_internal::cordz_set_next_sample_for_testing(orig_mean_interval_); } private: int32_t orig_mean_interval_; }; // Wrapper struct managing a small CordRep `rep` struct TestCordRep { cord_internal::CordRepFlat* rep; TestCordRep() { rep = cord_internal::CordRepFlat::New(100); rep->length = 100; memset(rep->Data(), 1, 100); } ~TestCordRep() { cord_internal::CordRep::Unref(rep); } }; // Wrapper struct managing a small CordRep `rep`, and // an InlineData `data` initialized with that CordRep. struct TestCordData { TestCordRep rep; cord_internal::InlineData data{rep.rep}; }; // Creates a Cord that is not sampled template <typename... Args> Cord UnsampledCord(Args... args) { CordzSamplingIntervalHelper never(9999); Cord cord(std::forward<Args>(args)...); ABSL_ASSERT(GetCordzInfoForTesting(cord) == nullptr); return cord; } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
c
github
https://github.com/mysql/mysql-server
extra/abseil/abseil-cpp-20230802.1/absl/strings/cordz_test_helpers.h
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.ScatteringByteChannel; import java.util.stream.IntStream; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; public class NetworkReceiveTest { @Test public void testBytesRead() throws IOException { NetworkReceive receive = new NetworkReceive(128, "0"); assertEquals(0, receive.bytesRead()); ScatteringByteChannel channel = Mockito.mock(ScatteringByteChannel.class); ArgumentCaptor<ByteBuffer> bufferCaptor = ArgumentCaptor.forClass(ByteBuffer.class); Mockito.when(channel.read(bufferCaptor.capture())).thenAnswer(invocation -> { bufferCaptor.getValue().putInt(128); return 4; }).thenReturn(0); assertEquals(4, receive.readFrom(channel)); assertEquals(4, receive.bytesRead()); assertFalse(receive.complete()); Mockito.reset(channel); Mockito.when(channel.read(bufferCaptor.capture())).thenAnswer(invocation -> { 
bufferCaptor.getValue().put(TestUtils.randomBytes(64)); return 64; }); assertEquals(64, receive.readFrom(channel)); assertEquals(68, receive.bytesRead()); assertFalse(receive.complete()); Mockito.reset(channel); Mockito.when(channel.read(bufferCaptor.capture())).thenAnswer(invocation -> { bufferCaptor.getValue().put(TestUtils.randomBytes(64)); return 64; }); assertEquals(64, receive.readFrom(channel)); assertEquals(132, receive.bytesRead()); assertTrue(receive.complete()); } @Test public void testRequiredMemoryAmountKnownWhenNotSet() { NetworkReceive receive = new NetworkReceive("0"); assertFalse(receive.requiredMemoryAmountKnown(), "Memory amount should not be known before read."); } @Test public void testRequiredMemoryAmountKnownWhenSet() throws IOException { NetworkReceive receive = new NetworkReceive(128, "0"); ScatteringByteChannel channel = Mockito.mock(ScatteringByteChannel.class); ArgumentCaptor<ByteBuffer> bufferCaptor = ArgumentCaptor.forClass(ByteBuffer.class); Mockito.when(channel.read(bufferCaptor.capture())).thenAnswer(invocation -> { bufferCaptor.getValue().putInt(64); return 4; }); receive.readFrom(channel); assertTrue(receive.requiredMemoryAmountKnown(), "Memory amount should be known after read."); } @Test public void testSizeWithPredefineBuffer() { int payloadSize = 8; int expectedTotalSize = 4 + payloadSize; // 4 bytes for size buffer + payload size ByteBuffer payloadBuffer = ByteBuffer.allocate(payloadSize); IntStream.range(0, payloadSize).forEach(i -> payloadBuffer.put((byte) i)); NetworkReceive networkReceive = new NetworkReceive("0", payloadBuffer); assertEquals(expectedTotalSize, networkReceive.size(), "The total size should be the sum of the size buffer and payload."); } @Test public void testSizeAfterRead() throws IOException { int payloadSize = 32; int expectedTotalSize = 4 + payloadSize; // 4 bytes for size buffer + payload size NetworkReceive receive = new NetworkReceive(128, "0"); ScatteringByteChannel channel = 
Mockito.mock(ScatteringByteChannel.class); ArgumentCaptor<ByteBuffer> bufferCaptor = ArgumentCaptor.forClass(ByteBuffer.class); Mockito.when(channel.read(bufferCaptor.capture())).thenAnswer(invocation -> { bufferCaptor.getValue().putInt(payloadSize); return 4; }); receive.readFrom(channel); assertEquals(expectedTotalSize, receive.size(), "The total size should be the sum of the size buffer and receive size."); } }
java
github
https://github.com/apache/kafka
clients/src/test/java/org/apache/kafka/common/network/NetworkReceiveTest.java
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.watchlist.watchlistrule import WatchListRule class WatchListRuleTest(unittest.TestCase): def test_instruction_list(self): instructions = ['a', 'b'] rule = WatchListRule('definition1', instructions[:]) self.assertEqual(instructions, rule.instructions()) def test_remove_instruction(self): instructions = ['a', 'b'] rule = WatchListRule('definition1', instructions[:]) rule.remove_instruction('b') self.assertEqual(['a'], rule.instructions()) def test_simple_definition(self): definition_name = 'definition1' rule = WatchListRule(definition_name, []) self.assertTrue(rule.match([definition_name])) self.assertFalse(rule.match([definition_name + '1'])) def test_complex_definition(self): definition_name1 = 'definition1' definition_name2 = 'definition2' definition_name3 = 'definition3' rule = WatchListRule(definition_name1 + '|' + definition_name2 + '|' + definition_name3, []) self.assertTrue(rule.match([definition_name1])) self.assertTrue(rule.match([definition_name2])) self.assertTrue(rule.match([definition_name3])) self.assertFalse(rule.match([definition_name1 + '1'])) self.assertFalse(rule.match([definition_name2 + '1'])) self.assertFalse(rule.match([definition_name3 + '1']))
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.net.buildbot import Build from webkitpy.common.net.failuremap import * from webkitpy.common.net.regressionwindow import RegressionWindow from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder class FailureMapTest(unittest.TestCase): builder1 = MockBuilder("Builder1") builder2 = MockBuilder("Builder2") build1a = Build(builder1, build_number=22, revision=1233, is_green=True) build1b = Build(builder1, build_number=23, revision=1234, is_green=False) build2a = Build(builder2, build_number=89, revision=1233, is_green=True) build2b = Build(builder2, build_number=90, revision=1235, is_green=False) regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1']) regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1']) def _make_failure_map(self): failure_map = FailureMap() failure_map.add_regression_window(self.builder1, self.regression_window1) failure_map.add_regression_window(self.builder2, self.regression_window2) return failure_map def test_failing_revisions(self): failure_map = self._make_failure_map() self.assertEqual(failure_map.failing_revisions(), [1234, 1235]) def test_new_failures(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: False) self.assertEqual(failure_map.failing_revisions(), [1234, 1235]) def test_new_failures_with_old_revisions(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: revision == 1234) self.assertEqual(failure_map.failing_revisions(), []) def test_new_failures_with_more_old_revisions(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: revision == 1235) self.assertEqual(failure_map.failing_revisions(), [1234]) def test_tests_failing_for(self): failure_map = self._make_failure_map() self.assertEqual(failure_map.tests_failing_for(1234), [u'test1']) def test_failing_tests(self): failure_map 
= self._make_failure_map() self.assertEqual(failure_map.failing_tests(), set([u'test1']))
unknown
codeparrot/codeparrot-clean
"""Test the Cloudflare config flow.""" from pycfdns.exceptions import ( CloudflareAuthenticationException, CloudflareConnectionException, CloudflareZoneException, ) from homeassistant.components.cloudflare.const import CONF_RECORDS, DOMAIN from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_TOKEN, CONF_SOURCE, CONF_ZONE from homeassistant.data_entry_flow import ( RESULT_TYPE_ABORT, RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM, ) from homeassistant.setup import async_setup_component from . import ( ENTRY_CONFIG, USER_INPUT, USER_INPUT_RECORDS, USER_INPUT_ZONE, _patch_async_setup_entry, ) from tests.common import MockConfigEntry async def test_user_form(hass, cfupdate_flow): """Test we get the user initiated form.""" await async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER} ) assert result["type"] == RESULT_TYPE_FORM assert result["step_id"] == "user" assert result["errors"] == {} result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT, ) await hass.async_block_till_done() assert result["type"] == RESULT_TYPE_FORM assert result["step_id"] == "zone" assert result["errors"] == {} result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT_ZONE, ) await hass.async_block_till_done() assert result["type"] == RESULT_TYPE_FORM assert result["step_id"] == "records" assert result["errors"] == {} with _patch_async_setup_entry() as mock_setup_entry: result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT_RECORDS, ) await hass.async_block_till_done() assert result["type"] == RESULT_TYPE_CREATE_ENTRY assert result["title"] == USER_INPUT_ZONE[CONF_ZONE] assert result["data"] assert result["data"][CONF_API_TOKEN] == USER_INPUT[CONF_API_TOKEN] assert result["data"][CONF_ZONE] == USER_INPUT_ZONE[CONF_ZONE] assert result["data"][CONF_RECORDS] == 
USER_INPUT_RECORDS[CONF_RECORDS] assert result["result"] assert result["result"].unique_id == USER_INPUT_ZONE[CONF_ZONE] assert len(mock_setup_entry.mock_calls) == 1 async def test_user_form_cannot_connect(hass, cfupdate_flow): """Test we handle cannot connect error.""" instance = cfupdate_flow.return_value result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER} ) instance.get_zones.side_effect = CloudflareConnectionException() result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT, ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] == {"base": "cannot_connect"} async def test_user_form_invalid_auth(hass, cfupdate_flow): """Test we handle invalid auth error.""" instance = cfupdate_flow.return_value result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER} ) instance.get_zones.side_effect = CloudflareAuthenticationException() result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT, ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] == {"base": "invalid_auth"} async def test_user_form_invalid_zone(hass, cfupdate_flow): """Test we handle invalid zone error.""" instance = cfupdate_flow.return_value result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER} ) instance.get_zones.side_effect = CloudflareZoneException() result = await hass.config_entries.flow.async_configure( result["flow_id"], USER_INPUT, ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] == {"base": "invalid_zone"} async def test_user_form_unexpected_exception(hass, cfupdate_flow): """Test we handle unexpected exception.""" instance = cfupdate_flow.return_value result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER} ) instance.get_zones.side_effect = Exception() result = await hass.config_entries.flow.async_configure( result["flow_id"], 
USER_INPUT, ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] == {"base": "unknown"} async def test_user_form_single_instance_allowed(hass): """Test that configuring more than one instance is rejected.""" entry = MockConfigEntry(domain=DOMAIN, data=ENTRY_CONFIG) entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data=USER_INPUT, ) assert result["type"] == RESULT_TYPE_ABORT assert result["reason"] == "single_instance_allowed"
unknown
codeparrot/codeparrot-clean
// // Copyright 2018 Stefan Seefeld // Copyright 2005-2007 Adobe Systems Incorporated // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // #ifndef BOOST_GIL_HPP #define BOOST_GIL_HPP #include <boost/gil/algorithm.hpp> #include <boost/gil/bit_aligned_pixel_iterator.hpp> #include <boost/gil/bit_aligned_pixel_reference.hpp> #include <boost/gil/channel.hpp> #include <boost/gil/channel_algorithm.hpp> #include <boost/gil/cmyk.hpp> #include <boost/gil/color_base.hpp> #include <boost/gil/color_base_algorithm.hpp> #include <boost/gil/color_convert.hpp> #include <boost/gil/concepts.hpp> #include <boost/gil/device_n.hpp> #include <boost/gil/dynamic_step.hpp> #include <boost/gil/gray.hpp> #include <boost/gil/histogram.hpp> #include <boost/gil/image.hpp> #include <boost/gil/image_view.hpp> #include <boost/gil/image_view_factory.hpp> #include <boost/gil/iterator_from_2d.hpp> #include <boost/gil/locator.hpp> #include <boost/gil/metafunctions.hpp> #include <boost/gil/packed_pixel.hpp> #include <boost/gil/pixel.hpp> #include <boost/gil/pixel_iterator.hpp> #include <boost/gil/pixel_iterator_adaptor.hpp> #include <boost/gil/planar_pixel_iterator.hpp> #include <boost/gil/planar_pixel_reference.hpp> #include <boost/gil/point.hpp> #include <boost/gil/position_iterator.hpp> #include <boost/gil/premultiply.hpp> #include <boost/gil/promote_integral.hpp> #include <boost/gil/extension/rasterization/circle.hpp> #include <boost/gil/extension/rasterization/ellipse.hpp> #include <boost/gil/extension/rasterization/line.hpp> #include <boost/gil/rgb.hpp> #include <boost/gil/rgba.hpp> #include <boost/gil/step_iterator.hpp> #include <boost/gil/typedefs.hpp> #include <boost/gil/utilities.hpp> #include <boost/gil/virtual_locator.hpp> #include <boost/gil/image_processing/adaptive_histogram_equalization.hpp> #include "boost/gil/extension/image_processing/diffusion.hpp" #include 
<boost/gil/image_processing/filter.hpp> #include <boost/gil/image_processing/harris.hpp> #include <boost/gil/image_processing/hessian.hpp> #include <boost/gil/image_processing/histogram_equalization.hpp> #include <boost/gil/image_processing/histogram_matching.hpp> #include "boost/gil/extension/image_processing/hough_parameter.hpp" #include "boost/gil/extension/image_processing/hough_transform.hpp" #include <boost/gil/image_processing/morphology.hpp> #include <boost/gil/image_processing/numeric.hpp> #include <boost/gil/image_processing/scaling.hpp> #include <boost/gil/image_processing/threshold.hpp> #endif
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/gil.hpp
/* * Copyright (c) 2017 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockitoutil; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assert.fail; import static org.mockitoutil.ClassLoaders.currentClassLoader; import static org.mockitoutil.ClassLoaders.excludingClassLoader; import static org.mockitoutil.ClassLoaders.isolatedClassLoader; import static org.mockitoutil.ClassLoaders.jdkClassLoader; import java.util.concurrent.atomic.AtomicBoolean; import org.assertj.core.api.Assertions; import org.junit.Test; import org.mockito.Mockito; public class ClassLoadersTest { public static final String CLASS_NAME_DEPENDING_ON_INTERFACE = "org.mockitoutil.ClassLoadersTest$ClassUsingInterface1"; public static final String INTERFACE_NAME = "org.mockitoutil.ClassLoadersTest$Interface1"; @Test public void isolated_class_loader_cannot_load_classes_when_no_given_prefix() { // given ClassLoader cl = isolatedClassLoader().build(); // when / then assertThatThrownBy( () -> { cl.loadClass("org.mockito.Mockito"); }) .isInstanceOf(ClassNotFoundException.class) .hasMessage("Can only load classes with prefixes : [], but not : []"); } @Test public void isolated_class_loader_cannot_load_classes_if_no_code_source_path() { // given ClassLoader cl = isolatedClassLoader().withPrivateCopyOf(CLASS_NAME_DEPENDING_ON_INTERFACE).build(); // when try { cl.loadClass(CLASS_NAME_DEPENDING_ON_INTERFACE); fail(); } catch (ClassNotFoundException e) { // then assertThat(e).hasMessageContaining(CLASS_NAME_DEPENDING_ON_INTERFACE); } } @Test public void isolated_class_loader_cannot_load_classes_if_dependent_classes_do_not_match_the_prefixes() throws Exception { // given ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(CLASS_NAME_DEPENDING_ON_INTERFACE) .build(); // when try { cl.loadClass(CLASS_NAME_DEPENDING_ON_INTERFACE); 
fail(); } catch (NoClassDefFoundError e) { // then assertThat(e).hasMessageContaining("org/mockitoutil/ClassLoadersTest$Interface1"); } } @Test public void isolated_class_loader_can_load_classes_when_dependent_classes_are_matching_the_prefixes() throws Exception { // given ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(CLASS_NAME_DEPENDING_ON_INTERFACE) .withPrivateCopyOf(INTERFACE_NAME) .build(); // when Class<?> aClass = cl.loadClass(CLASS_NAME_DEPENDING_ON_INTERFACE); // then assertThat(aClass).isNotNull(); assertThat(aClass.getClassLoader()).isEqualTo(cl); assertThat(aClass.getInterfaces()[0].getClassLoader()).isEqualTo(cl); } @Test public void isolated_class_loader_can_load_classes_isolated_classes_in_isolation() throws Exception { // given ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(ClassLoadersTest.class.getPackage().getName()) .build(); // when Class<?> aClass = cl.loadClass(AClass.class.getName()); // then assertThat(aClass).isNotNull(); assertThat(aClass).isNotSameAs(AClass.class); assertThat(aClass.getClassLoader()).isEqualTo(cl); } @Test public void isolated_class_loader_cannot_load_classes_if_prefix_excluded() { // given ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(ClassLoadersTest.class.getPackage().getName()) .without(AClass.class.getName()) .build(); // when try { cl.loadClass(AClass.class.getName()); fail(); } catch (ClassNotFoundException e) { // then assertThat(e) .hasMessageContaining("org.mockitoutil") .hasMessageContaining(AClass.class.getName()); } } @Test public void isolated_class_loader_has_no_parent() { ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(CLASS_NAME_DEPENDING_ON_INTERFACE) .withPrivateCopyOf(INTERFACE_NAME) .build(); assertThat(cl.getParent()).isNull(); } @Test public void excluding_class_loader_cannot_load_classes_when_no_correct_source_url_set() { // given 
ClassLoader cl = excludingClassLoader().withCodeSourceUrlOf(this.getClass()).build(); // when / then assertThatThrownBy( () -> { cl.loadClass("org.mockito.Mockito"); }) .isInstanceOf(ClassNotFoundException.class) .hasMessage("org.mockito.Mockito"); } @Test public void excluding_class_loader_can_load_classes_when_correct_source_url_set() throws Exception { // given ClassLoader cl = excludingClassLoader().withCodeSourceUrlOf(Mockito.class).build(); // when cl.loadClass("org.mockito.Mockito"); // then class successfully loaded } @Test public void excluding_class_loader_cannot_load_class_when_excluded_prefix_match_class_to_load() throws Exception { // given ClassLoader cl = excludingClassLoader() .withCodeSourceUrlOf(Mockito.class) .without("org.mockito.BDDMockito") .build(); cl.loadClass("org.mockito.Mockito"); // when try { cl.loadClass("org.mockito.BDDMockito"); fail("should have raise a ClassNotFoundException"); } catch (ClassNotFoundException e) { assertThat(e.getMessage()).contains("org.mockito.BDDMockito"); } // then class successfully loaded } @Test public void can_not_load_a_class_not_previously_registered_in_builder() { // given ClassLoader cl = ClassLoaders.inMemoryClassLoader() .withClassDefinition( "yop.Dude", SimpleClassGenerator.makeMarkerInterface("yop.Dude")) .build(); // when try { cl.loadClass("not.Defined"); fail(); } catch (ClassNotFoundException e) { // then assertThat(e.getMessage()).contains("not.Defined"); } } @Test public void can_load_a_class_in_memory_from_bytes() throws Exception { // given ClassLoader cl = ClassLoaders.inMemoryClassLoader() .withClassDefinition( "yop.Dude", SimpleClassGenerator.makeMarkerInterface("yop.Dude")) .build(); // when Class<?> aClass = cl.loadClass("yop.Dude"); // then assertThat(aClass).isNotNull(); assertThat(aClass.getClassLoader()).isEqualTo(cl); assertThat(aClass.getName()).isEqualTo("yop.Dude"); } @Test public void cannot_load_a_class_file_not_in_parent() throws Exception { // given ClassLoader cl = 
ClassLoaders.inMemoryClassLoader().withParent(jdkClassLoader()).build(); cl.loadClass("java.lang.String"); try { // when cl.loadClass("org.mockito.Mockito"); fail("should have not found Mockito class"); } catch (ClassNotFoundException e) { // then assertThat(e.getMessage()).contains("org.mockito.Mockito"); } } @Test public void can_list_all_classes_reachable_in_a_classloader() throws Exception { ClassLoader classLoader = ClassLoaders.inMemoryClassLoader() .withParent(jdkClassLoader()) .withClassDefinition("a.A", SimpleClassGenerator.makeMarkerInterface("a.A")) .withClassDefinition( "a.b.B", SimpleClassGenerator.makeMarkerInterface("a.b.B")) .withClassDefinition("c.C", SimpleClassGenerator.makeMarkerInterface("c.C")) // .withCodeSourceUrlOf(ClassLoaders.class) .build(); assertThat(ClassLoaders.in(classLoader).listOwnedClasses()) .containsOnly("a.A", "a.b.B", "c.C"); assertThat(ClassLoaders.in(classLoader).omit("b", "c").listOwnedClasses()) .containsOnly("a.A"); } @Test public void return_bootstrap_classloader() { assertThat(jdkClassLoader()).isNotEqualTo(Mockito.class.getClassLoader()); assertThat(jdkClassLoader()).isNotEqualTo(ClassLoaders.class.getClassLoader()); assertThat(jdkClassLoader()).isEqualTo(Number.class.getClassLoader()); assertThat(jdkClassLoader()).isEqualTo(null); } @Test public void return_current_classloader() { assertThat(currentClassLoader()).isEqualTo(this.getClass().getClassLoader()); } @Test public void can_run_in_given_classloader() throws Exception { // given final ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withCodeSourceUrlOf(Assertions.class) .withPrivateCopyOf("org.assertj.core") .withPrivateCopyOf(ClassLoadersTest.class.getPackage().getName()) .without(AClass.class.getName()) .build(); final AtomicBoolean executed = new AtomicBoolean(false); // when ClassLoaders.using(cl) .execute( new Runnable() { @Override public void run() { assertThat(this.getClass().getClassLoader()) .describedAs("runnable is reloaded in 
given classloader") .isEqualTo(cl); assertThat(Thread.currentThread().getContextClassLoader()) .describedAs( "Thread context classloader is using given classloader") .isEqualTo(cl); try { assertThat( Thread.currentThread() .getContextClassLoader() .loadClass("java.lang.String")) .describedAs("can load JDK type") .isNotNull(); assertThat( Thread.currentThread() .getContextClassLoader() .loadClass( "org.mockitoutil.ClassLoadersTest$ClassUsingInterface1")) .describedAs("can load classloader types") .isNotNull(); } catch (ClassNotFoundException cnfe) { Assertions.fail("should not have raised a CNFE", cnfe); } executed.set(true); } }); // then assertThat(executed.get()).isEqualTo(true); } @Test public void cannot_load_runnable_in_given_classloader_if_some_type_cant_be_loaded() throws Exception { // given final ClassLoader cl = isolatedClassLoader() .withCurrentCodeSourceUrls() .withPrivateCopyOf(ClassLoadersTest.class.getPackage().getName()) .without(AClass.class.getName()) .build(); // when try { ClassLoaders.using(cl) .execute( new Runnable() { @Override public void run() { AClass cant_be_found = new AClass(); } }); Assertions.fail("should have raised a ClassNotFoundException"); } catch (IllegalStateException ise) { // then assertThat(ise) .hasCauseInstanceOf(NoClassDefFoundError.class) .hasMessageContaining("AClass"); } } @SuppressWarnings("unused") static class AClass {} @SuppressWarnings("unused") static class ClassUsingInterface1 implements Interface1 {} @SuppressWarnings("unused") interface Interface1 {} }
java
github
https://github.com/mockito/mockito
mockito-core/src/testFixtures/java/org/mockitoutil/ClassLoadersTest.java
//===--- InputFile.h --------------------------------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #ifndef SWIFT_FRONTEND_INPUTFILE_H #define SWIFT_FRONTEND_INPUTFILE_H #include "swift/Basic/FileTypes.h" #include "swift/Basic/PrimarySpecificPaths.h" #include "swift/Basic/SupplementaryOutputPaths.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include <string> namespace swift { /// An \c InputFile encapsulates information about an input passed to the /// frontend. /// /// Compiler inputs are usually passed on the command line without a leading /// flag. However, there are clients that use the \c CompilerInvocation as /// a library like LLDB and SourceKit that generate their own \c InputFile /// instances programmatically. Note that an \c InputFile need not actually be /// backed by a physical file, nor does its file name actually reflect its /// contents. \c InputFile has a constructor that will try to figure out the file /// type from the file name if none is provided, but many clients that /// construct \c InputFile instances themselves may provide bogus file names /// with pre-computed kinds. It is imperative that \c InputFile::getType be used /// as a source of truth for this information. /// /// \warning \c InputFile takes an unfortunately lax view of the ownership of /// its primary data. It currently only owns the file name and a copy of any /// assigned \c PrimarySpecificPaths outright. 
It is the responsibility of the /// caller to ensure that an associated memory buffer outlives the \c InputFile. class InputFile final { std::string Filename; file_types::ID FileID; llvm::PointerIntPair<llvm::MemoryBuffer *, 1, bool> BufferAndIsPrimary; PrimarySpecificPaths PSPs; public: /// Constructs an input file from the provided data. /// /// \warning This entrypoint infers the type of the file from its extension /// and is therefore not suitable for most clients that use files synthesized /// from memory buffers. Use the overload of this constructor accepting a /// memory buffer and an explicit \c file_types::ID instead. InputFile(StringRef name, bool isPrimary, llvm::MemoryBuffer *buffer = nullptr) : InputFile(name, isPrimary, buffer, file_types::lookupTypeForExtension( llvm::sys::path::extension(name))) {} /// Constructs an input file from the provided data. InputFile(StringRef name, bool isPrimary, llvm::MemoryBuffer *buffer, file_types::ID FileID) : Filename( convertBufferNameFromLLVM_getFileOrSTDIN_toSwiftConventions(name)), FileID(FileID), BufferAndIsPrimary(buffer, isPrimary), PSPs(PrimarySpecificPaths()) { assert(!name.empty()); } public: /// Retrieves the type of this input file. file_types::ID getType() const { return FileID; }; /// Retrieves whether this input file was passed as a primary to the frontend. bool isPrimary() const { return BufferAndIsPrimary.getInt(); } /// Retrieves the backing buffer for this input file, if any. llvm::MemoryBuffer *getBuffer() const { return BufferAndIsPrimary.getPointer(); } /// The name of this \c InputFile, or `-` if this input corresponds to the /// standard input stream. /// /// The returned file name is guaranteed not to be the empty string. const std::string &getFileName() const { assert(!Filename.empty()); return Filename; } /// Return Swift-standard file name from a buffer name set by /// llvm::MemoryBuffer::getFileOrSTDIN, which uses "<stdin>" instead of "-". 
static StringRef convertBufferNameFromLLVM_getFileOrSTDIN_toSwiftConventions( StringRef filename) { return filename == "<stdin>" ? "-" : filename; } /// Retrieves the name of the output file corresponding to this input. /// /// If there is no such corresponding file, the result is the empty string. /// If there the resulting output should be directed to the standard output /// stream, the result is "-". std::string outputFilename() const { return PSPs.OutputFilename; } std::string indexUnitOutputFilename() const { if (!PSPs.IndexUnitOutputFilename.empty()) return PSPs.IndexUnitOutputFilename; return outputFilename(); } /// If there are explicit primary inputs (i.e. designated with -primary-input /// or -primary-filelist), the paths specific to those inputs (other than the /// input file path itself) are kept here. If there are no explicit primary /// inputs (for instance for whole module optimization), the corresponding /// paths are kept in the first input file. const PrimarySpecificPaths &getPrimarySpecificPaths() const { return PSPs; } void setPrimarySpecificPaths(PrimarySpecificPaths &&PSPs) { this->PSPs = std::move(PSPs); } // The next set of functions provides access to those primary-specific paths // accessed directly from an InputFile, as opposed to via // FrontendInputsAndOutputs. They merely make the call sites // a bit shorter. Add more forwarding methods as needed. 
StringRef getDependenciesFilePath() const { return getPrimarySpecificPaths().SupplementaryOutputs.DependenciesFilePath; } StringRef getLoadedModuleTracePath() const { return getPrimarySpecificPaths().SupplementaryOutputs.LoadedModuleTracePath; } StringRef getFineModuleTracePath() const { return getPrimarySpecificPaths().SupplementaryOutputs.FineModuleTracePath; } StringRef getSerializedDiagnosticsPath() const { return getPrimarySpecificPaths().SupplementaryOutputs .SerializedDiagnosticsPath; } StringRef getFixItsOutputPath() const { return getPrimarySpecificPaths().SupplementaryOutputs.FixItsOutputPath; } }; } // namespace swift #endif // SWIFT_FRONTEND_INPUTFILE_H
c
github
https://github.com/apple/swift
include/swift/Frontend/InputFile.h
""" IPython extension: %lookfor command for searching docstrings """ # Pauli Virtanen <pav@iki.fi>, 2008. import re, inspect, pkgutil, pydoc #------------------------------------------------------------------------------ # Lookfor functionality #------------------------------------------------------------------------------ # Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} # where kind: "func", "class", "module", "object" # and index: index in breadth-first namespace traversal _lookfor_caches = {} # regexp whose match indicates that the string may contain a function signature _function_signature_re = re.compile(r"[a-z_]+\(.*[,=].*\)", re.I) def lookfor(what, modules=None, import_modules=True, regenerate=False): """ Search for objects whose documentation contains all given words. Shows a summary of matching objects, sorted roughly by relevance. Parameters ---------- what : str String containing words to look for. module : str, module Module whose docstrings to go through. import_modules : bool Whether to import sub-modules in packages. Will import only modules in __all__ regenerate: bool Re-generate the docstring cache """ # Cache cache = {} for module in modules: try: c = _lookfor_generate_cache(module, import_modules, regenerate) cache.update(c) except ImportError: pass # Search # XXX: maybe using a real stemming search engine would be better? found = [] whats = str(what).lower().split() if not whats: return for name, (docstring, kind, index) in cache.iteritems(): if kind in ('module', 'object'): # don't show modules or objects continue ok = True doc = docstring.lower() for w in whats: if w not in doc: ok = False break if ok: found.append(name) # Relevance sort # XXX: this is full Harrison-Stetson heuristics now, # XXX: it probably could be improved kind_relevance = {'func': 1000, 'class': 1000, 'module': -1000, 'object': -1000} def relevance(name, docstr, kind, index): r = 0 # do the keywords occur within the start of the docstring? 
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) r += sum([200 for w in whats if w in first_doc]) # do the keywords occur in the function name? r += sum([30 for w in whats if w in name]) # is the full name long? r += -len(name) * 5 # is the object of bad type? r += kind_relevance.get(kind, -1000) # is the object deep in namespace hierarchy? r += -name.count('.') * 10 r += max(-index / 100, -100) return r def relevance_sort(a, b): dr = relevance(b, *cache[b]) - relevance(a, *cache[a]) if dr != 0: return dr else: return cmp(a, b) found.sort(relevance_sort) # Pretty-print s = "Search results for '%s'" % (' '.join(whats)) help_text = [s, "-"*len(s)] for name in found: doc, kind, ix = cache[name] doclines = [line.strip() for line in doc.strip().split("\n") if line.strip()] # find a suitable short description try: first_doc = doclines[0].strip() if _function_signature_re.search(first_doc): first_doc = doclines[1].strip() except IndexError: first_doc = "" help_text.append("%s\n %s" % (name, first_doc)) # Output if len(help_text) > 10: pager = pydoc.getpager() pager("\n".join(help_text)) else: print "\n".join(help_text) def _lookfor_generate_cache(module, import_modules, regenerate): """ Generate docstring cache for given module. Parameters ---------- module : str, None, module Module for which to generate docstring cache import_modules : bool Whether to import sub-modules in packages. Will import only modules in __all__ regenerate: bool Re-generate the docstring cache Returns ------- cache : dict {obj_full_name: (docstring, kind, index), ...} Docstring cache for the module, either cached one (regenerate=False) or newly generated. 
""" global _lookfor_caches if module is None: module = "numpy" if isinstance(module, str): module = __import__(module) if id(module) in _lookfor_caches and not regenerate: return _lookfor_caches[id(module)] # walk items and collect docstrings cache = {} _lookfor_caches[id(module)] = cache seen = {} index = 0 stack = [(module.__name__, module)] while stack: name, item = stack.pop(0) if id(item) in seen: continue seen[id(item)] = True index += 1 kind = "object" if inspect.ismodule(item): kind = "module" try: _all = item.__all__ except AttributeError: _all = None # import sub-packages if import_modules and hasattr(item, '__path__'): for m in pkgutil.iter_modules(item.__path__): if _all is not None and m[1] not in _all: continue try: __import__("%s.%s" % (name, m[1])) except ImportError: continue for n, v in inspect.getmembers(item): if _all is not None and n not in _all: continue stack.append(("%s.%s" % (name, n), v)) elif inspect.isclass(item): kind = "class" for n, v in inspect.getmembers(item): stack.append(("%s.%s" % (name, n), v)) elif callable(item): kind = "func" doc = inspect.getdoc(item) if doc is not None: cache[name] = (doc, kind, index) return cache #------------------------------------------------------------------------------ # IPython connectivity #------------------------------------------------------------------------------ from IPython.core import ipapi ip = ipapi.get() _lookfor_modules = ['numpy', 'scipy'] def lookfor_f(self, arg=''): r""" Search for objects whose documentation contains all given words. Shows a summary of matching objects, sorted roughly by relevance. 
Usage ----- %lookfor +numpy some words Search module 'numpy' %lookfor_modules numpy scipy Set default modules whose docstrings to search """ lookfor(arg, modules=_lookfor_modules) def lookfor_modules_f(self, arg=''): global _lookfor_modules if not arg: print "Modules included in %lookfor search:", _lookfor_modules else: _lookfor_modules = arg.split() ip.define_magic('lookfor', lookfor_f) ip.define_magic('lookfor_modules', lookfor_modules_f)
unknown
codeparrot/codeparrot-clean
##Copyright 2009-2015 Thomas Paviot (tpaviot@gmail.com) ## ##This file is part of pythonOCC. ## ##pythonOCC is free software: you can redistribute it and/or modify ##it under the terms of the GNU Lesser General Public License as published by ##the Free Software Foundation, either version 3 of the License, or ##(at your option) any later version. ## ##pythonOCC is distributed in the hope that it will be useful, ##but WITHOUT ANY WARRANTY; without even the implied warranty of ##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##GNU Lesser General Public License for more details. ## ##You should have received a copy of the GNU Lesser General Public License ##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>. import math from OCC.gp import (gp_Pnt, gp_Sphere, gp_Ax3, gp_Dir, gp_Circ, gp_Ax2, gp_Pnt2d, gp_Dir2d) from OCC.BRepBuilderAPI import (BRepBuilderAPI_MakeEdge, BRepBuilderAPI_MakeFace, BRepBuilderAPI_MakeWire) from OCC.TColgp import TColgp_Array2OfPnt from OCC.GeomAPI import GeomAPI_PointsToBSplineSurface from OCC.GeomAbs import GeomAbs_C2 from OCC.Geom2d import Geom2d_Line from OCC.BRepLib import breplib_BuildCurves3d from OCC.Quantity import Quantity_Color, Quantity_NOC_PINK from OCC.Display.SimpleGui import init_display display, start_display, add_menu, add_function_to_menu = init_display() def face(): p1 = gp_Pnt() p2 = gp_Pnt() p3 = gp_Pnt() p4 = gp_Pnt() p5 = gp_Pnt() p6 = gp_Pnt() # The white Face sphere = gp_Sphere(gp_Ax3(gp_Pnt(0, 0, 0), gp_Dir(1, 0, 0)), 150) green_face = BRepBuilderAPI_MakeFace(sphere, 0.1, 0.7, 0.2, 0.9) # The red face p1.SetCoord(-15, 200, 10) p2.SetCoord(5, 204, 0) p3.SetCoord(15, 200, 0) p4.SetCoord(-15, 20, 15) p5.SetCoord(-5, 20, 0) p6.SetCoord(15, 20, 35) array = TColgp_Array2OfPnt(1, 3, 1, 2) array.SetValue(1, 1, p1) array.SetValue(2, 1, p2) array.SetValue(3, 1, p3) array.SetValue(1, 2, p4) array.SetValue(2, 2, p5) array.SetValue(3, 2, p6) curve = GeomAPI_PointsToBSplineSurface(array, 3, 8, 
GeomAbs_C2, 0.001).Surface() red_face = BRepBuilderAPI_MakeFace(curve, 1e-6) #The brown face circle = gp_Circ(gp_Ax2(gp_Pnt(0, 0, 0), gp_Dir(1, 0, 0)), 80) Edge1 = BRepBuilderAPI_MakeEdge(circle, 0, math.pi) Edge2 = BRepBuilderAPI_MakeEdge(gp_Pnt(0, 0, -80), gp_Pnt(0, -10, 40)) Edge3 = BRepBuilderAPI_MakeEdge(gp_Pnt(0, -10, 40), gp_Pnt(0, 0, 80)) ##TopoDS_Wire YellowWire MW1 = BRepBuilderAPI_MakeWire(Edge1.Edge(), Edge2.Edge(), Edge3.Edge()) if MW1.IsDone(): yellow_wire = MW1.Wire() brown_face = BRepBuilderAPI_MakeFace(yellow_wire) #The pink face p1.SetCoord(35, -200, 40) p2.SetCoord(50, -204, 30) p3.SetCoord(65, -200, 30) p4.SetCoord(35, -20, 45) p5.SetCoord(45, -20, 30) p6.SetCoord(65, -20, 65) array2 = TColgp_Array2OfPnt(1, 3, 1, 2) array2.SetValue(1, 1, p1) array2.SetValue(2, 1, p2) array2.SetValue(3, 1, p3) array2.SetValue(1, 2, p4) array2.SetValue(2, 2, p5) array2.SetValue(3, 2, p6) BSplineSurf = GeomAPI_PointsToBSplineSurface(array2, 3, 8, GeomAbs_C2, 0.001) aFace = BRepBuilderAPI_MakeFace(BSplineSurf.Surface(), 1e-6).Face() ## ##//2d lines P12d = gp_Pnt2d(0.9, 0.1) P22d = gp_Pnt2d(0.2, 0.7) P32d = gp_Pnt2d(0.02, 0.1) ## line1 = Geom2d_Line(P12d, gp_Dir2d((0.2-0.9), (0.7-0.1))) line2 = Geom2d_Line(P22d, gp_Dir2d((0.02-0.2), (0.1-0.7))) line3 = Geom2d_Line(P32d, gp_Dir2d((0.9-0.02), (0.1-0.1))) ## ##//Edges are on the BSpline surface Edge1 = BRepBuilderAPI_MakeEdge(line1.GetHandle(), BSplineSurf.Surface(), 0, P12d.Distance(P22d)).Edge() Edge2 = BRepBuilderAPI_MakeEdge(line2.GetHandle(), BSplineSurf.Surface(), 0, P22d.Distance(P32d)).Edge() Edge3 = BRepBuilderAPI_MakeEdge(line3.GetHandle(), BSplineSurf.Surface(), 0, P32d.Distance(P12d)).Edge() ## Wire1 = BRepBuilderAPI_MakeWire(Edge1, Edge2, Edge3).Wire() Wire1.Reverse() pink_face = BRepBuilderAPI_MakeFace(aFace, Wire1).Face() breplib_BuildCurves3d(pink_face) display.DisplayColoredShape(green_face.Face(), 'GREEN') display.DisplayColoredShape(red_face.Face(), 'RED') display.DisplayColoredShape(pink_face, 
Quantity_Color(Quantity_NOC_PINK)) display.DisplayColoredShape(brown_face.Face(), 'BLUE') display.DisplayColoredShape(yellow_wire, 'YELLOW', update=True) if __name__ == '__main__': face() start_display()
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "NamespaceCommentCheck.h" #include "../utils/LexerUtils.h" #include "clang/AST/ASTContext.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/TokenKinds.h" #include "clang/Lex/Lexer.h" #include <optional> using namespace clang::ast_matchers; namespace clang::tidy::readability { NamespaceCommentCheck::NamespaceCommentCheck(StringRef Name, ClangTidyContext *Context) : ClangTidyCheck(Name, Context), NamespaceCommentPattern( "^/[/*] *(end (of )?)? *(anonymous|unnamed)? *" "namespace( +(((inline )|([a-zA-Z0-9_:]))+))?\\.? *(\\*/)?$", llvm::Regex::IgnoreCase), ShortNamespaceLines(Options.get("ShortNamespaceLines", 1U)), SpacesBeforeComments(Options.get("SpacesBeforeComments", 1U)), AllowOmittingNamespaceComments( Options.get("AllowOmittingNamespaceComments", false)) {} void NamespaceCommentCheck::storeOptions(ClangTidyOptions::OptionMap &Opts) { Options.store(Opts, "ShortNamespaceLines", ShortNamespaceLines); Options.store(Opts, "SpacesBeforeComments", SpacesBeforeComments); Options.store(Opts, "AllowOmittingNamespaceComments", AllowOmittingNamespaceComments); } void NamespaceCommentCheck::registerMatchers(MatchFinder *Finder) { Finder->addMatcher(namespaceDecl().bind("namespace"), this); } static bool locationsInSameFile(const SourceManager &Sources, SourceLocation Loc1, SourceLocation Loc2) { return Loc1.isFileID() && Loc2.isFileID() && Sources.getFileID(Loc1) == Sources.getFileID(Loc2); } static std::optional<std::string> getNamespaceNameAsWritten(SourceLocation &Loc, const SourceManager &Sources, const LangOptions &LangOpts) { // Loc should 
be at the begin of the namespace decl (usually, `namespace` // token). We skip the first token right away, but in case of `inline // namespace` or `namespace a::inline b` we can see both `inline` and // `namespace` keywords, which we just ignore. Nested parens/squares before // the opening brace can result from attributes. std::string Result; int Nesting = 0; while (std::optional<Token> T = utils::lexer::findNextTokenSkippingComments( Loc, Sources, LangOpts)) { Loc = T->getLocation(); if (T->is(tok::l_brace)) break; if (T->isOneOf(tok::l_square, tok::l_paren)) { ++Nesting; } else if (T->isOneOf(tok::r_square, tok::r_paren)) { --Nesting; } else if (Nesting == 0) { if (T->is(tok::raw_identifier)) { const StringRef ID = T->getRawIdentifier(); if (ID != "namespace") Result.append(std::string(ID)); if (ID == "inline") Result.append(" "); } else if (T->is(tok::coloncolon)) { Result.append("::"); } else { // Any other kind of token is unexpected here. return std::nullopt; } } } return Result; } void NamespaceCommentCheck::check(const MatchFinder::MatchResult &Result) { const auto *ND = Result.Nodes.getNodeAs<NamespaceDecl>("namespace"); const SourceManager &Sources = *Result.SourceManager; // Ignore namespaces inside macros and namespaces split across files. if (ND->getBeginLoc().isMacroID() || !locationsInSameFile(Sources, ND->getBeginLoc(), ND->getRBraceLoc())) return; // Don't require closing comments for namespaces spanning less than certain // number of lines. const unsigned StartLine = Sources.getSpellingLineNumber(ND->getBeginLoc()); const unsigned EndLine = Sources.getSpellingLineNumber(ND->getRBraceLoc()); if (EndLine - StartLine + 1 <= ShortNamespaceLines) return; // Find next token after the namespace closing brace. const SourceLocation AfterRBrace = Lexer::getLocForEndOfToken( ND->getRBraceLoc(), /*Offset=*/0, Sources, getLangOpts()); SourceLocation Loc = AfterRBrace; SourceLocation LBraceLoc = ND->getBeginLoc(); // Currently for nested namespace (n1::n2::...) 
the AST matcher will match foo // then bar instead of a single match. So if we got a nested namespace we have // to skip the next ones. for (const SourceLocation &EndOfNameLocation : Ends) if (Sources.isBeforeInTranslationUnit(ND->getLocation(), EndOfNameLocation)) return; std::optional<std::string> NamespaceNameAsWritten = getNamespaceNameAsWritten(LBraceLoc, Sources, getLangOpts()); if (!NamespaceNameAsWritten) return; if (NamespaceNameAsWritten->empty() != ND->isAnonymousNamespace()) { // Apparently, we didn't find the correct namespace name. Give up. return; } Ends.push_back(LBraceLoc); Token Tok; // Skip whitespace until we find the next token. while (Lexer::getRawToken(Loc, Tok, Sources, getLangOpts()) || Tok.is(tok::semi)) { Loc = Loc.getLocWithOffset(1); } if (!locationsInSameFile(Sources, ND->getRBraceLoc(), Loc)) return; const bool NextTokenIsOnSameLine = Sources.getSpellingLineNumber(Loc) == EndLine; // If we insert a line comment before the token in the same line, we need // to insert a line break. bool NeedLineBreak = NextTokenIsOnSameLine && Tok.isNot(tok::eof); SourceRange OldCommentRange(AfterRBrace, AfterRBrace); std::string Message = "%0 not terminated with a closing comment"; bool HasComment = false; // Try to find existing namespace closing comment on the same line. if (Tok.is(tok::comment) && NextTokenIsOnSameLine) { const StringRef Comment(Sources.getCharacterData(Loc), Tok.getLength()); SmallVector<StringRef, 7> Groups; if (NamespaceCommentPattern.match(Comment, &Groups)) { const StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5] : ""; const StringRef Anonymous = Groups.size() > 3 ? Groups[3] : ""; if ((ND->isAnonymousNamespace() && NamespaceNameInComment.empty()) || (*NamespaceNameAsWritten == NamespaceNameInComment && Anonymous.empty())) { // Check if the namespace in the comment is the same. // FIXME: Maybe we need a strict mode, where we always fix namespace // comments with different format. 
return; } HasComment = true; // Otherwise we need to fix the comment. NeedLineBreak = Comment.starts_with("/*"); OldCommentRange = SourceRange(AfterRBrace, Loc.getLocWithOffset(Tok.getLength())); Message = (llvm::Twine( "%0 ends with a comment that refers to a wrong namespace '") + NamespaceNameInComment + "'") .str(); } else if (Comment.starts_with("//")) { // Assume that this is an unrecognized form of a namespace closing line // comment. Replace it. NeedLineBreak = false; OldCommentRange = SourceRange(AfterRBrace, Loc.getLocWithOffset(Tok.getLength())); Message = "%0 ends with an unrecognized comment"; } // If it's a block comment, just move it to the next line, as it can be // multi-line or there may be other tokens behind it. } const std::string NamespaceNameForDiag = ND->isAnonymousNamespace() ? "anonymous namespace" : ("namespace '" + *NamespaceNameAsWritten + "'"); // If no namespace comment is allowed if (!HasComment && AllowOmittingNamespaceComments) return; std::string Fix(SpacesBeforeComments, ' '); Fix.append("// namespace"); if (!ND->isAnonymousNamespace()) Fix.append(" ").append(*NamespaceNameAsWritten); if (NeedLineBreak) Fix.append("\n"); // Place diagnostic at an old comment, or closing brace if we did not have it. const SourceLocation DiagLoc = OldCommentRange.getBegin() != OldCommentRange.getEnd() ? OldCommentRange.getBegin() : ND->getRBraceLoc(); diag(DiagLoc, Message) << NamespaceNameForDiag << FixItHint::CreateReplacement( CharSourceRange::getCharRange(OldCommentRange), Fix); diag(ND->getLocation(), "%0 starts here", DiagnosticIDs::Note) << NamespaceNameForDiag; } } // namespace clang::tidy::readability
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/readability/NamespaceCommentCheck.cpp
#!/usr/bin/python """ CodeHilite Extension for Python-Markdown ======================================== Adds code/syntax highlighting to standard Python-Markdown code blocks. Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html> Contact: markdown@freewisdom.org License: BSD (see ../LICENSE.md for details) Dependencies: * [Python 2.3+](http://python.org/) * [Markdown 2.0+](http://packages.python.org/Markdown/) * [Pygments](http://pygments.org/) """ import markdown try: from pygments import highlight from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer from pygments.formatters import HtmlFormatter pygments = True except ImportError: pygments = False # ------------------ The Main CodeHilite Class ---------------------- class CodeHilite: """ Determine language of source code, and pass it into the pygments hilighter. Basic Usage: >>> code = CodeHilite(src = 'some text') >>> html = code.hilite() * src: Source string or any object with a .readline attribute. * linenos: (Boolen) Turn line numbering 'on' or 'off' (off by default). * guess_lang: (Boolen) Turn language auto-detection 'on' or 'off' (on by default). * css_class: Set class name of wrapper div ('codehilite' by default). Low Level Usage: >>> code = CodeHilite() >>> code.src = 'some text' # String or anything with a .readline attr. >>> code.linenos = True # True or False; Turns line numbering on or of. >>> html = code.hilite() """ def __init__(self, src=None, linenos=False, guess_lang=True, css_class="codehilite", lang=None, style='default', noclasses=False, tab_length=4): self.src = src self.lang = lang self.linenos = linenos self.guess_lang = guess_lang self.css_class = css_class self.style = style self.noclasses = noclasses self.tab_length = tab_length def hilite(self): """ Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with optional line numbers. 
The output should then be styled with css to your liking. No styles are applied by default - only styling hooks (i.e.: <span class="k">). returns : A string of html. """ self.src = self.src.strip('\n') if self.lang is None: self._getLang() if pygments: try: lexer = get_lexer_by_name(self.lang) except ValueError: try: if self.guess_lang: lexer = guess_lexer(self.src) else: lexer = TextLexer() except ValueError: lexer = TextLexer() formatter = HtmlFormatter(linenos=self.linenos, cssclass=self.css_class, style=self.style, noclasses=self.noclasses) return highlight(self.src, lexer, formatter) else: # just escape and build markup usable by JS highlighting libs txt = self.src.replace('&', '&amp;') txt = txt.replace('<', '&lt;') txt = txt.replace('>', '&gt;') txt = txt.replace('"', '&quot;') classes = [] if self.lang: classes.append('language-%s' % self.lang) if self.linenos: classes.append('linenums') class_str = '' if classes: class_str = ' class="%s"' % ' '.join(classes) return '<pre class="%s"><code%s>%s</code></pre>\n'% \ (self.css_class, class_str, txt) def _getLang(self): """ Determines language of a code block from shebang line and whether said line should be removed or left in place. If the sheband line contains a path (even a single /) then it is assumed to be a real shebang line and left alone. However, if no path is given (e.i.: #!python or :::python) then it is assumed to be a mock shebang for language identifitation of a code fragment and removed from the code block prior to processing for code highlighting. When a mock shebang (e.i: #!python) is found, line numbering is turned on. When colons are found in place of a shebang (e.i.: :::python), line numbering is left in the current state - off by default. """ import re #split text into lines lines = self.src.split("\n") #pull first line to examine fl = lines.pop(0) c = re.compile(r''' (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons. (?P<path>(?:/\w+)*[/ ])? 
# Zero or 1 path (?P<lang>[\w+-]*) # The language ''', re.VERBOSE) # search first line for shebang m = c.search(fl) if m: # we have a match try: self.lang = m.group('lang').lower() except IndexError: self.lang = None if m.group('path'): # path exists - restore first line lines.insert(0, fl) if m.group('shebang'): # shebang exists - use line numbers self.linenos = True else: # No match lines.insert(0, fl) self.src = "\n".join(lines).strip("\n") # ------------------ The Markdown Extension ------------------------------- class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor): """ Hilight source code in code blocks. """ def run(self, root): """ Find code blocks and store in htmlStash. """ blocks = root.getiterator('pre') for block in blocks: children = block.getchildren() if len(children) == 1 and children[0].tag == 'code': code = CodeHilite(children[0].text, linenos=self.config['force_linenos'], guess_lang=self.config['guess_lang'], css_class=self.config['css_class'], style=self.config['pygments_style'], noclasses=self.config['noclasses'], tab_length=self.markdown.tab_length) placeholder = self.markdown.htmlStash.store(code.hilite(), safe=True) # Clear codeblock in etree instance block.clear() # Change to p element which will later # be removed when inserting raw html block.tag = 'p' block.text = placeholder class CodeHiliteExtension(markdown.Extension): """ Add source code hilighting to markdown codeblocks. 
""" def __init__(self, configs): # define default configs self.config = { 'force_linenos' : [False, "Force line numbers - Default: False"], 'guess_lang' : [True, "Automatic language detection - Default: True"], 'css_class' : ["codehilite", "Set class name for wrapper <div> - Default: codehilite"], 'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'], 'noclasses': [False, 'Use inline styles instead of CSS classes - Default false'] } # Override defaults with user settings for key, value in configs: # convert strings to booleans if value == 'True': value = True if value == 'False': value = False self.setConfig(key, value) def extendMarkdown(self, md, md_globals): """ Add HilitePostprocessor to Markdown instance. """ hiliter = HiliteTreeprocessor(md) hiliter.config = self.getConfigs() md.treeprocessors.add("hilite", hiliter, "<inline") md.registerExtension(self) def makeExtension(configs={}): return CodeHiliteExtension(configs=configs)
unknown
codeparrot/codeparrot-clean
import smtplib import time from pell.assistant.text_to_speech import talk, listen import sys import os class EmailAction: def __init__(self, command, subject=None, message=None, receiver=None): self.command = command self.subject = subject self.message = message self.receiver = receiver self.email = os.getenv("gmail_email") self.password = os.getenv("gmail_password") def execute(self): # Ask for recepient only when not provided in the command if self.receiver == None: talk('Who do you want to send it to?') time.sleep(3) self.receiver = listen() # Ask for subject only when not provided in the command if self.subject == None: talk('What is the subject?') time.sleep(3) self.subject = listen() # Ask for message only when not provided in the command if self.message == None: talk('What should I say?') time.sleep(3) self.message = listen() content = 'Subject: {}\n\n{}'.format(self.subject, self.message) mail = smtplib.SMTP('smtp.gmail.com', 587) mail.ehlo() mail.starttls() mail.login(self.email, self.password) # send message mail.sendmail(self.email, self.receiver, content) # end mail connection mail.close() talk('Email sent.') sys.exit('Email sent.') if __name__ == "__main__": test_email = os.getenv("test_email") EmailAction( "Send email to myself", "PELL", "This is a test message from PELL", test_email ).execute()
unknown
codeparrot/codeparrot-clean
package main import ( "io" "log" "net" "sync" "github.com/ishidawataru/sctp" ) // SCTPProxy is a proxy for SCTP connections. It implements the Proxy interface to // handle SCTP traffic forwarding between the frontend and backend addresses. type SCTPProxy struct { listener *sctp.SCTPListener frontendAddr *sctp.SCTPAddr backendAddr *sctp.SCTPAddr } // NewSCTPProxy creates a new SCTPProxy. func NewSCTPProxy(listener *sctp.SCTPListener, backendAddr *sctp.SCTPAddr) (*SCTPProxy, error) { return &SCTPProxy{ listener: listener, frontendAddr: listener.Addr().(*sctp.SCTPAddr), backendAddr: backendAddr, }, nil } func (proxy *SCTPProxy) clientLoop(client *sctp.SCTPConn, quit chan bool) { backend, err := sctp.DialSCTP("sctp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't forward traffic to backend sctp/%v: %s\n", proxy.backendAddr, err) client.Close() return } clientC := sctp.NewSCTPSndRcvInfoWrappedConn(client) backendC := sctp.NewSCTPSndRcvInfoWrappedConn(backend) var wg sync.WaitGroup broker := func(to, from net.Conn) { io.Copy(to, from) from.Close() to.Close() wg.Done() } wg.Add(2) go broker(clientC, backendC) go broker(backendC, clientC) finish := make(chan struct{}) go func() { wg.Wait() close(finish) }() select { case <-quit: case <-finish: } clientC.Close() backendC.Close() <-finish } // Run starts forwarding the traffic using SCTP. func (proxy *SCTPProxy) Run() { quit := make(chan bool) defer close(quit) for { client, err := proxy.listener.Accept() if err != nil { log.Printf("Stopping proxy on sctp/%v for sctp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*sctp.SCTPConn), quit) } } // Close stops forwarding the traffic. func (proxy *SCTPProxy) Close() { proxy.listener.Close() }
go
github
https://github.com/moby/moby
cmd/docker-proxy/sctp_proxy_linux.go
# -*- coding: utf-8 -*- import os import sys import unittest2 import stripe from mock import patch from stripe.test.helper import (StripeTestCase, NOW, DUMMY_CHARGE, DUMMY_CARD) class FunctionalTests(StripeTestCase): request_client = stripe.http_client.Urllib2Client def setUp(self): super(FunctionalTests, self).setUp() def get_http_client(*args, **kwargs): return self.request_client(*args, **kwargs) self.client_patcher = patch( 'stripe.http_client.new_default_http_client') client_mock = self.client_patcher.start() client_mock.side_effect = get_http_client def tearDown(self): super(FunctionalTests, self).tearDown() self.client_patcher.stop() def test_dns_failure(self): api_base = stripe.api_base try: stripe.api_base = 'https://my-invalid-domain.ireallywontresolve/v1' self.assertRaises(stripe.error.APIConnectionError, stripe.Customer.create) finally: stripe.api_base = api_base def test_run(self): charge = stripe.Charge.create(**DUMMY_CHARGE) self.assertFalse(charge.refunded) charge.refund() self.assertTrue(charge.refunded) def test_refresh(self): charge = stripe.Charge.create(**DUMMY_CHARGE) charge2 = stripe.Charge.retrieve(charge.id) self.assertEqual(charge2.created, charge.created) charge2.junk = 'junk' charge2.refresh() self.assertRaises(AttributeError, lambda: charge2.junk) def test_list_accessors(self): customer = stripe.Customer.create(card=DUMMY_CARD) self.assertEqual(customer['created'], customer.created) customer['foo'] = 'bar' self.assertEqual(customer.foo, 'bar') def test_raise(self): EXPIRED_CARD = DUMMY_CARD.copy() EXPIRED_CARD['exp_month'] = NOW.month - 2 EXPIRED_CARD['exp_year'] = NOW.year - 2 self.assertRaises(stripe.error.CardError, stripe.Charge.create, amount=100, currency='usd', card=EXPIRED_CARD) def test_response_headers(self): EXPIRED_CARD = DUMMY_CARD.copy() EXPIRED_CARD['exp_month'] = NOW.month - 2 EXPIRED_CARD['exp_year'] = NOW.year - 2 try: stripe.Charge.create(amount=100, currency='usd', card=EXPIRED_CARD) self.fail('charge creation with 
expired card did not fail') except stripe.error.CardError as e: self.assertTrue(e.request_id.startswith('req_')) def test_unicode(self): # Make sure unicode requests can be sent self.assertRaises(stripe.error.InvalidRequestError, stripe.Charge.retrieve, id='☃') def test_none_values(self): customer = stripe.Customer.create(plan=None) self.assertTrue(customer.id) def test_missing_id(self): customer = stripe.Customer() self.assertRaises(stripe.error.InvalidRequestError, customer.refresh) class RequestsFunctionalTests(FunctionalTests): request_client = stripe.http_client.RequestsClient class UrlfetchFunctionalTests(FunctionalTests): request_client = 'urlfetch' def setUp(self): if stripe.http_client.urlfetch is None: self.skipTest( '`urlfetch` from Google App Engine is unavailable.') else: super(UrlfetchFunctionalTests, self).setUp() class PycurlFunctionalTests(FunctionalTests): def setUp(self): if not os.environ.get('STRIPE_TEST_PYCURL'): self.skipTest('Pycurl skipped as STRIPE_TEST_PYCURL is not set') if sys.version_info >= (3, 0): self.skipTest('Pycurl is not supported in Python 3') else: super(PycurlFunctionalTests, self).setUp() request_client = stripe.http_client.PycurlClient class AuthenticationErrorTest(StripeTestCase): def test_invalid_credentials(self): key = stripe.api_key try: stripe.api_key = 'invalid' stripe.Customer.create() except stripe.error.AuthenticationError as e: self.assertEqual(401, e.http_status) self.assertTrue(isinstance(e.http_body, str)) self.assertTrue(isinstance(e.json_body, dict)) # Note that an invalid API key bypasses many of the standard # facilities in the API server so currently no Request ID is # returned. 
finally: stripe.api_key = key class CardErrorTest(StripeTestCase): def test_declined_card_props(self): EXPIRED_CARD = DUMMY_CARD.copy() EXPIRED_CARD['exp_month'] = NOW.month - 2 EXPIRED_CARD['exp_year'] = NOW.year - 2 try: stripe.Charge.create(amount=100, currency='usd', card=EXPIRED_CARD) except stripe.error.CardError as e: self.assertEqual(402, e.http_status) self.assertTrue(isinstance(e.http_body, str)) self.assertTrue(isinstance(e.json_body, dict)) self.assertTrue(e.request_id.startswith('req_')) class InvalidRequestErrorTest(StripeTestCase): def test_nonexistent_object(self): try: stripe.Charge.retrieve('invalid') except stripe.error.InvalidRequestError as e: self.assertEqual(404, e.http_status) self.assertTrue(isinstance(e.http_body, str)) self.assertTrue(isinstance(e.json_body, dict)) self.assertTrue(e.request_id.startswith('req_')) def test_invalid_data(self): try: stripe.Charge.create() except stripe.error.InvalidRequestError as e: self.assertEqual(400, e.http_status) self.assertTrue(isinstance(e.http_body, str)) self.assertTrue(isinstance(e.json_body, dict)) self.assertTrue(e.request_id.startswith('req_')) if __name__ == '__main__': unittest2.main()
unknown
codeparrot/codeparrot-clean
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause %YAML 1.2 --- $id: http://devicetree.org/schemas/arm/qcom,coresight-remote-etm.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Qualcomm Coresight Remote ETM(Embedded Trace Macrocell) maintainers: - Jinlong Mao <quic_jinlmao@quicinc.com> - Tao Zhang <quic_taozha@quicinc.com> description: Support for ETM trace collection on remote processor using coresight framework. Enabling this will allow turning on ETM tracing on remote processor like modem processor via sysfs and collecting the trace via coresight TMC sinks. properties: compatible: const: qcom,coresight-remote-etm label: description: Description of a coresight device. out-ports: $ref: /schemas/graph.yaml#/properties/ports additionalProperties: false properties: port: description: Output connection to the CoreSight Trace bus. $ref: /schemas/graph.yaml#/properties/port required: - compatible - out-ports additionalProperties: false examples: - | etm { compatible = "qcom,coresight-remote-etm"; out-ports { port { modem_etm0_out_funnel_modem: endpoint { remote-endpoint = <&funnel_modem_in_modem_etm0>; }; }; }; }; ...
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/arm/qcom,coresight-remote-etm.yaml
import os, time, random, getpass from os.path import abspath, dirname, isfile from fabric.api import * from fabric.contrib.files import append, exists from fabtools import require import fabtools SITE_ROOT = dirname(abspath(__file__)) from _set_local_env_vars import import_env_vars import_env_vars(SITE_ROOT) PRJ_ENV = os.environ['PRJ_ENV'] PRJ_NAME = os.environ['PRJ_NAME'] PRJ_DB = os.environ['PRJ_DB'] PRJ_USER = os.environ['PRJ_USER'] PRJ_PASS = os.environ['PRJ_PASS'] PRJ_GIT_REPO = os.environ['PRJ_GIT_REPO'] PRJ_ADDR_STAGING = os.environ['PRJ_ADDR_STAGING'] PRJ_ADDR_PRODUCTION = os.environ['PRJ_ADDR_PRODUCTION'] PRJ_ADDR_TEST = os.environ['PRJ_ADDR_TEST'] ENVIRONMENTS = { 'dev': ['127.0.0.1'], 'staging': PRJ_ADDR_STAGING, 'production': PRJ_ADDR_PRODUCTION, 'test': PRJ_ADDR_TEST, } env.user = 'django' # sshagent_run credits to http://lincolnloop.com/blog/2009/sep/22/easy-fabric-deployment-part-1-gitmercurial-and-ssh/ # modified by dvd :) def sshagent_run(cmd, capture=True): """ Helper function. Runs a command with SSH agent forwarding enabled. Note:: Fabric (and paramiko) can't forward your SSH agent. This helper uses your system's ssh to do so. 
""" cwd = env.get('cwd', '') if cwd: cmd = 'cd %s;%s' % (cwd, cmd) with settings(cwd=''): for h in env.hosts: try: # catch the port number to pass to ssh host, port = h.split(':') local('ssh -p %s -A %s@%s "%s"' % (port, env.user, host, cmd), capture=capture) except ValueError: local('ssh -A %s@%s "%s"' % (env.user, h, cmd), capture=capture) @task def dev(): env.name = 'dev' env.hosts = ENVIRONMENTS[env.name] @task def staging(): env.name = 'staging' env.hosts = ENVIRONMENTS[env.name] @task def production(): env.name = 'production' env.hosts = ENVIRONMENTS[env.name] @task def test(): env.name = 'production' env.hosts = ENVIRONMENTS[env.name] @task def configure_db(): if env.name == 'dev': from sh import createuser, createdb #todo untested createuser("-Upostgres -d -R -S %s" % PRJ_USER) createdb ("-Upostgres -O%s %s" % PRJ_USER, PRJ_DB) else: require.postgres.server() require.postgres.user(PRJ_USER, PRJ_PASS) require.postgres.database(PRJ_DB, PRJ_USER) @task def setup(): configure_db() @task def bower(): from sh import bower pah = os.path.join(SITE_ROOT, 'requirements', 'clientside.txt') with open(pah, 'r') as reqfile: for line in reqfile: bower.install(line.strip()) @task def plug_prerequisites(name): env.user = 'vagrant' pah = os.path.join(SITE_ROOT, 'etc', 'install', 'plug_%s.pkg' % name) packages_to_install = [] with open(pah, 'r') as pkgs_file: lines = pkgs_file.readlines() for line in lines: if len(line) and not line[0:1] == '#': packages_to_install.append(line.strip()) require.deb.packages(packages_to_install) @task def plug_packages(name): from sh import pip for line in pip.install('-r%s/requirements/plug_%s.txt' % (SITE_ROOT, name), _iter=True): print line def set_plug_active(name, pah=os.path.join(SITE_ROOT, '.env')): templines = [] with open(pah, 'r') as envfile: lines = envfile.readlines() found = False for line in lines: if line.find('export PRJ_IS_%s=' % name.upper()) == 0: if line.find('TRUE') == 0: templines.append(line) else: 
templines.append(line.replace('FALSE', 'TRUE')) found = True else: templines.append(line) if not found: templines.append('\nexport PRJ_IS_%s=TRUE' % name.upper()) with open(pah, 'w') as newfile: newfile.writelines(templines) @task def plug(name): set_plug_active(name) # replace_or_append('export PRJ_IS_%s=FALSE' % name.upper(), 'export PRJ_IS_%s=TRUE' % name.upper(), pah) # replace_or_append('export PRJ_IS_%s=TRUE' % name.upper(), 'export PRJ_IS_%s=TRUE' % name.upper(), pah) plug_prerequisites(name) plug_packages(name) @task def plug_all(): for key, val in os.environ.items(): if key[0:7] == 'PRJ_IS_' and val == 'TRUE': name = key[7:].lower() print "plugging '%s' apps group" % name plug_prerequisites(name) plug_packages(name) @task def project_setup(): if not exists("/home/django/.virtualenvs/%s" % PRJ_NAME): run('mkvirtualenv %s' % (PRJ_NAME,)) # Clono, se non e' gia stato clonato il progetto with cd("/home/django/"): if not exists(PRJ_NAME): sshagent_run('git clone %s %s' % (PRJ_GIT_REPO, PRJ_NAME)) with prefix('workon %s' % PRJ_NAME): # Aggiungo all'ambiente virtuale la directory base del progetto e la directory apps run("add2virtualenv /home/django/%s" % PRJ_NAME) run("add2virtualenv /home/django/%s/external_apps" % PRJ_NAME) run("add2virtualenv /home/django/%s/website" % PRJ_NAME) with cd(PRJ_NAME): # Installo i pacchetti necessari run("pip install -r requirements/%s.txt" % env.name) # Creo il db remote_db_name = raw_input(u"db name for the %s db ? (defaults to project name) \n" % env.name) if len(remote_db_name.strip()) == 0: remote_db_name = PRJ_NAME remote_db_user = raw_input(u"username for the %s db ? (defaults to db name) \n" % env.name) if len(remote_db_user.strip()) == 0: remote_db_user = remote_db_name remote_db_pass = getpass.getpass(u"password for the %s db ? 
(defaults to username) \n" % env.name) if len(remote_db_pass.strip()) == 0: remote_db_pass = remote_db_user run("createuser -U postgres -d -R -S %s" % remote_db_user) run("createdb -U %s -h localhost %s" % (remote_db_user, remote_db_name)) with cd("/home/django/%s/" % PRJ_NAME): run("touch .env") append(".env", "PRJ_ENV=%s" % env.name) append(".env", "PRJ_ENGINE=postgresql_psycopg2") append(".env", "PRJ_NAME=%s" % PRJ_NAME) append(".env", "PRJ_DB=%s" % PRJ_DB) append(".env", "PRJ_USER=%s" % PRJ_USER) append(".env", "PRJ_PASS=%s" % PRJ_PASS) append(".env", 'PRJ_SECRET_KEY="%s"' % "".join([random.choice( "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_+)") for i in range(50)])) # Installazione app e db with cd("/home/django/%s/website" % PRJ_NAME): with prefix('workon %s' % PRJ_NAME): run("python manage.py syncdb --all") run("python manage.py migrate --fake") run("python manage.py collectstatic") env.user = 'root' # Per sicurezza, rendo eseguibile il file conf/gunicorn.sh with cd("/home/django/%s/etc" % PRJ_NAME): run("chmod +x gunicorn.sh") with cd("/etc/nginx/sites-enabled/"): run("ln -s /home/django/%s/etc/nginx.conf %s" % (PRJ_NAME, PRJ_NAME)) with cd("/etc/supervisor/conf.d/"): run("ln -s /home/django/%s/etc/supervisor.conf %s.conf" % (PRJ_NAME, PRJ_NAME)) run("/etc/init.d/supervisor stop") time.sleep(5) run("/etc/init.d/supervisor start") run("supervisorctl reload") run("/etc/init.d/nginx reload") @task def update(): # Clono, se non e' gia stato clonato il progetto with cd("/home/django/%s" % PRJ_NAME): sshagent_run("git pull") with prefix('workon %s' % PRJ_NAME): run("pip install -r requirements/%s.txt" % env.name) run("python website/manage.py migrate") run("python website/manage.py collectstatic --noinput") env.user = 'root' run('supervisorctl reload') @task def reload_server(): env.user = 'root' run('/etc/init.d/nginx reload') run('supervisorctl reload') @task def restart_server(): env.user = 'root' run('/etc/init.d/nginx restart') 
run("/etc/init.d/supervisor stop") time.sleep(5) run("/etc/init.d/supervisor start")
unknown
codeparrot/codeparrot-clean
#from .util import util from .. import subpackage
python
github
https://github.com/python/cpython
Lib/test/test_import/data/circular_imports/subpkg/subpackage2.py
import json import os import sys import urllib2 URL = 'https://tldr-bot.starbeamrainbowlabs.com/' def post_comment(pr_id, comment_body): # Constructing the url req = urllib2.Request(URL, json.dumps({'body': comment_body, 'pr_id': pr_id }), {'Content-Type': 'application/json'}) # Making the request f = urllib2.urlopen(req) if f.getcode() != 200: print f.read() # Get the environment variables PR_NUMBER = os.environ.get('TRAVIS_PULL_REQUEST') BUILD_ID = os.environ.get('TRAVIS_BUILD_ID') # Read the test result output from stdin test_result = sys.stdin.read().strip() # Populate the template text comment = ( "The [build]" "(https://travis-ci.org/tldr-pages/tldr/builds/{build_id})" " for this PR has failed with the following error(s):" "\n```\n" "{comment_body}" "\n```\n" "Please fix the error(s) and push again." ).format(build_id=BUILD_ID, comment_body=test_result) # If it's a PR, post a comment on it if PR_NUMBER != "false": post_comment(PR_NUMBER, comment)
unknown
codeparrot/codeparrot-clean
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import collections import itertools import re from ._structures import Infinity __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" ] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"], ) def parse(version): """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is a valid PEP 440 version or a legacy version. """ try: return Version(version) except InvalidVersion: return LegacyVersion(version) class InvalidVersion(ValueError): """ An invalid version was found, users should refer to PEP 440. """ class _BaseVersion(object): def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, _BaseVersion): return NotImplemented return method(self._key, other._key) class LegacyVersion(_BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "<LegacyVersion({0})>".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def local(self): return None @property def is_prerelease(self): return 
False @property def is_postrelease(self): return False _legacy_version_component_re = re.compile( r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, ) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse VERSION_PATTERN = r""" v? (?: (?:(?P<epoch>[0-9]+)!)? # epoch (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment (?P<pre> # pre-release [-_\.]? (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) [-_\.]? (?P<pre_n>[0-9]+)? )? (?P<post> # post release (?:-(?P<post_n1>[0-9]+)) | (?: [-_\.]? (?P<post_l>post|rev|r) [-_\.]? (?P<post_n2>[0-9]+)? ) )? (?P<dev> # dev release [-_\.]? (?P<dev_l>dev) [-_\.]? (?P<dev_n>[0-9]+)? )? ) (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version """ class Version(_BaseVersion): _regex = re.compile( r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE, ) def __init__(self, version): # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion("Invalid version: '{0}'".format(version)) # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version( match.group("pre_l"), match.group("pre_n"), ), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2"), ), dev=_parse_letter_version( match.group("dev_l"), match.group("dev_n"), ), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self): return "<Version({0})>".format(repr(str(self))) def __str__(self): parts = [] # Epoch if self._version.epoch != 0: parts.append("{0}!".format(self._version.epoch)) # Release segment parts.append(".".join(str(x) for x in self._version.release)) # Pre-release if self._version.pre is not None: parts.append("".join(str(x) for x in self._version.pre)) # Post-release if self._version.post is not None: parts.append(".post{0}".format(self._version.post[1])) # Development release if self._version.dev is not None: parts.append(".dev{0}".format(self._version.dev[1])) # Local version segment if self._version.local is not None: parts.append( "+{0}".format(".".join(str(x) for x in self._version.local)) ) return "".join(parts) @property def public(self): return str(self).split("+", 1)[0] @property def base_version(self): parts = [] # Epoch if self._version.epoch != 0: parts.append("{0}!".format(self._version.epoch)) # Release segment 
parts.append(".".join(str(x) for x in self._version.release)) return "".join(parts) @property def local(self): version_string = str(self) if "+" in version_string: return version_string.split("+", 1)[1] @property def is_prerelease(self): return bool(self._version.dev or self._version.pre) @property def is_postrelease(self): return bool(self._version.post) def _parse_letter_version(letter, number): if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" elif letter in ["rev", "r"]: letter = "post" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 1.0-1) letter = "post" return letter, int(number) _local_version_seperators = re.compile(r"[\._-]") def _parse_local_version(local): """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_seperators.split(local) ) def _cmpkey(epoch, release, pre, post, dev, local): # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. 
release = tuple( reversed(list( itertools.dropwhile( lambda x: x == 0, reversed(release), ) )) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. # We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: pre = -Infinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: pre = Infinity # Versions without a post segment should sort before those with one. if post is None: post = -Infinity # Versions without a development segment should sort after those with one. if dev is None: dev = Infinity if local is None: # Versions without a local segment should sort before those with one. local = -Infinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. # - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly local = tuple( (i, "") if isinstance(i, int) else (-Infinity, i) for i in local ) return epoch, release, pre, post, dev, local
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python """ Test one data file """ from numpy import * import sys import kernel import distance import classifier import clustering import distribution import regression import preprocessor from modshogun import Math_init_random SUPPORTED=['kernel', 'distance', 'classifier', 'clustering', 'distribution', 'regression', 'preprocessor'] def _get_name_fun (fnam): module=None for supported in SUPPORTED: if fnam.find(supported)>-1: module=supported break if module is None: print('Module required for %s not supported yet!' % fnam) return None return module+'.test' def _test_mfile (fnam): try: mfile=open(fnam, mode='r') except IOError as e: print(e) return False indata={} name_fun=_get_name_fun(fnam) if name_fun is None: return False for line in mfile: line=line.strip(" \t\n;") param = line.split('=')[0].strip() if param=='name': name=line.split('=')[1].strip().split("'")[1] indata[param]=name elif param=='kernel_symdata' or param=='kernel_data': indata[param]=_read_matrix(line) elif param.startswith('kernel_matrix') or \ param.startswith('distance_matrix'): indata[param]=_read_matrix(line) elif param.find('data_train')>-1 or param.find('data_test')>-1: # data_{train,test} might be prepended by 'subkernelX_' indata[param]=_read_matrix(line) elif param=='classifier_alphas' or param=='classifier_support_vectors': try: indata[param]=eval(line.split('=')[1]) except SyntaxError: # might be MultiClass SVM and hence matrix indata[param]=_read_matrix(line) elif param=='clustering_centers' or param=='clustering_pairs': indata[param]=_read_matrix(line) else: if (line.find("'")==-1): indata[param]=eval(line.split('=')[1]) else: indata[param]=line.split('=')[1].strip().split("'")[1] mfile.close() fun=eval(name_fun) # seed random to constant value used at data file's creation Math_init_random(indata['init_random']) random.seed(indata['init_random']) return fun(indata) def _read_matrix (line): try: str_line=(line.split('[')[1]).split(']')[0] except IndexError: 
str_line=(line.split('{')[1]).split('}')[0] lines=str_line.split(';') lis2d=list() for x in lines: lis=list() for y in x.split(','): y=y.replace("'","").strip() if(y.isalpha()): lis.append(y) else: if y.find('.')!=-1: lis.append(float(y)) else: try: lis.append(int(y)) except ValueError: # not int, RAWDNA? lis.append(y) lis2d.append(lis) return array(lis2d) for filename in sys.argv: if (filename.endswith('.m')): res=_test_mfile(filename) if res: sys.exit(0) else: sys.exit(1)
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import socket import sys from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils import six from cinder.i18n import _LE, _LW, _LI from cinder import utils netapp_lib = importutils.try_import('netapp_lib') if netapp_lib: from netapp_lib.api.zapi import zapi as netapp_api LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperMetaclass) class Client(object): def __init__(self, **kwargs): self.connection = netapp_api.NaServer( host=kwargs['hostname'], transport_type=kwargs['transport_type'], port=kwargs['port'], username=kwargs['username'], password=kwargs['password']) def get_ontapi_version(self, cached=True): """Gets the supported ontapi version.""" if cached: return self.connection.get_api_version() ontapi_version = netapp_api.NaElement('system-get-ontapi-version') res = self.connection.invoke_successfully(ontapi_version, False) major = res.get_child_content('major-version') minor = res.get_child_content('minor-version') return major, minor def get_connection(self): return self.connection def check_is_naelement(self, elem): """Checks if object is instance of NaElement.""" if not isinstance(elem, netapp_api.NaElement): raise ValueError('Expects NaElement') def 
send_request(self, api_name, api_args=None, enable_tunneling=True): """Sends request to Ontapi.""" request = netapp_api.NaElement(api_name) if api_args: request.translate_struct(api_args) return self.connection.invoke_successfully(request, enable_tunneling) def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Issues API request for creating LUN on volume.""" path = '/vol/%s/%s' % (volume_name, lun_name) lun_create = netapp_api.NaElement.create_node_with_children( 'lun-create-by-size', **{'path': path, 'size': six.text_type(size), 'ostype': metadata['OsType'], 'space-reservation-enabled': metadata['SpaceReserved']}) if qos_policy_group_name: lun_create.add_new_child('qos-policy-group', qos_policy_group_name) try: self.connection.invoke_successfully(lun_create, True) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error provisioning volume %(lun_name)s on " "%(volume_name)s. Details: %(ex)s"), {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex}) def destroy_lun(self, path, force=True): """Destroys the LUN at the path.""" lun_destroy = netapp_api.NaElement.create_node_with_children( 'lun-destroy', **{'path': path}) if force: lun_destroy.add_new_child('force', 'true') self.connection.invoke_successfully(lun_destroy, True) seg = path.split("/") LOG.debug("Destroyed LUN %s", seg[-1]) def map_lun(self, path, igroup_name, lun_id=None): """Maps LUN to the initiator and returns LUN id assigned.""" lun_map = netapp_api.NaElement.create_node_with_children( 'lun-map', **{'path': path, 'initiator-group': igroup_name}) if lun_id: lun_map.add_new_child('lun-id', lun_id) try: result = self.connection.invoke_successfully(lun_map, True) return result.get_child_content('lun-id-assigned') except netapp_api.NaApiError as e: code = e.code message = e.message LOG.warning(_LW('Error mapping LUN. 
Code :%(code)s, Message: ' '%(message)s'), {'code': code, 'message': message}) raise def unmap_lun(self, path, igroup_name): """Unmaps a LUN from given initiator.""" lun_unmap = netapp_api.NaElement.create_node_with_children( 'lun-unmap', **{'path': path, 'initiator-group': igroup_name}) try: self.connection.invoke_successfully(lun_unmap, True) except netapp_api.NaApiError as e: exc_info = sys.exc_info() LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: " "%(message)s"), {'code': e.code, 'message': e.message}) # if the LUN is already unmapped if e.code == '13115' or e.code == '9016': pass else: six.reraise(*exc_info) def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): """Creates igroup with specified args.""" igroup_create = netapp_api.NaElement.create_node_with_children( 'igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': igroup_type, 'os-type': os_type}) self.connection.invoke_successfully(igroup_create, True) def add_igroup_initiator(self, igroup, initiator): """Adds initiators to the specified igroup.""" igroup_add = netapp_api.NaElement.create_node_with_children( 'igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator}) self.connection.invoke_successfully(igroup_add, True) def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" seg = path.split("/") LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1]) lun_resize = netapp_api.NaElement.create_node_with_children( 'lun-resize', **{'path': path, 'size': new_size_bytes}) if force: lun_resize.add_new_child('force', 'true') self.connection.invoke_successfully(lun_resize, True) def get_lun_geometry(self, path): """Gets the LUN geometry.""" geometry = {} lun_geo = netapp_api.NaElement("lun-get-geometry") lun_geo.add_new_child('path', path) try: result = self.connection.invoke_successfully(lun_geo, True) geometry['size'] = result.get_child_content("size") geometry['bytes_per_sector'] =\ 
result.get_child_content("bytes-per-sector") geometry['sectors_per_track'] =\ result.get_child_content("sectors-per-track") geometry['tracks_per_cylinder'] =\ result.get_child_content("tracks-per-cylinder") geometry['cylinders'] =\ result.get_child_content("cylinders") geometry['max_resize'] =\ result.get_child_content("max-resize-size") except Exception as e: LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"), {'path': path, 'msg': e.message}) return geometry def get_volume_options(self, volume_name): """Get the value for the volume option.""" opts = [] vol_option_list = netapp_api.NaElement("volume-options-list-info") vol_option_list.add_new_child('volume', volume_name) result = self.connection.invoke_successfully(vol_option_list, True) options = result.get_child_by_name("options") if options: opts = options.get_children() return opts def move_lun(self, path, new_path): """Moves the LUN at path to new path.""" seg = path.split("/") new_seg = new_path.split("/") LOG.debug("Moving LUN %(name)s to %(new_name)s.", {'name': seg[-1], 'new_name': new_seg[-1]}) lun_move = netapp_api.NaElement("lun-move") lun_move.add_new_child("path", path) lun_move.add_new_child("new-path", new_path) self.connection.invoke_successfully(lun_move, True) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" raise NotImplementedError() def get_fc_target_wwpns(self): """Gets the FC target details.""" raise NotImplementedError() def get_iscsi_service_details(self): """Returns iscsi iqn.""" raise NotImplementedError() def get_lun_list(self): """Gets the list of LUNs on filer.""" raise NotImplementedError() def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" raise NotImplementedError() def _has_luns_mapped_to_initiator(self, initiator): """Checks whether any LUNs are mapped to the given initiator.""" lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info') 
lun_list_api.add_new_child('initiator', initiator) result = self.connection.invoke_successfully(lun_list_api, True) lun_maps_container = result.get_child_by_name( 'lun-maps') or netapp_api.NaElement('none') return len(lun_maps_container.get_children()) > 0 def has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" for initiator in initiator_list: if self._has_luns_mapped_to_initiator(initiator): return True return False def get_lun_by_args(self, **args): """Retrieves LUNs with specified args.""" raise NotImplementedError() def provide_ems(self, requester, netapp_backend, app_version, server_type="cluster"): """Provide ems with volume stats for the requester. :param server_type: cluster or 7mode. """ def _create_ems(netapp_backend, app_version, server_type): """Create ems API request.""" ems_log = netapp_api.NaElement('ems-autosupport-log') host = socket.getfqdn() or 'Cinder_node' if server_type == "cluster": dest = "cluster node" else: dest = "7 mode controller" ems_log.add_new_child('computer-name', host) ems_log.add_new_child('event-id', '0') ems_log.add_new_child('event-source', 'Cinder driver %s' % netapp_backend) ems_log.add_new_child('app-version', app_version) ems_log.add_new_child('category', 'provisioning') ems_log.add_new_child('event-description', 'OpenStack Cinder connected to %s' % dest) ems_log.add_new_child('log-level', '6') ems_log.add_new_child('auto-support', 'false') return ems_log def _create_vs_get(): """Create vs_get API request.""" vs_get = netapp_api.NaElement('vserver-get-iter') vs_get.add_new_child('max-records', '1') query = netapp_api.NaElement('query') query.add_node_with_children('vserver-info', **{'vserver-type': 'node'}) vs_get.add_child_elem(query) desired = netapp_api.NaElement('desired-attributes') desired.add_node_with_children( 'vserver-info', **{'vserver-name': '', 'vserver-type': ''}) vs_get.add_child_elem(desired) return vs_get def _get_cluster_node(na_server): 
"""Get the cluster node for ems.""" na_server.set_vserver(None) vs_get = _create_vs_get() res = na_server.invoke_successfully(vs_get) if (res.get_child_content('num-records') and int(res.get_child_content('num-records')) > 0): attr_list = res.get_child_by_name('attributes-list') vs_info = attr_list.get_child_by_name('vserver-info') vs_name = vs_info.get_child_content('vserver-name') return vs_name return None do_ems = True if hasattr(requester, 'last_ems'): sec_limit = 3559 if not (timeutils.is_older_than(requester.last_ems, sec_limit)): do_ems = False if do_ems: na_server = copy.copy(self.connection) na_server.set_timeout(25) ems = _create_ems(netapp_backend, app_version, server_type) try: if server_type == "cluster": api_version = na_server.get_api_version() if api_version: major, minor = api_version else: raise netapp_api.NaApiError( code='Not found', message='No API version found') if major == 1 and minor > 15: node = getattr(requester, 'vserver', None) else: node = _get_cluster_node(na_server) if node is None: raise netapp_api.NaApiError( code='Not found', message='No vserver found') na_server.set_vserver(node) else: na_server.set_vfiler(None) na_server.invoke_successfully(ems, True) LOG.debug("ems executed successfully.") except netapp_api.NaApiError as e: LOG.warning(_LW("Failed to invoke ems. Message : %s"), e) finally: requester.last_ems = timeutils.utcnow()
unknown
codeparrot/codeparrot-clean