code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright (C) 2013 Strahinja Val Markovic <val@markovic.io>
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from copy import deepcopy
import vim
# Default semantic-completion trigger sequences, keyed by filetype.
# A key may name several comma-separated filetypes that share the same
# triggers; _FiletypeTriggerDictFromSpec() splits these into one entry
# per filetype. User-configured triggers are merged on top of these.
DEFAULT_FILETYPE_TRIGGERS = {
  'c' : ['->', '.'],
  'objc' : ['->', '.'],
  'ocaml' : ['.', '#'],
  'cpp,objcpp' : ['->', '.', '::'],
  'perl' : ['->'],
  'php' : ['->', '::'],
  'cs,java,javascript,d,vim,python,perl6,scala,vb,elixir,go' : ['.'],
  'ruby' : ['.', '::'],
  'lua' : ['.', ':'],
  'erlang' : [':'],
}
def _FiletypeTriggerDictFromSpec( trigger_dict_spec ):
triggers_for_filetype = defaultdict( set )
for key, value in trigger_dict_spec.iteritems():
filetypes = key.split( ',' )
for filetype in filetypes:
triggers_for_filetype[ filetype ].update( value )
return triggers_for_filetype
def _FiletypeDictUnion( dict_one, dict_two ):
"""Returns a new filetye dict that's a union of the provided two dicts.
Dict params are supposed to be type defaultdict(set)."""
final_dict = deepcopy( dict_one )
for key, value in dict_two.iteritems():
final_dict[ key ].update( value )
return final_dict
def TriggersForFiletype():
  """Build the effective per-filetype trigger dict: the built-in defaults
  merged with the user's g:ycm_semantic_triggers setting (user entries
  extend, not replace, the defaults)."""
  default_triggers = _FiletypeTriggerDictFromSpec( DEFAULT_FILETYPE_TRIGGERS )
  user_triggers = _FiletypeTriggerDictFromSpec(
    vim.eval( 'g:ycm_semantic_triggers' ) )
  return _FiletypeDictUnion( default_triggers, user_triggers )
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Pygame unit test suite package
Exports function run()
A quick way to run the test suite package from the command line
is by importing the go submodule:
python -m pygame.tests.go [<test options>]
Command line option --help displays a usage message. Available options
correspond to the pygame.tests.run arguments.
The xxxx_test submodules of the tests package are unit test suites for
individual parts of Pygame. Each can also be run as a main program. This is
useful if the test, such as cdrom_test, is interactive.
For Pygame development the test suite can be run from a Pygame distribution
root directory using run_tests.py. Alternately, test/__main__.py can be run
directly.
"""
# Bootstrapping: behaviour depends on how this package module was reached.
if __name__ == 'pygame.tests':
    # Imported as the installed pygame.tests package: expose run().
    from pygame.tests.test_utils.run_tests import run
elif __name__ == '__main__':
    # Executed directly as a script: decide whether we live inside a pygame
    # tree ("pygame/tests") or a stand-alone "test" directory, then defer to
    # the matching __main__ module.
    import os
    import sys
    pkg_dir = os.path.split(os.path.abspath(__file__))[0]
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        # Make the stand-alone "test" package importable from its parent.
        sys.path.insert(0, parent_dir)
    if is_pygame_pkg:
        import pygame.tests.__main__
    else:
        import test.__main__
else:
    # Imported under another package name (stand-alone "test" package).
    from test.test_utils.run_tests import run
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Views related to EdxNotes.
"""
from __future__ import absolute_import
import json
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.urls import reverse
from django.views.decorators.http import require_GET
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from opaque_keys.edx.keys import CourseKey
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
from six import text_type
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.model_data import FieldDataCache
from lms.djangoapps.courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
DEFAULT_PAGE,
DEFAULT_PAGE_SIZE,
NoteJSONEncoder,
delete_all_notes_for_user,
get_course_position,
get_edxnotes_id_token,
get_notes,
is_feature_enabled
)
from openedx.core.djangoapps.user_api.accounts.permissions import CanRetireUser
from openedx.core.djangoapps.user_api.models import RetirementStateError, UserRetirementStatus
from util.json_request import JsonResponse, JsonResponseBadRequest
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
    """
    Displays the EdxNotes page.

    Arguments:
        request: HTTP request object
        course_id: course id

    Returns:
        Rendered HTTP response.

    Raises:
        Http404: if the EdxNotes feature is disabled for this course/user.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)
    if not is_feature_enabled(course, request.user):
        raise Http404
    notes_info = get_notes(request, course)
    has_notes = (len(notes_info.get('results')) > 0)
    context = {
        "course": course,
        "notes_endpoint": reverse("notes", kwargs={"course_id": course_id}),
        "notes": notes_info,
        "page_size": DEFAULT_PAGE_SIZE,
        "debug": settings.DEBUG,
        'position': None,
        'disabled_tabs': settings.NOTES_DISABLED_TABS,
        'has_notes': has_notes,
    }
    if not has_notes:
        # No notes yet: look up the learner's current course position so the
        # template can point them back into the courseware instead of showing
        # an empty list.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2
        )
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course_key, course=course
        )
        position = get_course_position(course_module)
        if position:
            context.update({
                'position': position,
            })
    return render_to_response("edxnotes/edxnotes.html", context)
@require_GET
@login_required
def notes(request, course_id):
    """
    Notes view to handle list and search requests.

    Query parameters:
        page: page number to get
        page_size: number of items in the page
        text: text string to search. If `text` param is missing then get all the
            notes for the current user for this course else get only those notes
            which contain the `text` value.

    Arguments:
        request: HTTP request object
        course_id: course id

    Returns:
        Paginated response as JSON. A sample response is below.
        {
         "count": 101,
         "num_pages": 11,
         "current_page": 1,
         "results": [
             {
                "chapter": {
                    "index": 4,
                    "display_name": "About Exams and Certificates",
                    "location": "i4x://org/course/category/name@revision",
                    "children": [
                        "i4x://org/course/category/name@revision"
                    ]
                },
                "updated": "Dec 09, 2015 at 09:31 UTC",
                "tags": ["shadow","oil"],
                "quote": "foo bar baz",
                "section": {
                    "display_name": "edX Exams",
                    "location": "i4x://org/course/category/name@revision",
                    "children": [
                        "i4x://org/course/category/name@revision",
                        "i4x://org/course/category/name@revision",
                    ]
                },
                "created": "2015-12-09T09:31:17.338305Z",
                "ranges": [
                    {
                        "start": "/div[1]/p[1]",
                        "end": "/div[1]/p[1]",
                        "startOffset": 0,
                        "endOffset": 6
                    }
                ],
                "user": "50cf92f9a3d8489df95e583549b919df",
                "text": "first angry height hungry structure",
                "course_id": "edx/DemoX/Demo",
                "id": "1231",
                "unit": {
                    "url": "/courses/edx%2FDemoX%2FDemo/courseware/1414ffd5143b4b508f739b563ab468b7/workflow/1",
                    "display_name": "EdX Exams",
                    "location": "i4x://org/course/category/name@revision"
                },
                "usage_id": "i4x://org/course/category/name@revision"
             } ],
         "next": "http://0.0.0.0:8000/courses/edx%2FDemoX%2FDemo/edxnotes/notes/?page=2&page_size=10",
         "start": 0,
         "previous": null
        }
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    if not is_feature_enabled(course, request.user):
        raise Http404
    # Fall back to defaults for missing OR empty-string query params
    # ("or" covers both, unlike GET.get's default argument).
    page = request.GET.get('page') or DEFAULT_PAGE
    page_size = request.GET.get('page_size') or DEFAULT_PAGE_SIZE
    text = request.GET.get('text')
    try:
        notes_info = get_notes(
            request,
            course,
            page=page,
            page_size=page_size,
            text=text
        )
    except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
        # NOTE(review): this sends a 500 status with JsonResponseBadRequest's
        # body shape (the status kwarg overrides the 400) — confirm intended.
        return JsonResponseBadRequest({"error": text_type(err)}, status=500)
    # NoteJSONEncoder handles the non-primitive values in the notes payload.
    return HttpResponse(json.dumps(notes_info, cls=NoteJSONEncoder), content_type="application/json")
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
    """
    Get JWT ID-Token, in case you need new one.

    ``course_id`` is unused; it exists only to satisfy the URL pattern.
    """
    id_token = get_edxnotes_id_token(request.user)
    return HttpResponse(id_token, content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
    """
    Handle ajax call from "Show notes" checkbox.

    Reads a JSON body of the form {"visibility": <bool>} and persists the
    flag on the course module. Responds 200 on success, 400 on a malformed
    body, 404 if the EdxNotes feature is disabled.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)
    field_data_cache = FieldDataCache([course], course_key, request.user)
    course_module = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course_key, course=course
    )
    if not is_feature_enabled(course, request.user):
        raise Http404
    try:
        visibility = json.loads(request.body.decode('utf8'))["visibility"]
        course_module.edxnotes_visibility = visibility
        course_module.save()
        return JsonResponse(status=200)
    except (ValueError, KeyError):
        # ValueError: body is not valid JSON; KeyError: no "visibility" key.
        log.warning(
            u"Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
        )
        return JsonResponseBadRequest()
class RetireUserView(APIView):
    """
    **Use Cases**

        A superuser or the user with the username specified by settings.RETIREMENT_SERVICE_WORKER_USERNAME can "retire"
        the user's data from the edx-notes-api (aka. Edxnotes) service, which will delete all notes (aka. annotations)
        the user has made.

    **Example Requests**

        * POST /api/edxnotes/v1/retire_user/
          {
              "username": "an_original_username"
          }

    **Example Response**

        * HTTP 204 with empty body, indicating success.

        * HTTP 404 with empty body. This can happen when:
            - The requested user does not exist in the retirement queue.

        * HTTP 405 (Method Not Allowed) with error message. This can happen when:
            - RetirementStateError is thrown: the user is currently in a retirement state which cannot be acted on, such
              as a terminal or completed state.

        * HTTP 500 with error message. This can happen when:
            - EdxNotesServiceUnavailable is thrown: the edx-notes-api IDA is not available.
    """
    authentication_classes = (JwtAuthentication,)
    permission_classes = (permissions.IsAuthenticated, CanRetireUser)

    def post(self, request):
        """
        Implements the retirement endpoint.
        """
        username = request.data['username']
        try:
            retirement = UserRetirementStatus.get_retirement_for_retirement_action(username)
            delete_all_notes_for_user(retirement.user)
        except UserRetirementStatus.DoesNotExist:
            # User is not in the retirement queue.
            return Response(status=status.HTTP_404_NOT_FOUND)
        except RetirementStateError as exc:
            # User is in a retirement state that cannot be acted on
            # (e.g. terminal/completed).
            return Response(text_type(exc), status=status.HTTP_405_METHOD_NOT_ALLOWED)
        except Exception as exc:  # pylint: disable=broad-except
            # Deliberate catch-all: surface service failures (e.g. the
            # edx-notes-api being unavailable) as a 500 with the message.
            return Response(text_type(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(status=status.HTTP_204_NO_CONTENT)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --strict.
Tests errors that can be thrown by gjslint when in strict mode.
"""
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
class StrictTest(unittest.TestCase):
  """Tests scenarios where strict generates warnings."""

  def testUnclosedString(self):
    """Tests warnings are reported when nothing is disabled.

    b/11450054.
    """
    # The stray quote leaves an unterminated string, which makes the file
    # unparseable; the linter is expected to report all three errors below.
    original = [
        'bug = function() {',
        ' (\'foo\'\');',
        '};',
        '',
    ]
    expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
                errors.FILE_IN_BLOCK]
    self._AssertErrors(original, expected)

  def _AssertErrors(self, original, expected_errors):
    """Asserts that linting original produces exactly expected_errors."""
    # Trap gjslint's output and parse it to get the messages added.
    error_accumulator = erroraccumulator.ErrorAccumulator()
    runner.Run('testing.js', error_accumulator, source=original)
    error_nums = [e.code for e in error_accumulator.GetErrors()]
    error_nums.sort()
    # NOTE(review): sorts the caller's list in place; harmless here since
    # callers pass throwaway lists.
    expected_errors.sort()
    self.assertListEqual(error_nums, expected_errors)


if __name__ == '__main__':
  googletest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"math/rand"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// randStringBytes returns a string of n pseudo-random characters drawn
// from letterBytes using math/rand's default source.
func randStringBytes(n int) string {
	buf := make([]byte, 0, n)
	for i := 0; i < n; i++ {
		buf = append(buf, letterBytes[rand.Intn(len(letterBytes))])
	}
	return string(buf)
}
// TestLogSymLink verifies that logSymlink truncates the combined
// pod/container/ID prefix to 251 characters before appending ".log".
func TestLogSymLink(t *testing.T) {
	as := assert.New(t)
	containerLogsDir := "/foo/bar"
	// Lengths chosen so the joined name would exceed the limit without truncation.
	podFullName := randStringBytes(128)
	containerName := randStringBytes(70)
	containerID := randStringBytes(80)
	// The file name cannot exceed 255 characters. Since .log suffix is required, the prefix cannot exceed 251 characters.
	expectedPath := filepath.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, containerID)[:251]+".log")
	as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, containerID))
}
// TestLegacyLogSymLink verifies the same 251-character truncation for the
// legacy symlink format (pod_namespace_container-ID under legacyContainerLogsDir).
func TestLegacyLogSymLink(t *testing.T) {
	as := assert.New(t)
	containerID := randStringBytes(80)
	containerName := randStringBytes(70)
	podName := randStringBytes(128)
	podNamespace := randStringBytes(10)
	// The file name cannot exceed 255 characters. Since .log suffix is required, the prefix cannot exceed 251 characters.
	expectedPath := filepath.Join(legacyContainerLogsDir, fmt.Sprintf("%s_%s_%s-%s", podName, podNamespace, containerName, containerID)[:251]+".log")
	as.Equal(expectedPath, legacyLogSymlink(containerID, containerName, podName, podNamespace))
}
// TestGetContainerIDFromLegacyLogSymLink checks that the (possibly truncated)
// container ID can be recovered from a legacy log symlink name, and that
// malformed names produce an error and an empty result.
func TestGetContainerIDFromLegacyLogSymLink(t *testing.T) {
	containerID := randStringBytes(80)
	containerName := randStringBytes(70)
	podName := randStringBytes(128)
	podNamespace := randStringBytes(10)
	for _, test := range []struct {
		name        string
		logSymLink  string
		expected    string
		shouldError bool
	}{
		{
			name:        "unable to find separator",
			logSymLink:  "dummy.log",
			expected:    "",
			shouldError: true,
		},
		{
			name:        "invalid suffix",
			logSymLink:  filepath.Join(legacyContainerLogsDir, fmt.Sprintf("%s_%s_%s-%s", podName, podNamespace, containerName, containerID)[:251]+".invalidsuffix"),
			expected:    "",
			shouldError: true,
		},
		{
			// Only 5 ID characters present; presumably below the parser's
			// minimum ID length (the valid case expects 40) — see next case.
			name:        "container ID too short",
			logSymLink:  filepath.Join(legacyContainerLogsDir, fmt.Sprintf("%s_%s_%s-%s", podName, podNamespace, containerName, containerID[:5])+".log"),
			expected:    "",
			shouldError: true,
		},
		{
			// Truncation to 251 characters still leaves the first 40 ID
			// characters intact given the name lengths above.
			name:        "valid path",
			logSymLink:  filepath.Join(legacyContainerLogsDir, fmt.Sprintf("%s_%s_%s-%s", podName, podNamespace, containerName, containerID)[:251]+".log"),
			expected:    containerID[:40],
			shouldError: false,
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			containerID, err := getContainerIDFromLegacyLogSymlink(test.logSymLink)
			if test.shouldError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, test.expected, containerID)
		})
	}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/kubelet/kuberuntime/legacy_test.go
|
base_suite: replica_sets_jscore_passthrough_base
overrides:
- "replica_sets.jscore_passthrough_with_system_buckets_hook"
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokeconfig/matrix_suites/mappings/replica_sets_jscore_passthrough.yml
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gperftools(AutotoolsPackage):
    """Google's fast malloc/free implementation, especially for
    multi-threaded applications. Contains tcmalloc, heap-checker,
    heap-profiler, and cpu-profiler.
    """

    homepage = "https://code.google.com/p/gperftools"
    url = "https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz"

    # Each version pins its own download URL because the project moved
    # hosting (Google Drive for 2.3, GitHub releases for 2.4).
    version('2.4', '2171cea3bbe053036fb5d5d25176a160',
            url="https://github.com/gperftools/gperftools/releases/download/gperftools-2.4/gperftools-2.4.tar.gz")
    version('2.3', 'f54dd119f0e46ac1f13264f8d97adf90',
            url="https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz")

    depends_on("libunwind")
|
unknown
|
codeparrot/codeparrot-clean
| ||
import django
from django import forms
from django.db import models
from crispy_forms.helper import FormHelper
class TestForm(forms.Form):
    """Form fixture exercising a variety of field and widget types."""
    is_company = forms.CharField(label="company", required=False, widget=forms.CheckboxInput())
    email = forms.EmailField(label="email", max_length=30, required=True, widget=forms.TextInput(), help_text="Insert your email")
    password1 = forms.CharField(label="password", max_length=30, required=True, widget=forms.PasswordInput())
    password2 = forms.CharField(label="re-enter password", max_length=30, required=True, widget=forms.PasswordInput())
    first_name = forms.CharField(label="first name", max_length=5, required=True, widget=forms.TextInput())
    last_name = forms.CharField(label="last name", max_length=5, required=True, widget=forms.TextInput())
    datetime_field = forms.DateTimeField(label="date time", widget=forms.SplitDateTimeWidget())

    def clean(self):
        """Cross-field validation: the two password fields must match."""
        super(TestForm, self).clean()
        password1 = self.cleaned_data.get('password1', None)
        password2 = self.cleaned_data.get('password2', None)
        # NOTE(review): precedence makes this
        # (not password1 and not password2) or (password1 != password2),
        # so it also raises when BOTH passwords are absent. Looks
        # intentional for a test fixture — confirm before changing.
        if not password1 and not password2 or password1 != password2:
            raise forms.ValidationError("Passwords dont match")
        return self.cleaned_data
class TestForm2(TestForm):
    """Same fields as TestForm, but with a crispy FormHelper attached so
    the form renders through crispy-forms."""
    def __init__(self, *args, **kwargs):
        super(TestForm2, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
class CheckboxesTestForm(forms.Form):
    """Fixture covering checkbox-multiple and inline-radio widgets with
    numeric and string choice values and various initial selections."""
    # Numeric choice values, single initial selection.
    checkboxes = forms.MultipleChoiceField(
        choices=(
            (1, "Option one"),
            (2, "Option two"),
            (3, "Option three")
        ),
        initial=(1,),
        widget=forms.CheckboxSelectMultiple,
    )
    # String choice values, two initial selections.
    alphacheckboxes = forms.MultipleChoiceField(
        choices=(
            ('option_one', "Option one"),
            ('option_two', "Option two"),
            ('option_three', "Option three")
        ),
        initial=('option_two', 'option_three'),
        widget=forms.CheckboxSelectMultiple,
    )
    numeric_multiple_checkboxes = forms.MultipleChoiceField(
        choices=(
            (1, "Option one"),
            (2, "Option two"),
            (3, "Option three")
        ),
        initial=(1, 2),
        widget=forms.CheckboxSelectMultiple,
    )
    inline_radios = forms.ChoiceField(
        choices=(
            ('option_one', "Option one"),
            ('option_two', "Option two"),
        ),
        widget=forms.RadioSelect,
        initial='option_two',
    )
class CrispyTestModel(models.Model):
    """Minimal model backing the ModelForm fixtures below."""
    email = models.CharField(max_length=20)
    password = models.CharField(max_length=20)
class TestForm3(forms.ModelForm):
    """ModelForm fixture with a FormHelper attached."""
    class Meta:
        model = CrispyTestModel
        fields = ['email', 'password']
        # exclude is applied after fields, so only 'email' ends up on the form.
        exclude = ['password']

    def __init__(self, *args, **kwargs):
        super(TestForm3, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
class TestForm4(forms.ModelForm):
    """ModelForm fixture exercising the '__all__' fields shortcut on
    Django versions that support it."""
    class Meta:
        """
        before Django1.6, one cannot use __all__ shortcut for fields
        without getting the following error:
        django.core.exceptions.FieldError: Unknown field(s) (a, l, _) specified for CrispyTestModel
        because obviously it casts the string to a set
        """
        model = CrispyTestModel
        if django.VERSION >= (1, 6):
            fields = '__all__'  # eliminate RemovedInDjango18Warning
class TestForm5(forms.Form):
    """Fixture where a checkbox-multiple and a radio field share one
    choices list, plus a plain integer field."""
    # Shared by both choice fields below.
    choices = [
        (1, 1),
        (2, 2),
        (1000, 1000),
    ]
    checkbox_select_multiple = forms.MultipleChoiceField(
        widget=forms.CheckboxSelectMultiple,
        choices=choices
    )
    radio_select = forms.ChoiceField(
        widget=forms.RadioSelect,
        choices=choices
    )
    pk = forms.IntegerField()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import re
import urllib
import urlparse
from resources.lib.modules import client
from resources.lib.modules import utils
def request(url, check, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
    """Fetch ``url`` via client.request, retrying through up to three
    randomly-chosen web proxies when the response does not contain the
    ``check`` marker string. Returns the response body, or None (implicitly)
    if every attempt fails."""
    try:
        r = client.request(url, close=close, redirect=redirect, proxy=proxy, post=post, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, compression=compression, output=output, timeout=timeout)
        # Caller asked for error bodies: return whatever we got.
        if r is not None and error is not False: return r
        # Direct fetch succeeded if the marker is present (or body is empty).
        if check in str(r) or str(r) == '': return r
        # Shuffle the proxy pool (twice) and keep the first three.
        proxies = sorted(get(), key=lambda x: random.random())
        proxies = sorted(proxies, key=lambda x: random.random())
        proxies = proxies[:3]
        for p in proxies:
            p += urllib.quote_plus(url)
            if post is not None:
                if isinstance(post, dict):
                    post = utils.byteify(post)
                    post = urllib.urlencode(post)
                # The proxies take GET requests only, so the POST payload is
                # folded into the proxied URL's query string.
                p += urllib.quote_plus('?%s' % post)
            r = client.request(p, close=close, redirect=redirect, proxy=proxy, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, compression=compression, output=output, timeout='20')
            if check in str(r) or str(r) == '': return r
    except:
        # Best-effort by design: any failure falls through and returns None.
        pass
def geturl(url):
    """Resolve ``url`` to its final redirect target, falling back to random
    web proxies when the redirect lands on a different second-level domain
    (which usually means the direct request was blocked)."""
    try:
        r = client.request(url, output='geturl')
        if r is None: return r
        # Compare the second-level domain of the request and the response.
        host1 = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(url.strip().lower()).netloc)[0]
        host2 = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(r.strip().lower()).netloc)[0]
        if host1 == host2: return r
        # Same shuffle-and-take-three scheme as request() above.
        proxies = sorted(get(), key=lambda x: random.random())
        proxies = sorted(proxies, key=lambda x: random.random())
        proxies = proxies[:3]
        for p in proxies:
            p += urllib.quote_plus(url)
            r = client.request(p, output='geturl')
            if r is not None: return parse(r)
    except:
        # Best-effort by design: returns None on any failure.
        pass
def parse(url):
    """Unwrap a proxied URL back to its real target.

    First decodes HTML entities, then strips the proxy wrapper: the proxies
    used here carry the target in a 'u' or 'q' query parameter. Every step
    is best-effort — a URL without the parameter passes through unchanged."""
    try: url = client.replaceHTMLCodes(url)
    except: pass
    for wrapper_key in ('u', 'q'):
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)[wrapper_key][0]
        except: pass
    return url
def get():
    """Return the pool of web-proxy endpoints. Each entry expects the
    (percent-encoded) target URL to be appended directly. The commented-out
    entries are dead or unreliable proxies kept for reference."""
    return [
        'https://www.3proxy.us/index.php?hl=2e5&q=',
        'https://www.4proxy.us/index.php?hl=2e5&q=',
        'http://www.xxlproxy.com/index.php?hl=3e4&q=',
        'http://free-proxyserver.com/browse.php?b=20&u=',
        'http://proxite.net/browse.php?b=20&u=',
        'http://proxydash.com/browse.php?b=20&u=',
        'http://webproxy.stealthy.co/browse.php?b=20&u=',
        'http://sslpro.eu/browse.php?b=20&u=',
        'http://webtunnel.org/browse.php?b=20&u=',
        'http://proxycloud.net/browse.php?b=20&u=',
        'http://sno9.com/browse.php?b=20&u=',
        'http://www.onlineipchanger.com/browse.php?b=20&u=',
        'http://www.pingproxy.com/browse.php?b=20&u=',
        'https://www.ip123a.com/browse.php?b=20&u=',
        'http://buka.link/browse.php?b=20&u=',
        'https://zend2.com/open18.php?b=20&u=',
        'http://proxy.deals/browse.php?b=20&u=',
        'http://freehollandproxy.com/browse.php?b=20&u=',
        'http://proxy.rocks/browse.php?b=20&u=',
        'http://proxy.discount/browse.php?b=20&u=',
        'http://proxy.lgbt/browse.php?b=20&u=',
        'http://proxy.vet/browse.php?b=20&u=',
        'http://www.unblockmyweb.com/browse.php?b=20&u=',
        'http://onewebproxy.com/browse.php?b=20&u=',
        'http://pr0xii.com/browse.php?b=20&u=',
        'http://mlproxy.science/surf.php?b=20&u=',
        'https://www.prontoproxy.com/browse.php?b=20&u=',
        'http://fproxy.net/browse.php?b=20&u=',
        #'http://www.ruby-group.xyz/browse.php?b=20&u=',
        #'http://securefor.com/browse.php?b=20&u=',
        #'http://www.singleclick.info/browse.php?b=20&u=',
        #'http://www.socialcommunication.xyz/browse.php?b=20&u=',
        #'http://www.theprotected.xyz/browse.php?b=20&u=',
        #'http://www.highlytrustedgroup.xyz/browse.php?b=20&u=',
        #'http://www.medicalawaregroup.xyz/browse.php?b=20&u=',
        #'http://www.proxywebsite.us/browse.php?b=20&u=',
        'http://www.mybriefonline.xyz/browse.php?b=20&u=',
        'http://www.navigate-online.xyz/browse.php?b=20&u='
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/conformance/es6/computedProperties/computedPropertyNames27_ES5.ts] ////
//// [computedPropertyNames27_ES5.ts]
class Base {
}
class C extends Base {
[(super(), "prop")]() { }
}
//// [computedPropertyNames27_ES5.js]
"use strict";
class Base {
}
class C extends Base {
[(super(), "prop")]() { }
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/computedPropertyNames27_ES5(target=es2015).js
|
#This module will call on other functions for the character creation process
#imported libraries
import RandomDiceNumbers as rdn
import CharacterCreation as cc
import MenuSelection as ms
import os.path
import CharacterNameTest as cnt
import FirstMenu as fm
import CharacterClasses as ccs
import CharacterStats as cs
import Attributes as att
import sys
#The main function
def creation(option_1):
    """Drive the interactive character-creation flow.

    Arguments:
        option_1: the caller's menu selection; only 1 (create a new
            character) does anything here.

    Walks the player through naming, race, class, attribute and
    secondary-skill selection, derives the secondary statistics, and
    persists the finished character via ccs.new_char(). Each stage has a
    retry counter (count_2 .. count_6); a flag value of 999 from any stage
    backs out to the enclosing stage, and a successful save zeroes every
    counter so all loops terminate.
    """
    # naming the new character
    count_2 = 10
    while count_2 > 0:
        # NOTE(review): if option_1 != 1 this loop body does nothing and
        # count_2 never changes — confirm callers only pass option_1 == 1.
        if option_1 == 1:
            character_name, flag_1 = cc.new_char()
            # forces break if player types 'back'
            if flag_1 == 999 or flag_1 == 0:  # user typed 'back' or selected 'no' or 'NO' in cnt
                break
            elif flag_1 == 750:  # does not meet name requirements
                print("\n\n ---That is not a valid entry.---")
                count_2 -= 1
            count_3 = 10
            while count_3 > 0:
                # Character creating process
                race, flag_3 = cc.race(character_name, count_3)  # choosing race
                if flag_3 == 999:
                    break
                count_4 = 10
                while count_4 > 0:
                    char_class, flag_4 = cc.class_sel(race, character_name)  # choosing class
                    if flag_4 == 999:
                        break
                    count_5 = 10
                    while count_5 > 0:
                        attribute_lst, flag_5 = att.get_att(race, char_class, character_name)
                        if flag_5 == 999:
                            break
                        elif flag_5 == 1:
                            # Unpack the six rolled ability scores.
                            attributes = list(attribute_lst)
                            strength = attributes[0]
                            dexterity = attributes[1]
                            constitution = attributes[2]
                            intelligence = attributes[3]
                            wisdom = attributes[4]
                            charisma = attributes[5]
                            # Strength specific stats
                            str_adj_list = cc.str_abil_adj(char_class, strength)
                            hit_probability = str_adj_list[0]
                            damage_adjustment = str_adj_list[1]
                            weight_allowance = str_adj_list[2]
                            open_doors = str_adj_list[3]
                            bend_bars__lift_gates = str_adj_list[4]
                            exp_bonus = str_adj_list[5]
                            max_press = weight_allowance
                            if len(str_adj_list) == 10:
                                # Extended strength table: the shared slots keep
                                # their positions and the door-specific entries
                                # occupy indices 5-8, pushing exp_bonus to 9.
                                hit_probability = str_adj_list[0]
                                damage_adjustment = str_adj_list[1]
                                weight_allowance = str_adj_list[2]
                                open_doors = str_adj_list[3]
                                bend_bars__lift_gates = str_adj_list[4]
                                locked_door = str_adj_list[5]
                                barred_door = str_adj_list[6]
                                magically_held_door = str_adj_list[7]
                                wizard_locked_door = str_adj_list[8]
                                exp_bonus = str_adj_list[9]
                            # Wisdom specific stat adjustments
                            magic_attack_adjustment = cc.wis_abil_adj(wisdom)
                            # Dexterity specific stat adjustments
                            dex_adj_list = cc.dex_abil_adj(dexterity)
                            reaction__attacking_adjustment = dex_adj_list[0]
                            defensive_adjustment = dex_adj_list[1]
                            # Constitution specific stat adjustment
                            con_adj_list = cc.con_abil_adj(constitution, char_class)
                            hp_adj = con_adj_list[0]
                            system_shock_survival = con_adj_list[1]
                            resurrection_survival = con_adj_list[2]
                            # Charisma specific stat adjustment
                            cha_adj_list = cc.cha_abil_adj(charisma)
                            max_num_henchman = cha_adj_list[0]
                            loyalty_base = cha_adj_list[1]
                            reaction_adj = cha_adj_list[2]
                            # Starting Hit Points
                            hit_points = cc.starting_hp(char_class, hp_adj)
                            # Starting gold; other coin denominations start at zero.
                            g_p = cc.starting_gold()
                            c_p = 0
                            s_p = 0
                            e_p = 0
                            p_p = 0
                            # Starting age
                            age = cc.age(race, char_class)
                            # Saving Throws (filled in later in the game)
                            paralyzation_poison_death_magic = 0
                            rod_staff_wand = 0
                            petrification_polymorph = 0
                            breath_weapon = 0
                            spell = 0
                            # Starting Equipment
                            body_armor = ''
                            head_gear = ''
                            shield = ''
                            # Starting experience
                            lvl = 1
                            experience_points = 0
                            # Armor class
                            armor_class_base = 10
                            armor_class = armor_class_base
                            count_6 = 10
                            while count_6 > 0:
                                secondary_skill = cc.sec_skill()
                                secondary_skill_2 = 'N/A'
                                if len(secondary_skill) == 2:
                                    # BUG FIX: read the second skill BEFORE
                                    # overwriting secondary_skill; the old code
                                    # reassigned secondary_skill first, so
                                    # secondary_skill[1] then indexed into the
                                    # first skill's string instead of taking
                                    # the second skill.
                                    secondary_skill_2 = secondary_skill[1]
                                    secondary_skill = secondary_skill[0]
                                # Writing information to a file
                                sex = 'Unknown'
                                height = {"ft.": 5, "in.": 11}
                                weight = 170
                                hair = 'Unknown'
                                eyes = 'Unknown'
                                skin = 'Unknown'
                                surprise = 0
                                system_shock = 0
                                death_max = 0
                                deaths_to_date = 0
                                experience = 0
                                max_henchman = 0
                                reaction_adjustment = 0
                                stat_list = [
                                    c_p, s_p, e_p, g_p, p_p,
                                    paralyzation_poison_death_magic,
                                    rod_staff_wand,
                                    petrification_polymorph,
                                    breath_weapon, spell,
                                    body_armor, head_gear, shield,
                                    character_name, char_class, age, sex,
                                    height, weight, hair, eyes, skin,
                                    hit_points, experience, max_press,
                                    open_doors, bend_bars__lift_gates,
                                    surprise, system_shock, max_henchman,
                                    loyalty_base, reaction_adjustment,
                                    death_max, deaths_to_date, resurrection_survival,
                                    strength, dexterity, constitution, intelligence,
                                    wisdom, charisma, armor_class_base, armor_class, race,
                                    secondary_skill, secondary_skill_2
                                ]
                                ccs.new_char(character_name, stat_list)
                                # Zero every retry counter so all enclosing
                                # loops terminate after a successful save.
                                count_2 = 0
                                count_3 = 0
                                count_4 = 0
                                count_5 = 0
                                count_6 = 0
    return
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package acceptance
import (
"context"
"crypto/rand"
"fmt"
"io/fs"
"math/big"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/acceptance/cluster"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/build/bazel"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
)
// defaultContainerConfig returns the base Docker container config used by the
// acceptance tests: the pinned acceptance image, PG* environment variables
// pointing at the root client certs, and an entrypoint that creates a "roach"
// user before running the test command.
func defaultContainerConfig() container.Config {
	return container.Config{
		Image: acceptanceImage,
		Env: []string{
			fmt.Sprintf("PGUSER=%s", username.RootUser),
			fmt.Sprintf("PGPORT=%s", base.DefaultPort),
			"PGSSLCERT=/certs/client.root.crt",
			"PGSSLKEY=/certs/client.root.key",
		},
		Entrypoint: []string{"autouseradd", "-u", "roach", "-C", "/home/roach", "--"},
	}
}
// testDockerFail ensures the specified docker cmd fails.
func testDockerFail(ctx context.Context, t *testing.T, name string, cmd []string) {
	cfg := defaultContainerConfig()
	cfg.Cmd = cmd
	err := testDockerSingleNode(ctx, t, name, cfg)
	if err == nil {
		t.Error("expected failure")
	}
}
// testDockerSuccess ensures the specified docker cmd succeeds.
func testDockerSuccess(ctx context.Context, t *testing.T, name string, cmd []string) {
	cfg := defaultContainerConfig()
	cfg.Cmd = cmd
	err := testDockerSingleNode(ctx, t, name, cfg)
	if err != nil {
		t.Error(err)
	}
}
const (
	// acceptanceImage is the pinned Docker image used by the acceptance tests.
	// Iterating against a locally built version of the docker image can be done
	// by changing acceptanceImage to the hash of the container.
	acceptanceImage = "us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance:20250612-132728"
)
// testDocker starts a `num`-node Cockroach docker cluster and then runs
// `containerConfig` as a one-shot container against it, returning the error
// (if any) from that one-shot run. With num == 0 no cluster nodes are
// started and only the one-shot container runs. Test data (and, under
// Bazel, the CLI interactive tests) are bind-mounted into the container.
func testDocker(
	ctx context.Context, t *testing.T, num int, name string, containerConfig container.Config,
) error {
	maybeSkipTest(t)
	var err error
	RunDocker(t, func(t *testing.T) {
		var pwd string
		pwd, err = os.Getwd()
		if err != nil {
			return
		}
		testdataDir := filepath.Join(pwd, "testdata")
		if bazel.BuiltWithBazel() {
			testdataDir, err = os.MkdirTemp("", "")
			if err != nil {
				t.Fatal(err)
			}
			// Copy runfiles symlink content to a temporary directory to avoid broken symlinks in docker.
			err = copyRunfiles("testdata", testdataDir)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				_ = os.RemoveAll(testdataDir)
			}()
		}
		hostConfig := container.HostConfig{
			NetworkMode: "host",
			Binds:       []string{testdataDir + ":/mnt/data"},
		}
		if bazel.BuiltWithBazel() {
			interactivetestsDir, err := os.MkdirTemp("", "")
			if err != nil {
				t.Fatal(err)
			}
			// Copy runfiles symlink content to a temporary directory to avoid broken symlinks in docker.
			err = copyRunfiles("../cli/interactive_tests", interactivetestsDir)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				_ = os.RemoveAll(interactivetestsDir)
			}()
			hostConfig.Binds = append(hostConfig.Binds, interactivetestsDir+":/mnt/interactive_tests")
		}
		// Add a randomID to the container name to avoid overlap between tests running on
		// different shards.
		var nBig *big.Int
		nBig, err = rand.Int(rand.Reader, big.NewInt(10000000))
		if err != nil {
			t.Fatal(err)
		}
		n := nBig.Int64()
		name = name + "-" + strconv.Itoa(int(n))
		// Prepare the docker cluster.
		// We need to do this "under" the directory preparation above so as
		// to prevent the test from crashing because the directory gets
		// deleted before the container shutdown assertions get a chance to run.
		cfg := cluster.TestConfig{
			Name:     name,
			Duration: *flagDuration,
		}
		for i := 0; i < num; i++ {
			cfg.Nodes = append(cfg.Nodes, cluster.NodeConfig{Stores: []cluster.StoreConfig{{}}})
		}
		l := StartCluster(ctx, t, cfg).(*cluster.DockerCluster)
		var preserveLogs bool
		defer func() {
			// Check the final health of the cluster nodes and
			// stop the cluster after that.
			l.AssertAndStop(ctx, t)
			// Note: we must be careful to clean up the volumes *after*
			// the cluster has been shut down (in the `AssertAndStop` call).
			// Otherwise, the directory removal will cause the cluster nodes
			// to crash and report abnormal termination, even when the test
			// succeeds otherwise.
			log.Dev.Infof(ctx, "cleaning up docker volume")
			l.Cleanup(ctx, preserveLogs)
		}()
		if len(l.Nodes) > 0 {
			// Point the one-shot container at the first cluster node.
			containerConfig.Env = append(containerConfig.Env, "PGHOST="+l.Hostname(0))
		}
		log.Dev.Infof(ctx, "starting one-shot container")
		err = l.OneShot(
			ctx, acceptanceImage, types.ImagePullOptions{}, containerConfig, hostConfig,
			platforms.DefaultSpec(), "docker-"+name,
		)
		log.Dev.Infof(ctx, "one-shot container terminated: %v", err)
		// Keep the logs around for debugging only when the run failed.
		preserveLogs = err != nil
	})
	return err
}
// Bazel uses symlinks in the runfiles directory. If a directory with symlinks is mounted inside a docker container,
// the symlinks point to not existing destination.
// This function copies the content of the symlinks to another directory,
// so the files can be used inside a docker container. The caller function is responsible for cleaning up.
// This function doesn't copy the original file permissions and uses 755 for directories and files.
func copyRunfiles(source, destination string) error {
	return filepath.WalkDir(source, func(path string, dirEntry fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// TrimPrefix (rather than a blanket Replace) so that a path component
		// that merely contains the source string deeper in the tree cannot be
		// mangled; WalkDir always yields paths prefixed with `source`.
		relPath := strings.TrimPrefix(path, source)
		if relPath == "" {
			// The walk root itself; the destination directory already exists.
			return nil
		}
		if dirEntry.IsDir() {
			return os.Mkdir(filepath.Join(destination, relPath), 0755)
		}
		// Reading through the symlink copies the linked content, not the link.
		data, err := os.ReadFile(filepath.Join(source, relPath))
		if err != nil {
			return err
		}
		return os.WriteFile(filepath.Join(destination, relPath), data, 0755)
	})
}
// testDockerSingleNode runs the given one-shot container against a
// one-node docker cluster.
func testDockerSingleNode(
	ctx context.Context, t *testing.T, name string, containerConfig container.Config,
) error {
	return testDocker(ctx, t, 1, name, containerConfig)
}
// testDockerOneShot runs the given one-shot container without starting any
// cluster nodes (num == 0).
func testDockerOneShot(
	ctx context.Context, t *testing.T, name string, containerConfig container.Config,
) error {
	return testDocker(ctx, t, 0, name, containerConfig)
}
// cmdBase is the argv prefix for commands run inside the acceptance
// container: it sets environment knobs via /usr/bin/env and hands the
// actual command string to `bash -c`.
var cmdBase = []string{
	"/usr/bin/env",
	"COCKROACH_SKIP_UPDATE_CHECK=1",
	"COCKROACH_CRASH_REPORTS=",
	// Disable metamorphic testing for acceptance tests, since they are
	// end-to-end tests and metamorphic constants can make them too slow.
	"COCKROACH_INTERNAL_DISABLE_METAMORPHIC_TESTING=true",
	"/bin/bash",
	"-c",
}
// runTestDockerCLI runs one CLI interactive test (an `expect` script at
// testFilePath) inside a one-shot acceptance container. It first probes
// that the cockroach binary exists in the container and skips the test
// otherwise.
func runTestDockerCLI(t *testing.T, testNameSuffix, testFilePath string) {
	containerConfig := defaultContainerConfig()
	// Probe: `stat` the binary before attempting to run the actual test.
	containerConfig.Cmd = []string{"stat", cluster.CockroachBinaryInContainer}
	containerConfig.Env = []string{
		"CI=1", // Disables the initial color query by the termenv library.
		fmt.Sprintf("PGUSER=%s", username.RootUser),
		fmt.Sprintf("COCKROACH_DEV_LICENSE=%s", envutil.EnvOrDefaultString("COCKROACH_DEV_LICENSE", "")),
	}
	ctx := context.Background()
	if err := testDockerOneShot(ctx, t, "cli_test_"+testNameSuffix, containerConfig); err != nil {
		skip.IgnoreLintf(t, "TODO(dt): No binary in one-shot container, see #6086: %s", err)
	}

	// Under Bazel the interactive tests are bind-mounted (see testDocker);
	// otherwise they live at the in-container source path.
	containerPath := "/go/src/github.com/cockroachdb/cockroach/cli/interactive_tests"
	if bazel.BuiltWithBazel() {
		containerPath = "/mnt/interactive_tests"
	}
	testFile := filepath.Base(testFilePath)
	testPath := filepath.Join(containerPath, testFile)
	t.Run(testFile, func(t *testing.T) {
		log.Dev.Infof(ctx, "-- starting tests from: %s", testFile)
		// Symlink the logs directory to /logs, which is visible outside of the
		// container and preserved if the test fails. (They don't write to /logs
		// directly because they are often run manually outside of Docker, where
		// /logs is unlikely to exist.)
		cmd := "ln -s /logs logs"
		// We run the expect command using 'bash -c "(expect ...)"'.
		//
		// We cannot run "expect" directly, nor "bash -c 'expect ...'",
		// because both cause Expect to become the PID 1 process inside
		// the container. On Unix, orphan processes need to be wait()ed
		// upon by the PID 1 process when they terminate, lest they
		// remain forever in the zombie state. Unfortunately, Expect
		// does not contain code to do this. Bash does.
		cmd += "; (expect -d -f " + testPath + " " + cluster.CockroachBinaryInContainer + ")"
		containerConfig.Cmd = append(cmdBase, cmd)
		if err := testDockerOneShot(ctx, t, "cli_test_"+testNameSuffix, containerConfig); err != nil {
			t.Error(err)
		}
	})
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/acceptance/util_docker.go
|
#
# MicroHH
# Copyright (c) 2011-2020 Chiel van Heerwaarden
# Copyright (c) 2011-2020 Thijs Heus
# Copyright (c) 2014-2020 Bart van Stratum
#
# This file is part of MicroHH
#
# MicroHH is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroHH is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroHH. If not, see <http://www.gnu.org/licenses/>.
#
import netCDF4 as nc
import numpy as np
import struct as st
import glob
import re
import subprocess
import importlib
import shutil
import os
import sys
import filecmp
import timeit
import csv
import copy
import datetime
import itertools
from copy import deepcopy
# -------------------------
# General help functions
# -------------------------
def _int_or_float_or_str(value):
""" Helper function: convert a string to int/float/str """
try:
if ('.' in value):
return float(value)
else:
return int(float(value))
except BaseException:
return value.rstrip()
def _convert_value(value):
    """Convert a namelist value; comma-separated values become a list."""
    if ',' not in value:
        return _int_or_float_or_str(value)
    return [_int_or_float_or_str(part) for part in value.split(',')]
def _find_namelist_file():
""" Helper function: automatically find the .ini file in the current directory """
namelist_file = glob.glob('*.ini')
if len(namelist_file) == 0:
raise RuntimeError(
'Can\'t find any .ini files in the current directory!')
if len(namelist_file) > 1:
raise RuntimeError(
'There are multiple .ini files: {}'.format(namelist_file))
else:
return namelist_file[0]
# -------------------------
# Classes and functions to read and write MicroHH things
# -------------------------
class Read_namelist:
    """ Reads a MicroHH .ini file to memory

    All available variables are accessible as e.g.:
        nl = Read_namelist()   # with no name specified, it searches for a .ini file in the current dir
        itot = nl['grid']['itot']
        enttime = nl['time']['endtime']
    """

    def __init__(self, namelist_file=None, ducktype=True):
        """Parse `namelist_file` (auto-discovered in the cwd when None).

        With `ducktype=True` values are converted to int/float/list via
        `_convert_value`; otherwise they are kept as raw strings.
        """
        if namelist_file is None:
            namelist_file = _find_namelist_file()

        self.groups = {}   # Dictionary holding all the data
        curr_group_name = None
        with open(namelist_file) as f:
            for line in f:
                lstrip = line.strip()
                if len(lstrip) > 0 and lstrip[0] != "#":
                    if lstrip[0] == '[' and lstrip[-1] == ']':
                        curr_group_name = lstrip[1:-1]
                        self.groups[curr_group_name] = {}
                    elif "=" in line:
                        # Guard against a NameError when an assignment
                        # appears before any [group] header.
                        if curr_group_name is None:
                            raise RuntimeError(
                                'Found a value before the first group in .ini file')
                        # Split on the first '=' only, so values that
                        # themselves contain '=' are preserved intact.
                        var_name, value = lstrip.split('=', 1)
                        if ducktype:
                            value = _convert_value(value)
                        self.groups[curr_group_name][var_name] = value

    def __getitem__(self, name):
        """
        Get group dictionary with `nl['group_name']` syntax
        """
        if name in self.groups.keys():
            return self.groups[name]
        else:
            raise RuntimeError(
                'Can\'t find group \"{}\" in .ini file'.format(name))

    def __repr__(self):
        """
        Print list of availabe groups
        """
        return 'Available groups:\n{}'.format(', '.join(self.groups.keys()))

    def set_value(self, group, variable, value):
        """
        Set value in namelist file/dict, if the group or
        variable does not exist, it is newly defined
        """
        if group not in self.groups:
            self.groups[group] = {}
        self.groups[group][variable] = value

    def save(self, namelist_file, allow_overwrite=False):
        """
        Write namelist from (nested) dictionary back to .ini file
        """
        if os.path.exists(namelist_file) and not allow_overwrite:
            raise Exception('.ini file \"{}\" already exists!'.format(namelist_file))

        with open(namelist_file, 'w') as f:
            for group in self.groups:
                f.write('[{}]\n'.format(group))
                for variable, value in self.groups[group].items():
                    f.write('{}={}\n'.format(variable, value))
                f.write('\n')
def replace_namelist_value(item, new_value, group=None, namelist_file=None):
    """ Replace a item value in an existing namelist """
    # When no file is given, auto-discover the single .ini file in the cwd.
    if namelist_file is None:
        namelist_file = _find_namelist_file()

    # Read the whole file, then rewrite it line by line in place.
    with open(namelist_file, "r") as source:
        lines = source.readlines()

    with open(namelist_file, "w") as source:
        current_group = None
        has_replaced = False
        for line in lines:
            lstrip = line.strip()
            # Track which [group] we are currently inside.
            if len(lstrip)>0 and lstrip[0] == '[' and lstrip[-1] == ']':
                current_group = lstrip[1:-1]
            if group is None or group==current_group:
                # NOTE(review): the pattern matches `item` anywhere in the
                # line, so an item name that is a substring of another key
                # (e.g. 'x' vs 'xsize') is also rewritten — confirm callers
                # only pass unambiguous names. Also note `has_replaced`
                # becomes True for every line in the matching group, even
                # when the regex did not actually substitute anything.
                source.write(re.sub(r'({}).*'.format(item), r'\1={}'.format(new_value), line))
                has_replaced = True
            else:
                source.write(line)

    if (not has_replaced):
        raise RuntimeError(
            'There is no item \"{0}\" in group \"{1}\" in .ini file'.format(item, group))
def determine_ntasks():
    """Return npx*npy from the [master] group of the local .ini file.

    Missing npx/npy entries default to 1.
    """
    master = Read_namelist()['master']
    npx = master.get('npx', 1)
    npy = master.get('npy', 1)
    return npx * npy
class Read_statistics:
    """ Read all the NetCDF statistics

    Example:
        f = Read_statistics('drycblles.default.0000000.nc')
        print(f) prints a list with the available variables
        The data can be accessed as either f['th'] or f.th, which returns the numpy array with data
        The variable names can be accessed as f.names['th'], the units as f.units['th'], the dimensions as f.dimensions['th']
        This allows you to automatically format axis labels as e.g.:
        pl.xlabel("{0:} ({1:})".format(f.names['th'], f.units['th']))
    """

    def __init__(self, stat_file):
        f = nc.Dataset(stat_file, 'r')

        # Dictionaries which hold the variable names, units, etc.
        self.data = {}
        self.units = {}
        self.names = {}
        self.dimensions = {}

        # For each variable in the NetCDF file, read all the content and info
        for var in f.variables:
            self.data[var] = f.variables[var].__array__()
            self.units[var] = f.variables[var].units
            self.names[var] = f.variables[var].long_name
            self.dimensions[var] = f.variables[var].dimensions

        f.close()

    def __getitem__(self, name):
        """Dict-style access: f['th']. Raises RuntimeError for unknown names."""
        if name in self.data.keys():
            return self.data[name]
        else:
            raise RuntimeError(
                'Can\'t find variable \"{}\" in statistics file'.format(name))

    def __getattr__(self, name):
        """Attribute-style access: f.th.

        Raises AttributeError (the previous RuntimeError broke hasattr(),
        copy and pickle). The explicit guard on 'data' prevents infinite
        recursion when the attribute dictionary itself is not yet set
        (e.g. during unpickling, before __init__ has run).
        """
        if name == 'data':
            raise AttributeError(name)
        if name in self.data.keys():
            return self.data[name]
        raise AttributeError(
            'Can\'t find variable \"{}\" in statistics file'.format(name))

    def __repr__(self):
        return 'Available variables:\n{}'.format(', '.join(self.names.keys()))
class Read_grid:
    """ Read the grid file from MicroHH.
    If no file name is provided, grid.0000000 from the current directory is read """

    def __init__(self, itot, jtot, ktot, filename=None):
        # struct byte-order prefix matching this machine's native order.
        self.en = '<' if sys.byteorder == 'little' else '>'
        filename = 'grid.0000000' if filename is None else filename
        # Bytes per value, deduced from the file size: the grid file holds
        # two arrays (full + half levels) for each of the three dimensions.
        self.TF = round(os.path.getsize(filename) /
                        (2 * itot + 2 * jtot + 2 * ktot))
        if self.TF == 8:
            self.prec = 'd'   # double precision
        else:
            self.prec = 'f'   # single precision
        self.fin = open(filename, 'rb')
        self.dim = {}
        self.dim['x'] = self.read(itot)
        self.dim['xh'] = self.read(itot)
        self.dim['y'] = self.read(jtot)
        self.dim['yh'] = self.read(jtot)
        self.dim['z'] = self.read(ktot)
        self.dim['zh'] = self.read(ktot)
        self.fin.close()
        # The handle is only needed during construction; note that `read`
        # cannot be called after __init__ has finished (self.fin is gone).
        del self.fin

    def read(self, n):
        # Unpack the next `n` values from the open grid file as a numpy array.
        return np.array(
            st.unpack(
                '{0}{1}{2}'.format(
                    self.en, n, self.prec), self.fin.read(
                    n * self.TF)))
class Read_binary:
    """ Read a binary file from MicroHH.

    Endianness (`en`), precision character (`prec`) and float size (`TF`)
    are taken from `grid` (a Read_grid instance, or any object exposing
    those attributes).
    """

    def __init__(self, grid, filename):
        self.en = grid.en
        self.prec = grid.prec
        self.TF = grid.TF

        try:
            self.file = open(filename, 'rb')
        except OSError as e:
            # Only translate I/O problems (the old `except BaseException`
            # also swallowed KeyboardInterrupt); chain the original cause.
            raise Exception('Cannot find file {}'.format(filename)) from e

    def close(self):
        """Close the underlying file handle."""
        self.file.close()

    def read(self, n):
        """Read the next `n` values and return them as a numpy array."""
        raw = self.file.read(n * self.TF)
        return np.array(
            st.unpack('{0}{1}{2}'.format(self.en, n, self.prec), raw))
class Create_ncfile():
    """Create a NetCDF file for one variable on (a subset of) the MicroHH grid.

    `dimensions` maps dimension name ('time','x','xh',...) to the index
    selection to store; coordinate values are copied from `grid.dim`.
    """
    def __init__(
            self,
            grid,
            filename,
            varname,
            dimensions,
            precision='',
            compression=True):
        # clobber=False: refuse to overwrite an existing file.
        self.ncfile = nc.Dataset(filename, "w", clobber=False)

        # Default the float width to the grid's float size (f4/f8).
        if not precision:
            precision = 'f{}'.format(grid.TF)
        elif precision == 'single':
            precision = 'f4'
        else:
            precision = 'f8'

        # u, v and w live on the staggered (half-level) grid in their own
        # direction: rename the corresponding dimension accordingly.
        if(varname == 'u'):
            try:
                dimensions['xh'] = dimensions.pop('x')
            except KeyError:
                pass
        if(varname == 'v'):
            try:
                dimensions['yh'] = dimensions.pop('y')
            except KeyError:
                pass
        if(varname == 'w'):
            try:
                dimensions['zh'] = dimensions.pop('z')
            except KeyError:
                pass

        # create dimensions in netCDF file
        self.dim = {}
        self.dimvar = {}
        for key, value in dimensions.items():
            self.dim[key] = self.ncfile.createDimension(key, len(value))
            self.dimvar[key] = self.ncfile.createVariable(
                key, precision, (key))
            # 'time' has no coordinate values yet; spatial coordinates are
            # copied from the grid at the selected indices.
            if key != 'time':
                self.dimvar[key][:] = grid.dim[key][value]

        self.var = self.ncfile.createVariable(
            varname, precision, tuple(
                self.sortdims(
                    dimensions.keys())), zlib=compression)

    def sync(self):
        # Flush buffered data to disk.
        self.ncfile.sync()

    def close(self):
        self.ncfile.close()

    def sortdims(self, lst=[]):
        # Return the given dimension names in canonical NetCDF order.
        # (`lst` is only read, so the mutable default is harmless here.)
        ordered_dims = ['time', 'z', 'zh', 'y', 'yh', 'x', 'xh']
        lst_out = [value for value in ordered_dims if value in lst]
        return lst_out
def get_cross_indices(variable, mode):
    """ Find the cross-section indices given a variable name and mode (in 'xy','xz','yz') """
    if mode not in ('xy', 'xz', 'yz'):
        raise ValueError('\"mode\" should be in {\"xy\", \"xz\", \"yz\"}')

    # All cross-section files for this variable/mode combination.
    all_files = glob.glob('{}.{}.*.*'.format(variable, mode))
    if not all_files:
        raise Exception('Cannot find any cross-section')

    # Restrict to a single output time; every time holds the same indices.
    time = all_files[0].split('.')[-1]
    files_at_time = glob.glob('{}.{}.*.{}'.format(variable, mode, time))

    return sorted(int(f.split('.')[-2]) for f in files_at_time)
# ANSI terminal escape codes used by the print_* helpers below
# ('bf' = bold face, 'ul' = underline, 'end' = reset attributes).
_opts = {
    'blue': '\033[94m',
    'green': '\033[92m',
    'purple': '\033[95m',
    'red': '\033[91m',
    'yellow': '\033[93m',
    'bf': '\033[1m',
    'ul': '\033[4m',
    'end': '\033[0m'
}
def print_header(message, time=True):
    """
    Format of print statements indicating new main routine
    """
    if not time:
        print('{}{}{}{}'.format(
            _opts['green'], _opts['bf'], message, _opts['end']))
        return
    stamp = datetime.datetime.now().strftime('%d-%m: %H:%M')
    print('[{}] {}{}{}'.format(
        stamp, _opts['green'], message, _opts['end']))
def print_message(message):
    """Print an indented, bullet-style progress message."""
    print(' -', message)
def print_warning(message):
    """Print `message` prefixed with a bold yellow WARNING tag."""
    tag = '{}{}WARNING:{}'.format(_opts['yellow'], _opts['bf'], _opts['end'])
    print('{} {}'.format(tag, message))
def print_error(message):
    """Print `message` prefixed with a bold red ERROR tag."""
    tag = '{}{}ERROR:{}'.format(_opts['red'], _opts['bf'], _opts['end'])
    print('{} {}'.format(tag, message))
def merge_options(options, options_to_add):
    """
    Merge dictionaries of dicts with run options.

    Existing groups in `options` are updated in place; new groups are
    deep-copied so later mutation cannot leak back into `options_to_add`.
    """
    for group, group_options in options_to_add.items():
        if group not in options:
            options[group] = copy.deepcopy(group_options)
        else:
            options[group].update(group_options)
def run_scripts(scripts):
    """Run pre/post-processing scripts.

    `scripts` maps a script file name to either None (import/execute only)
    or a list of [function_name, arg1, ...] entries to call in the module.
    """
    def exec_function(lib, function, *args):
        # Call `function` in module `lib`; treat a non-zero, non-None return
        # value as a failure.
        rc = getattr(lib, function)(*args)

        if (rc is not None) and (rc != 0):
            raise Exception(
                '{}: {}() returned {}'.format(
                    script, function, rc))

    if scripts is not None:
        # Loop over, and execute all functions
        for script, functions in scripts.items():
            if (script == __file__):
                # The caller refers to this very module.
                lib = sys.modules[__name__]
            else:
                # Module name = script name minus the `.py`
                module = script.replace('.py', '')

                # The full module name is relative to the source file, with
                # dots instead of slashes
                full_module = os.path.relpath(
                    os.getcwd(), sys.path[0]).replace(
                    '/', '.') + '.' + module

                # Import module; this executes all code that is not in
                # classes/functions
                if full_module not in sys.modules:
                    lib = importlib.import_module(full_module)
                else:
                    # NOTE(review): on the reload path `lib` keeps its value
                    # from a previous loop iteration rather than the reloaded
                    # module object — confirm this is intentional.
                    importlib.reload(sys.modules[full_module])

            # If any specific routines are specified, run them
            if functions is not None:
                for function in functions:
                    args = function[1:]
                    exec_function(lib, function[0], *args)
def restart_pre(origin, timestr):
    """Copy input, grid, fftw-plan and `*.timestr` restart files from
    `../origin` into the current working directory."""
    patterns = ('*_input.nc', 'grid.0000000', 'fftwplan.0000000', '*.' + timestr)
    for pattern in patterns:
        for file in glob.glob('../' + origin + '/' + pattern):
            shutil.copy(file, '.')
def compare_bitwise(f1, f2):
    """Compare two files bit-for-bit.

    Returns a tuple (cmp_python, cmp_os) with the verdicts of Python's
    `filecmp` and of the system `cmp` utility, respectively.
    """
    # shallow=False forces an actual content comparison; the default
    # (shallow=True) can report equality purely from matching os.stat()
    # signatures without reading the data, defeating a *bitwise* check.
    cmp_python = filecmp.cmp(f1, f2, shallow=False)

    # Backup check with OS `cmp`. Passing an argv list (shell=False) avoids
    # any shell interpretation of the file names.
    try:
        sp = subprocess.Popen(
            ['cmp', f1, f2],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        sp.communicate()
        cmp_os = not sp.returncode
    except OSError:
        # `cmp` unavailable: same verdict as the old shell invocation
        # (non-zero exit -> not equal).
        cmp_os = False

    return cmp_python, cmp_os
def restart_post(origin, timestr):
    """Verify that every `*.timestr` restart file matches its reference
    copy in `../origin`, raising Warning on any mismatch."""
    for file_name in glob.glob('*.' + timestr):
        cmp_python, cmp_os = compare_bitwise('../' + origin + '/' + file_name, file_name)
        if not (cmp_python or cmp_os):
            raise Warning('{} is not identical (python+OS)'.format(file_name))
        if not cmp_python:
            raise Warning('{} is not identical (python)'.format(file_name))
        if not cmp_os:
            raise Warning('{} is not identical (OS)'.format(file_name))
def compare(origin, file, starttime=-1, vars=None):
    """Compare time-averaged statistics in `file` against `../origin/file`.

    Arguments:
        origin: name of the reference case directory (one level up).
        file: NetCDF statistics file name.
        starttime: first time index of the averaging window (default -1,
            i.e. only the last sample).
        vars: dict mapping variable name -> [rtol, atol]. When None, all
            variables except the blacklist are checked with the default
            tolerances. (The name shadows the `vars` builtin, but is kept
            for backward compatibility with existing callers.)

    Raises Warning when any variable exceeds its tolerance.
    """
    # Fresh dict per call: the old mutable default `vars={}` was mutated
    # below, leaking the variable list across successive calls.
    if vars is None:
        vars = {}

    nc_new = nc.Dataset(file, mode="r")
    nc_old = nc.Dataset('../' + origin + '/' + file, mode="r")

    blacklist = ['iter']
    rtol = 1e-3
    atol = 1e-8

    if len(vars) == 0:
        for key in nc_new.variables.keys():
            if key not in blacklist:
                vars.update({key: [rtol, atol]})

    for key, opts in vars.items():
        var_new = np.mean(nc_new.variables[key][starttime:, ...], axis=0)
        var_old = np.mean(nc_old.variables[key][starttime:, ...], axis=0)
        if not np.allclose(
                var_new,
                var_old,
                rtol=opts[0],
                atol=opts[1],
                equal_nan=True):
            with np.errstate(all='ignore'):
                raise Warning('{0} in {1} has a relative error of up to {2:.2%}'.format(
                    key, file, np.max(np.abs((var_new - var_old) / var_old))))
def execute(command):
    """Run `command` through bash, appending its stdout/stderr to
    stdout.log / stderr.log in the current directory.

    Raises Exception on a non-zero exit status.
    """
    proc = subprocess.Popen(
        command,
        executable='/bin/bash',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = proc.communicate()
    proc.wait()

    # Append the captured output streams to their log files.
    for log_name, data in (('stdout.log', out), ('stderr.log', err)):
        with open(log_name, 'a') as f:
            f.write(data.decode('utf-8'))

    if proc.returncode != 0:
        raise Exception(
            '\'{}\' returned \'{}\'.'.format(
                command, proc.returncode))
def run_cases(cases, executable, mode, outputfile=''):
    """
    Run all `cases` with `executable`, one after the other.

    `mode` is 'cpu' or 'gpu' (run the executable directly) or 'cpumpi'
    (run under mpiexec with npx*npy tasks from the [master] group). When
    `outputfile` is given, a CSV summary (name, rundir, success, time,
    options) is written. Run directories of successful cases with
    `keep == False` are removed afterwards.
    """
    if not os.path.exists(executable):
        raise Exception(
            'ERROR: Executable {} does not exists'.format(executable))

    # Get the absolute path to the executable
    executable_rel = executable
    executable = os.path.abspath(executable)
    rootdir = os.getcwd()

    for case in cases:
        print_header(
            'Running case \'{}\' for executable \'{}\' in dir \'{}\''.format(
                case.name, executable_rel, case.rundir))

        # Move to working directory
        rundir = rootdir + '/' + case.casedir + '/' + case.rundir + '/'
        casedir = rootdir + '/' + case.casedir + '/'

        if case.rundir != '':
            # Start from a clean run directory and copy the case files in.
            try:
                shutil.rmtree(rundir)
            except Exception:
                pass
            os.mkdir(rundir)
            os.chdir(rundir)

            try:
                for fname in case.files:
                    shutil.copy(casedir + fname, rundir)
            except BaseException:
                print_warning(
                    case.name +
                    ': Cannot find {} for copying, skipping case!'.format(
                        casedir +
                        fname))
                os.chdir(rootdir)
                continue
        else:
            # Running in the case directory itself: never delete results.
            case.keep = True

        try:
            # Update .ini file for testing
            ini_file = '{0}.ini'.format(case.name)
            nl = Read_namelist(ini_file, ducktype=False)
            for group, group_dict in case.options.items():
                for variable, value in group_dict.items():
                    nl.set_value(group, variable, value)
            nl.save(ini_file, allow_overwrite=True)

            # Find the number of MPI tasks
            ntasks = determine_ntasks()

            # Create input data, and do other pre-processing
            run_scripts(case.pre)

            for phase in case.phases:
                case.time = timeit.default_timer()
                if mode == 'cpu' or mode == 'gpu':
                    execute('{} {} {}'.format(executable, phase, case.name))
                elif mode == 'cpumpi':
                    execute('mpiexec --oversubscribe -n {} {} {} {}'.format(
                        ntasks, executable, phase, case.name))
                else:
                    raise ValueError('{} is an illegal value for mode'.format(mode))
                case.time = timeit.default_timer() - case.time

            # Run the post-processing steps
            run_scripts(case.post)
            case.success = True
        except Exception as e:
            print(str(e))
            print_error('Case Failed!')
            case.success = False
        else:
            print_message('Success!')
        finally:
            # Go back to root of all cases
            os.chdir(rootdir)

    # Write the output file (the redundant close() inside `with` is gone).
    if outputfile != '':
        with open(outputfile, 'w') as csv_file:
            write = csv.writer(csv_file)
            write.writerow(['Name', 'Run Dir', 'Success', 'Time', 'Options'])
            for case in cases:
                write.writerow(
                    [case.name, case.rundir, case.success, case.time, case.options])

    # Remove the run directories of successful cases that need not be kept.
    for case in cases:
        if case.success and not case.keep:
            # Fix: build the path with case.casedir (as during the run
            # above), not case.name; they differ for a custom casedir.
            rundir = rootdir + '/' + case.casedir + '/' + case.rundir + '/'
            shutil.rmtree(rundir)
"""
def generator_restart(cases):
cases_out = []
for case in cases:
nl = Read_namelist('{0}/{0}.ini'.format(case.name))
# Everything relevant is in the time group, so merge that with the
# overriding options
options = {'iotimeprec': 0}
options.update(nl['time'])
if case.options is not None:
options.update(case.options)
iotimeprec = options['iotimeprec']
endtime = options['endtime']
savetime = int(endtime / 2)
endtimestr = '{0:07d}'.format(endtime * 10**(-iotimeprec))
savetimestr = '{0:07d}'.format(savetime * 10**(-iotimeprec))
case_init = case
case_init.rundir = 'init'
case_init.options.update({'savetime': savetime, 'endtime': endtime})
case_restart = copy.deepcopy(case)
case_restart.rundir = 'restart'
case_restart.phases = ['run']
case_restart.options.update(
{'starttime': savetime, 'endtime': endtime})
case_restart.pre = {__file__: [
['restart_pre', case_init.rundir, savetimestr]]}
case_restart.post = {__file__: [
['restart_post', case_init.rundir, endtimestr]]}
cases_out.append(case_init)
cases_out.append(case_restart)
return cases_out
"""
def generator_restart(case, endtime):
    """Derive an init + restart case pair from `case`.

    The init case runs to `endtime` and saves at endtime/2; the restart
    case starts from that save point and must reproduce the init results
    (checked by restart_post).
    """
    cases_out = []

    nl = Read_namelist('{}/{}.ini'.format(case.casedir, case.name))
    # iotimeprec shifts the decimal point of times used in file names.
    iotimeprec = nl['time']['iotimeprec'] if 'iotimeprec' in nl['time'] else 0

    savetime = endtime/2
    # Times in I/O units, zero-padded to the 7-digit file-name convention.
    savetime_io = int(round(savetime * 10**(-iotimeprec)))
    endtime_io = int(round(endtime * 10**(-iotimeprec)))
    endtimestr = '{0:07d}'.format(endtime_io)
    savetimestr = '{0:07d}'.format(savetime_io)

    case_init = copy.deepcopy(case)
    case_init.rundir = case.rundir + '_init'
    merge_options(case_init.options, {'time': {'savetime': savetime, 'endtime': endtime}})

    case_restart = copy.deepcopy(case)
    case_restart.rundir = case.rundir + '_restart'
    case_restart.phases = ['run']
    # Pre/post hooks run the restart_pre/restart_post helpers from this module.
    case_restart.pre = {__file__: [
        ['restart_pre', case_init.rundir, savetimestr]]}
    case_restart.post = {__file__: [
        ['restart_post', case_init.rundir, endtimestr]]}
    merge_options(case_restart.options, {'time': {'starttime': savetime, 'savetime': savetime, 'endtime': endtime}})

    cases_out.append(case_init)
    cases_out.append(case_restart)
    return cases_out
def prime_factors(n):
    """Return the prime factorisation of a positive integer `n`.

    Factors are returned in ascending order; prime_factors(1) == [].
    """
    import math
    result = []
    # Trial division up to sqrt(n): any composite number has a factor
    # no larger than its square root.
    for i in range(2, int(math.sqrt(n)) + 1):
        while n % i == 0:
            result.append(i)
            # Integer division keeps n an exact int (was `n = n / i`,
            # which silently turned n into a float).
            n //= i
    if n > 1:
        # The remainder is prime (at most one factor above sqrt(n)).
        result.append(int(n))
    return result
def generator_scaling(cases, procs, type='strong', dir='y'):
    """Expand `cases` into one case per task count in `procs`.

    type='strong' keeps the grid fixed; type='weak' grows itot/jtot and the
    domain size with the decomposition. `dir` selects the decomposition
    direction: 'x', 'y', or 'xy' (2-D, split over the prime factors).
    """
    cases_out = []
    for case in cases:
        if type == 'weak':
            # Base grid/domain sizes, scaled up per-processor below.
            nl = Read_namelist('{0}/{0}.ini'.format(case.name))
            itot = nl['grid']['itot']
            jtot = nl['grid']['jtot']
            xsize = nl['grid']['xsize']
            ysize = nl['grid']['ysize']

        for proc in procs:
            # NOTE(review): for a `dir` outside {'x','y','xy'} the name
            # `option` stays unbound and raises NameError below — confirm
            # callers only pass these three values.
            if dir == 'x':
                option = {'npx': proc}
            elif dir == 'y':
                option = {'npy': proc}
            elif dir == 'xy':
                # Distribute the prime factors alternately over npy and npx.
                primes = prime_factors(proc)
                npy = 1
                npx = 1
                for i in range(0, len(primes), 2):
                    npy *= primes[i]
                    if i + 1 < len(primes):
                        npx *= primes[i + 1]
                option = {'npy': npy, 'npx': npx}

            if type == 'weak':
                option.update({'itot': itot * npx,
                               'jtot': jtot * npy,
                               'xsize': xsize * npx,
                               'ysize': ysize * npy})
            new_case = copy.deepcopy(case)
            new_case.options.update(option)
            new_case.rundir = '{0:03d}'.format(proc)

            cases_out.append(new_case)

    return cases_out
"""
def generator_parameter_change(cases, **kwargs):
cases_out = []
if len(kwargs) > 0:
for case in cases:
key, value = list(kwargs.items())[0]
for val in value:
new_case = copy.deepcopy(case)
new_case.options.update({key : val})
new_case.rundir += (key + str(val)).replace('.', '')
cases_out.append(new_case)
del kwargs[key]
if len(kwargs) > 0:
cases_out = generator_parameter_change(cases_out, **kwargs)
return cases_out
"""
def generator_parameter_permutations(base_case, lists):
    """
    Function to permutate lists of dictionaries to generate cases to run
    """
    # A single dictionary is treated as a list with one item.
    if type(lists) is dict:
        lists = [lists]

    # Each input dict becomes a list of (name, options) tuples so that
    # itertools.product can build the permutations.
    tuple_lists = [list(d.items()) for d in lists]

    cases_out = []
    # One combination = one named option set picked from every list.
    for combo in itertools.product(*tuple_lists):
        new_case = copy.deepcopy(base_case)
        # Directory name: base rundir plus each selected option name.
        for name, _ in combo:
            new_case.rundir += '_' + name
        for _, opts in combo:
            merge_options(new_case.options, opts)
        cases_out.append(new_case)

    return cases_out
class Case:
    """
    Class that contains a case to run with the required runtime settings.

    Mutable arguments default to None instead of shared mutable defaults
    (the old `options=[]`/`pre={}`/... objects were shared between all
    instances created with defaults — and `options` even defaulted to a
    *list* although it is used as a dict of option groups).
    """

    def __init__(
            self,
            name,
            options=None,
            pre=None,
            post=None,
            phases=None,
            casedir='',
            rundir='default_run',
            files=None,
            keep=True):

        self.name = name        # Case name
        # Fresh containers per instance (see class docstring).
        self.options = options if options is not None else {}   # Dict of option groups to override
        self.post = post if post is not None else {}            # List of post-processing python scripts
        self.phases = phases if phases is not None else ['init', 'run']   # Run phases to go through
        self.rundir = rundir    # Relative run directory, defaults to `default_run`
        self.keep = keep        # Whether to keep the results of succefull simulations afterwards
        self.success = None     # Whether the entire case was run succesfully or not
        self.time = None        # Duration of the last phase (usually run)

        # By default; run {name}_input.py in preprocessing phase
        self.pre = pre if pre else {'{}_input.py'.format(name): None}
        self.files = files if files else [
            '{0}.ini'.format(name), '{}_input.py'.format(name)]
        # Directory of the case; self.name by default
        self.casedir = casedir if casedir else name
def run_case(
        case_name, options_in, options_mpi_in,
        executable='microhh', mode='cpu',
        case_dir='.', experiment='local'):
    """Run a single case; returns 0 on success, 1 on failure (shell-style)."""
    options = deepcopy(options_in)
    if mode == 'cpumpi':
        # MPI runs get the extra MPI-specific option groups merged in.
        merge_options(options, options_mpi_in)

    cases = [Case(
        case_name,
        casedir=case_dir,
        rundir=experiment,
        options=options)]

    csv_out = '{}/{}_{}.csv'.format(case_dir, case_name, experiment)
    run_cases(cases, executable, mode, outputfile=csv_out)

    # Shell-style exit status: non-zero when any case failed.
    return 1 if any(not case.success for case in cases) else 0
def run_permutations(
        case_name, options_in, options_mpi_in, permutations_in,
        executable='microhh', mode='cpu',
        case_dir='.', experiment='local'):
    """Run all option permutations of a base case; returns 0 on success,
    1 when any permutation failed (shell-style)."""
    options = deepcopy(options_in)
    if mode == 'cpumpi':
        # MPI runs get the extra MPI-specific option groups merged in.
        merge_options(options, options_mpi_in)

    base_case = Case(
        case_name,
        casedir=case_dir,
        rundir=experiment,
        options=options)
    cases = generator_parameter_permutations(base_case, permutations_in)

    csv_out = '{}/{}_{}.csv'.format(case_dir, case_name, experiment)
    run_cases(cases, executable, mode, outputfile=csv_out)

    # Shell-style exit status: non-zero when any case failed.
    return 1 if any(not case.success for case in cases) else 0
def run_restart(
        case_name, options_in, options_mpi_in, permutations_in=None,
        executable='microhh', mode='cpu',
        case_dir='.', experiment='local'):
    """Run the restart test (full run vs. restarted run) for a case.

    Returns 0 when every generated sub-case succeeded, 1 otherwise.
    """
    # Deep copy the reference options; optionally merge the MPI overrides.
    opts = deepcopy(options_in)
    if mode == 'cpumpi':
        merge_options(opts, options_mpi_in)

    template = Case(
        case_name,
        casedir=case_dir,
        rundir=experiment,
        options=opts)

    if permutations_in is None:
        base_cases = [template]
    else:
        base_cases = generator_parameter_permutations(template, permutations_in)

    # Expand every base case into its restart sub-cases.
    endtime = opts['time']['endtime']
    cases = [c for base in base_cases
             for c in generator_restart(base, endtime)]

    csv_path = '{}/{}_restart_{}.csv'.format(case_dir, case_name, experiment)
    run_cases(cases, executable, mode, outputfile=csv_path)

    return 1 if any(not c.success for c in cases) else 0
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.candlestick.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "grafana"
},
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Tooltip",
"editable": true,
"elements": {
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Price \u0026 Volume",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"csvFileName": "ohlc_dogecoin.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "",
"spec": {
"options": {
"candleStyle": "candles",
"colorStrategy": "open-close",
"colors": {
"down": "red",
"up": "green"
},
"fields": {},
"includeAllFields": true,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "candles+volume"
},
"fieldConfig": {
"defaults": {
"unit": "currencyUSD",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineStyle": {
"dash": [
10,
10
],
"fill": "dash"
},
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "sma"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
},
{
"id": "custom.lineWidth",
"value": 5
},
{
"id": "custom.lineStyle",
"value": {
"dash": [
0,
20
],
"fill": "dot"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "bolup"
},
"properties": [
{
"id": "custom.fillBelowTo",
"value": "boldn"
},
{
"id": "custom.fillOpacity",
"value": 8
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
}
]
}
]
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Volume Only, Alt Colors, 100% Opacity",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"csvFileName": "ohlc_dogecoin.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "",
"spec": {
"options": {
"candleStyle": "candles",
"colorStrategy": "open-close",
"colors": {
"down": "red",
"up": "yellow"
},
"fields": {},
"includeAllFields": false,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "volume"
},
"fieldConfig": {
"defaults": {
"unit": "currencyUSD",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "open"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "close"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": true,
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "Price Only, OHLC Bars",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"csvFileName": "ohlc_dogecoin.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "",
"spec": {
"options": {
"candleStyle": "ohlcbars",
"colorStrategy": "open-close",
"colors": {
"down": "red",
"up": "blue"
},
"fields": {},
"includeAllFields": false,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "candles"
},
"fieldConfig": {
"defaults": {
"unit": "currencyUSD",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "Price Only, Hollow Candles",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"csvFileName": "ohlc_dogecoin.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "",
"spec": {
"options": {
"candleStyle": "candles",
"colorStrategy": "close-close",
"colors": {
"down": "red",
"up": "green"
},
"fields": {},
"includeAllFields": false,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "candles"
},
"fieldConfig": {
"defaults": {
"unit": "currencyUSD",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 16,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 12,
"height": 20,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 16,
"width": 12,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 20,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"timeSettings": {
"timezone": "",
"from": "2021-07-13T22:13:30.740Z",
"to": "2021-07-13T22:46:18.921Z",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Candlestick",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-candlestick/v0alpha1.candlestick.v42.v2beta1.json
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Execute a single command based on condition on devices
running Lenovo CNOS
description:
- This module allows you to modify the running configuration of a switch. It
provides a way to execute a single CNOS command on a network device by
evaluating the current running configuration and executing the command only
if the specific settings have not been already configured.
The CNOS command is passed as an argument of the method.
This module functions the same as the cnos_command module.
The only exception is that following inventory variable can be specified
["condition = <flag string>"]
When this inventory variable is specified as the variable of a task, the
command is executed for the network element that matches the flag string.
Usually, commands are executed across a group of network devices. When
there is a requirement to skip the execution of the command on one or
more devices, it is recommended to use this module. This module uses SSH to
manage network device configuration. For more information about this module
from Lenovo and customizing it usage for your use cases, please visit
U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method.
The command is passed using double quotes. The variables can be
placed directly on to the CLI commands or can be invoked
from the vars directory.
required: true
default: Null
condition:
description:
- If you specify condition=false in the inventory file against any
device, the command execution is skipped for that device.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same
as it is specified in the inventory for that device.
required: true
default: Null
'''
# Usage examples rendered by `ansible-doc`.  Fixed: the example used
# `command:`, but the module's argument_spec only declares `clicommand`.
EXAMPLES = '''
Tasks : The following are examples of using the module
 cnos_conditional_command. These are written in the main.yml file of the tasks
 directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
  cnos_conditional_command:
      host: "{{ inventory_hostname }}"
      username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
      password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
      deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
      enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
      outputfile: "./results/test_conditional_command_
 {{ inventory_hostname }}_output.txt"
      condition: "{{ hostvars[inventory_hostname]['condition']}}"
      flag: leaf_switch2
      clicommand: "spanning-tree mode enable"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
import time
import socket
import array
import json
import time
import re
import os
# Import the vendored CNOS CLI helper; record availability instead of
# failing at import time.  A bare `except:` also swallowed SystemExit and
# KeyboardInterrupt, so narrow it to Exception.
try:
    from ansible.module_utils.network.cnos import cnos
    HAS_LIB = True
except Exception:
    HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Execute one CLI command on a Lenovo CNOS switch, gated by a flag.

    The command runs only when the inventory-supplied ``condition`` equals
    ``flag``; otherwise the task exits early for this device.  Device output
    is appended to ``outputfile`` and scanned for error markers.
    """
    module = AnsibleModule(
        argument_spec=dict(
            clicommand=dict(required=True),
            outputfile=dict(required=True),
            condition=dict(required=True),
            flag=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
        ),
        supports_check_mode=False)

    condition = module.params['condition']
    flag = module.params['flag']
    cliCommand = module.params['clicommand']
    outputfile = module.params['outputfile']
    output = ''

    # Skip this device when its condition flag does not match the task flag.
    if condition is None or condition != flag:
        module.exit_json(changed=True, msg="Command Skipped for this switch")
        return ''

    # Send the CLI command, then persist the configuration.  Running 'save'
    # as its own command list fixes the original code, which extended `cmd`
    # and replayed it — executing cliCommand on the device a second time.
    cmd = [{'command': cliCommand, 'prompt': None, 'answer': None}]
    output += str(cnos.run_cnos_commands(module, cmd))
    save_cmd = [{'command': 'save', 'prompt': None, 'answer': None}]
    output += str(cnos.run_cnos_commands(module, save_cmd))

    # Append the combined output to the log file, creating the parent
    # directory on first use (only when the path actually has a parent).
    path = outputfile.rsplit('/', 1)
    if len(path) > 1 and not os.path.exists(path[0]):
        os.makedirs(path[0])
    with open(outputfile, "a") as log_file:  # `with` guarantees the handle closes
        log_file.write(output)

    # Report success or failure based on error markers in the device output.
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True,
                         msg="CLI Command executed and results saved in file ")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Tornado handlers for the sessions web service.
Authors:
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
from tornado import web
from ...base.handlers import IPythonHandler, json_errors
from IPython.utils.jsonutil import date_default
from IPython.html.utils import url_path_join, url_escape
#-----------------------------------------------------------------------------
# Session web service handlers
#-----------------------------------------------------------------------------
class SessionRootHandler(IPythonHandler):
    """REST handler for the /api/sessions collection."""

    @web.authenticated
    @json_errors
    def get(self):
        """Return the list of running sessions as JSON."""
        sm = self.session_manager
        sessions = sm.list_sessions()
        self.finish(json.dumps(sessions, default=date_default))

    @web.authenticated
    @json_errors
    def post(self):
        """Create a new session, unless one already exists for the notebook."""
        sm = self.session_manager
        km = self.kernel_manager
        # (The original also grabbed self.notebook_manager into an unused
        # local `nbm`; removed.)

        model = self.get_json_body()
        if model is None:
            raise web.HTTPError(400, "No JSON data provided")
        try:
            name = model['notebook']['name']
        except KeyError:
            raise web.HTTPError(400, "Missing field in JSON data: name")
        try:
            path = model['notebook']['path']
        except KeyError:
            raise web.HTTPError(400, "Missing field in JSON data: path")

        # Reuse an existing session for this notebook, or start a kernel
        # and create a fresh one.
        if sm.session_exists(name=name, path=path):
            model = sm.get_session(name=name, path=path)
        else:
            kernel_id = km.start_kernel(path=path)
            model = sm.create_session(name=name, path=path,
                                      kernel_id=kernel_id)

        location = url_path_join(self.base_url, 'api', 'sessions', model['id'])
        self.set_header('Location', url_escape(location))
        self.set_status(201)
        self.finish(json.dumps(model, default=date_default))
class SessionHandler(IPythonHandler):
    """REST handler for a single session at /api/sessions/<session_id>."""

    SUPPORTED_METHODS = ('GET', 'PATCH', 'DELETE')

    @web.authenticated
    @json_errors
    def get(self, session_id):
        """Return the JSON model for one session."""
        model = self.session_manager.get_session(session_id=session_id)
        self.finish(json.dumps(model, default=date_default))

    @web.authenticated
    @json_errors
    def patch(self, session_id):
        """Update a session; currently only notebook rename/move."""
        sm = self.session_manager
        model = self.get_json_body()
        if model is None:
            raise web.HTTPError(400, "No JSON data provided")

        notebook = model.get('notebook', {})
        changes = {key: notebook[key]
                   for key in ('name', 'path') if key in notebook}

        sm.update_session(session_id, **changes)
        updated = sm.get_session(session_id=session_id)
        self.finish(json.dumps(updated, default=date_default))

    @web.authenticated
    @json_errors
    def delete(self, session_id):
        """Delete the session and shut down its kernel."""
        sm = self.session_manager
        km = self.kernel_manager
        session = sm.get_session(session_id=session_id)
        sm.delete_session(session_id)
        km.shutdown_kernel(session['kernel']['id'])
        self.set_status(204)
        self.finish()
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
# UUID-shaped session id: five dash-separated groups of word characters.
_session_id_regex = r"(?P<session_id>\w+-\w+-\w+-\w+-\w+)"

default_handlers = [
    (r"/api/sessions/%s" % _session_id_regex, SessionHandler),
    (r"/api/sessions", SessionRootHandler)
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova import test
from nova.virt import event
class TestEvents(test.NoDBTestCase):
    """Tests for the string representation of nova.virt.event classes."""

    def test_event_repr(self):
        timestamp = time.time()
        instance_uuid = '1234'

        plain = event.Event(timestamp)
        self.assertEqual(str(plain), "<Event: %s>" % timestamp)

        inst = event.InstanceEvent(instance_uuid, timestamp=timestamp)
        self.assertEqual(str(inst),
                         "<InstanceEvent: %s, %s>" % (timestamp,
                                                      instance_uuid))

        lifecycle = event.LifecycleEvent(
            instance_uuid, event.EVENT_LIFECYCLE_RESUMED, timestamp=timestamp)
        self.assertEqual(str(lifecycle),
                         "<LifecycleEvent: %s, %s => Resumed>" %
                         (timestamp, instance_uuid))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import gpu_info
class SystemInfo(object):
    """Provides low-level system information."""

    def __init__(self, model_name, gpu_dict):
        # Identity comparison with None (PEP 8); `== None` can be fooled by
        # objects that override __eq__.
        if model_name is None or gpu_dict is None:
            raise Exception("Missing model_name or gpu_dict argument")
        self._model_name = model_name
        self._gpu = gpu_info.GPUInfo.FromDict(gpu_dict)

    @classmethod
    def FromDict(cls, attrs):
        """Constructs a SystemInfo from a dictionary of attributes.

        Attributes currently required to be present in the dictionary:

          model_name (string): a platform-dependent string
              describing the model of machine, or the empty string if not
              supported.
          gpu (object containing GPUInfo's required attributes)
        """
        return cls(attrs["model_name"], attrs["gpu"])

    @property
    def model_name(self):
        """A string describing the machine model.

        This is a highly platform-dependent value and not currently
        specified for any machine type aside from Macs. On Mac OS, this
        is the model identifier, reformatted slightly; for example,
        'MacBookPro 10.1'."""
        return self._model_name

    @property
    def gpu(self):
        """A GPUInfo object describing the graphics processor(s) on the system."""
        return self._gpu
|
unknown
|
codeparrot/codeparrot-clean
| ||
def power(snumber, x, y):
    """Fuel-cell power level for serial number *snumber* at grid cell (x, y).

    AoC 2018 day 11 rule: the rack id is x + 10; the power level is the
    hundreds digit of ((rack_id * y + serial) * rack_id), minus 5.
    """
    rack_id = x + 10
    hundreds_digit = ((rack_id * y + snumber) * rack_id // 100) % 10
    return hundreds_digit - 5
# A B
# C D
# A B
# C D
def build_grid_sum(grid):
    """Build a 2-D inclusive prefix-sum (summed-area) table for *grid*.

    grid_sums[y][x] holds the sum of grid[0..y][0..x].  The table size is
    derived from the input instead of being hard-coded to 300x300, so the
    helper now works for any rectangular grid (the in-file caller still
    passes a 300x300 grid and gets an identical result).
    """
    height = len(grid)
    width = len(grid[0]) if height else 0
    grid_sums = [[0] * width for _ in range(height)]
    for y, row in enumerate(grid):
        for x, value in enumerate(row):
            above = grid_sums[y - 1][x] if y > 0 else 0
            left = grid_sums[y][x - 1] if x > 0 else 0
            diag = grid_sums[y - 1][x - 1] if y > 0 and x > 0 else 0
            # Inclusion–exclusion: left + above double-counts the diagonal.
            grid_sums[y][x] = left + above - diag + value
    return grid_sums
# Puzzle input: the grid serial number.
snumber = 2694

# Power level of every cell in the 300x300 grid, indexed as grid[y][x].
grid = [[power(snumber, cx, cy) for cx in range(300)] for cy in range(300)]

# Summed-area table so any rectangular block sums in O(1).
grid_sums = build_grid_sum(grid)
def power_between(grid_sums, x1, y1, x2, y2):
    """Total power of the block between corners (x1, y1) and (x2, y2).

    Uses the summed-area table; the covered region is exclusive of the x1
    column and y1 row, i.e. cells x1+1..x2 and y1+1..y2.
    """
    total = grid_sums[y2][x2] - grid_sums[y1][x2]
    total -= grid_sums[y2][x1] - grid_sums[y1][x1]
    return total
# p1: find the 3x3 block with the highest total power.
max_power = 0
best_coords = None
block_size = 3
for top in range(300 - block_size):
    for left in range(300 - block_size):
        block_power = power_between(
            grid_sums, left, top, left + block_size, top + block_size)
        if block_power > max_power:
            max_power = block_power
            best_coords = (left, top)
# coords are 0-based...
print(best_coords[0] + 1, best_coords[1] + 1)
# p2: find the best block of any size from 1 to 300.
max_power = 0
best_coords = None
best_block_size = None
for size in range(1, 301):
    for top in range(300 - size):
        for left in range(300 - size):
            block_power = power_between(
                grid_sums, left, top, left + size, top + size)
            if block_power > max_power:
                max_power = block_power
                best_coords = (left, top)
                best_block_size = size
# coords are 0-based...
print(best_coords[0] + 1, best_coords[1] + 1, best_block_size)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.network.tls.platform
internal class PlatformVersion(val major: String, val minor: Int) {
    companion object {
        // Fallback when the raw version string cannot be parsed.
        private val MINIMAL_SUPPORTED: PlatformVersion = PlatformVersion("1.6.0", 0)

        /**
         * Parse strings such as "1.8.0_181" or "9-ea" into major/minor parts.
         * Anything unparseable (including a non-numeric minor part) falls
         * back to [MINIMAL_SUPPORTED]; a string with no separator keeps the
         * whole value as `major` with `minor == -1`.
         */
        operator fun invoke(rawVersion: String): PlatformVersion = try {
            val parts = rawVersion.split('-', '_')
            if (parts.size == 2) {
                PlatformVersion(parts[0], parts[1].toInt())
            } else {
                PlatformVersion(major = rawVersion, minor = -1)
            }
        } catch (cause: Throwable) {
            MINIMAL_SUPPORTED
        }
    }
}
// Parsed JVM version, computed once on first access from the
// "java.version" system property.
internal val platformVersion: PlatformVersion by lazy {
    PlatformVersion(System.getProperty("java.version"))
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-network/ktor-network-tls/jvm/src/io/ktor/network/tls/platform/PlatformVersion.kt
|
{
"extends": "./tsconfig.json",
"compilerOptions": {
"outDir": "./out-tsc/spec",
"types": [
"jasmine",
"node"
]
},
"include": [
"src/**/*.spec.ts",
"src/**/*.d.ts"
]
}
|
json
|
github
|
https://github.com/angular/angular
|
integration/cli-hello-world-ivy-i18n/tsconfig.spec.json
|
from __future__ import absolute_import
import contextlib
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError, BadCommand
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources, six
from pip._vendor.six.moves import input
from pip._vendor.six.moves import cStringIO
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
# Public API of this utility module.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'find_command', 'ask', 'Inf',
           'normalize_name', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'make_path_relative', 'normalize_path',
           'renames', 'get_terminal_size', 'get_prog',
           'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
           'captured_stdout', 'remove_tracebacks']

logger = logging.getLogger(__name__)
def get_prog():
    """Return the program name to show in usage and error messages.

    When pip runs via ``python -m pip`` (argv[0] is ``__main__.py`` or
    ``-c``), report it that way; otherwise just ``pip``.
    """
    prog = 'pip'
    try:
        argv0 = os.path.basename(sys.argv[0])
        if argv0 in ('__main__.py', '-c'):
            prog = "%s -m pip" % sys.executable
    except (AttributeError, TypeError, IndexError):
        # Broken/missing sys.argv: fall back to the plain name.
        pass
    return prog
# Retry every half second for up to 3 seconds; transient failures (e.g. a
# scanner briefly holding a file open on Windows) usually clear within that.
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
    # Delegate to shutil.rmtree, fixing up read-only entries via the
    # onerror handler below.
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """onerror callback for shutil.rmtree.

    On Windows the files in .svn are read-only, so rmtree's unlink raises.
    Clear the read-only bit and retry the failed operation; anything else
    is re-raised unchanged.
    """
    mode = os.stat(path).st_mode
    if not (mode & stat.S_IREAD):
        # Not a read-only problem: propagate the original exception.
        raise
    os.chmod(path, stat.S_IWRITE)  # make it writable
    func(path)                     # retry the operation that failed
def display_path(path):
    """Return *path* for display: absolute, case-normalized, and written
    relative to the current directory when it lives underneath it."""
    path = os.path.normcase(os.path.abspath(path))
    if sys.version_info[0] == 2:
        # Python 2: round-trip through the filesystem encoding so the
        # result is printable with the default encoding.
        path = path.decode(sys.getfilesystemencoding(), 'replace')
        path = path.encode(sys.getdefaultencoding(), 'replace')
    cwd = os.getcwd()
    if path.startswith(cwd + os.path.sep):
        path = '.' + path[len(cwd):]
    return path
def backup_dir(dir, ext='.bak'):
    """Return a not-yet-existing backup name for *dir*
    (dir.bak, dir.bak2, dir.bak3, ...)."""
    suffix = ext
    counter = 1
    while os.path.exists(dir + suffix):
        counter += 1
        suffix = ext + str(counter)
    return dir + suffix
def find_command(cmd, paths=None, pathext=None):
    """Search PATH (or the given *paths*) for *cmd* and return its path.

    Raises BadCommand when the command cannot be located.
    """
    if paths is None:
        paths = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(paths, six.string_types):
        paths = [paths]

    # Windows-style executable extensions (.exe, .bat, ...), lower-cased.
    if pathext is None:
        pathext = get_pathext()
    pathext = [ext for ext in pathext.lower().split(os.pathsep) if ext]

    # If cmd already carries one of those extensions, search for it verbatim.
    if os.path.splitext(cmd)[1].lower() in pathext:
        pathext = ['']

    for search_dir in paths:
        base = os.path.join(search_dir, cmd)
        # Extension variants first, then the bare name — same order as PATH
        # resolution on Windows.
        for ext in pathext:
            candidate = base + ext
            if os.path.isfile(candidate):
                return candidate
        if os.path.isfile(base):
            return base
    raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
    """Return the executable-extension list from $PATHEXT, or a default."""
    if default_pathext is None:
        # Standard Windows executable extensions.
        default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
    return os.environ.get('PATHEXT', default_pathext)
def ask_path_exists(message, options):
    """Like ask(), but $PIP_EXISTS_ACTION can pre-answer the question."""
    preset = os.environ.get('PIP_EXISTS_ACTION', '')
    for action in preset.split():
        if action in options:
            return action
    return ask(message, options)
def ask(message, options):
    """Prompt interactively until the answer is one of *options*.

    Raises when $PIP_NO_INPUT is set, since no prompt is allowed then.
    """
    while True:
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception(
                'No input was expected ($PIP_NO_INPUT set); question: %s' %
                message
            )
        response = input(message).strip().lower()
        if response in options:
            return response
        print(
            'Your response (%r) was not one of the expected responses: '
            '%s' % (response, ', '.join(options))
        )
class _Inf(object):
    """A singleton that compares greater than every other object."""

    def __eq__(self, other):
        # Equal only to itself.
        return self is other

    def __ne__(self, other):
        return self is not other

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __repr__(self):
        return 'Inf'


Inf = _Inf()  # this object is not currently used as a sortable in our code
del _Inf
# Any character that is not an ASCII letter collapses to '-'.
_normalize_re = re.compile(r'[^a-z]', re.I)


def normalize_name(name):
    """Normalize a project name: lowercase, non-letters replaced by '-'."""
    lowered = name.lower()
    return _normalize_re.sub('-', lowered)
def format_size(bytes):
    """Render a byte count as a human-readable decimal kB/MB string."""
    if bytes > 1000 * 1000:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path)
            and os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """
    Returns true if the page appears to be the index page of an svn repository
    """
    has_revision_title = re.search(r'<title>[^<]*Revision \d+:', html)
    powered_by_svn = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html,
                               re.I)
    return has_revision_title and powered_by_svn
def file_contents(filename):
    """Read *filename* in binary mode and return its UTF-8-decoded text."""
    with open(filename, 'rb') as fp:
        raw = fp.read()
    return raw.decode('utf-8')
def split_leading_dir(path):
    """Split off the first path component.

    Returns a 2-item list when a separator is found (mirroring str.split),
    otherwise the tuple (path, '').
    """
    path = str(path).lstrip('/').lstrip('\\')
    slash = path.find('/')
    backslash = path.find('\\')
    # Split on whichever separator occurs first.
    if slash != -1 and (backslash == -1 or slash < backslash):
        return path.split('/', 1)
    if backslash != -1:
        return path.split('\\', 1)
    return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common = None
    for path in paths:
        first, _rest = split_leading_dir(path)
        if not first:
            return False
        if common is None:
            common = first
        elif first != common:
            return False
    return True
def make_path_relative(path, rel_to):
"""
Make a filename relative, where the filename path, and it is
relative to rel_to
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    expanded = os.path.expanduser(path)
    return os.path.normcase(os.path.realpath(expanded))
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    root, ext = posixpath.splitext(path)
    # Treat '.tar.gz' / '.tar.bz2' etc. as a single extension.
    if root.lower().endswith('.tar'):
        return root[:-4], root[-4:] + ext
    return root, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames(): create missing target
    # directories, move (which copies across devices), then prune any
    # directories left empty at the source.
    new_dir, new_name = os.path.split(new)
    if new_dir and new_name and not os.path.exists(new_dir):
        os.makedirs(new_dir)
    shutil.move(old, new)
    old_dir, old_name = os.path.split(old)
    if old_dir and old_name:
        try:
            os.removedirs(old_dir)
        except OSError:
            # The source's parent directory is not empty; leave it alone.
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.
    If we're not in a virtualenv, all paths are considered "local."
    """
    return (not running_under_virtualenv()
            or normalize_path(path).startswith(normalize_path(sys.prefix)))
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).
    Always True if we're not in a virtualenv.
    """
    return is_local(dist_location(dist))
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    location = normalize_path(dist_location(dist))
    return location.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in
    distutils.sysconfig.get_python_lib().
    """
    location = normalize_path(dist_location(dist))
    return location.startswith(normalize_path(site_packages))
def dist_is_editable(dist):
    """Is distribution an editable install?"""
    # TODO: factor out determining editableness out of FrozenRequirement
    from pip import FrozenRequirement
    return FrozenRequirement.from_dist(dist, []).editable
def get_installed_distributions(local_only=True,
                                skip=stdlib_pkgs,
                                include_editables=True,
                                editables_only=False,
                                user_only=False):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.
    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs.
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
    site directory.
    """
    def _any_dist(dist):
        # Predicate used when a filter is disabled.
        return True

    local_test = dist_is_local if local_only else _any_dist
    if include_editables:
        editable_test = _any_dist
    else:
        def editable_test(dist):
            return not dist_is_editable(dist)
    editables_only_test = dist_is_editable if editables_only else _any_dist
    user_test = dist_in_usersite if user_only else _any_dist
    return [
        dist for dist in pkg_resources.working_set
        if local_test(dist)
        and dist.key not in skip
        and editable_test(dist)
        and editables_only_test(dist)
        and user_test(dist)
    ]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.
    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)
    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations.
    This method will just return the first one found.
    """
    # Build the ordered list of directories to probe; order encodes the
    # precedence described in the docstring.
    sites = []
    if running_under_virtualenv():
        if virtualenv_no_global():
            # Scenario 2: only the virtualenv's site-packages.
            sites.append(site_packages)
        else:
            # Scenario 3: site-packages first, user site second.
            sites.append(site_packages)
            if user_site:
                sites.append(user_site)
    else:
        # Scenario 1: user site first, then the global site-packages.
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)
    for site in sites:
        egglink = os.path.join(site, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
    # Implicit None when no .egg-link exists in any candidate directory.
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    link = egg_link_path(dist)
    return link if link else dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window."""
    def ioctl_GWINSZ(fd):
        # Query (rows, cols) for *fd* via the TIOCGWINSZ ioctl; returns
        # None when the fd is not a terminal or the modules are missing.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack(
                'hh',
                fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
            )
        except Exception:
            # Was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; Exception is wide enough here.
            return None
        if cr == (0, 0):
            return None
        return cr
    # Try stdin, stdout, stderr in turn.
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # Fall back to the controlling terminal.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        # Last resort: the environment, then a conventional 80x25 default.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    # cr is (rows, cols); the public contract is (width, height).
    return int(cr[1]), int(cr[0])
def current_umask():
    """Get the current umask which involves having to set it temporarily."""
    # os.umask() both sets the mask and returns the previous one, so set a
    # throwaway value and immediately restore the original.
    previous = os.umask(0)
    os.umask(previous)
    return previous
def unzip_file(filename, location, flatten=True):
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.

    When ``flatten`` is True and every member shares a single leading
    directory, that directory is stripped from the extracted paths.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    # Context managers guarantee the archive and member files are closed
    # even when extraction fails part-way through.
    with open(filename, 'rb') as zipfp:
        archive = zipfile.ZipFile(zipfp, allowZip64=True)
        try:
            leading = has_leading_dir(archive.namelist()) and flatten
            for info in archive.infolist():
                name = info.filename
                data = archive.read(name)
                fn = name
                if leading:
                    fn = split_leading_dir(name)[1]
                fn = os.path.join(location, fn)
                dest_dir = os.path.dirname(fn)
                if not os.path.exists(dest_dir):
                    os.makedirs(dest_dir)
                if fn.endswith('/') or fn.endswith('\\'):
                    # A directory entry
                    if not os.path.exists(fn):
                        os.makedirs(fn)
                else:
                    with open(fn, 'wb') as fp:
                        fp.write(data)
                    # The archived Unix mode lives in the upper 16 bits.
                    mode = info.external_attr >> 16
                    # if mode and regular file and any execute permissions for
                    # user/group/world?
                    if mode and stat.S_ISREG(mode) and mode & 0o111:
                        # make dest file have execute for user/group/world
                        # (chmod +x) no-op on windows per python docs
                        os.chmod(fn, (0o777 - current_umask() | 0o111))
        finally:
            archive.close()
def untar_file(filename, location):
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    # Choose the tarfile mode from the extension; fall back to transparent
    # compression detection ('r:*') with a warning.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif (filename.lower().endswith('.bz2')
            or filename.lower().endswith('.tbz')):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warning(
            'Cannot determine compression type for file %s', filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                # Strip the single shared top-level directory.
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            elif member.issym():
                # NOTE(review): uses the private tarfile API _extract_member
                # to create the symlink at the stripped destination path.
                try:
                    tar._extract_member(member, path)
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
def unpack_file(filename, location, content_type, link):
    """Unpack *filename* into the directory *location*.

    Dispatches on ``content_type`` and the file name/contents: zip/wheel
    archives go through unzip_file (wheels are not flattened), tarballs
    through untar_file, and an HTML page that looks like an SVN listing is
    checked out via the Subversion VCS backend using ``link.url``.
    Raises InstallationError when the archive format cannot be determined.
    """
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip'
            or filename.endswith('.zip')
            or filename.endswith('.whl')
            or zipfile.is_zipfile(filename)):
        # Wheels must keep their internal directory layout intact.
        unzip_file(
            filename,
            location,
            flatten=not filename.endswith('.whl')
        )
    elif (content_type == 'application/x-gzip'
            or tarfile.is_tarfile(filename)
            or splitext(filename)[1].lower() in (
                '.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html')
            and is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        # FIXME: handle?
        # FIXME: magic signatures?
        logger.critical(
            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
            'cannot detect archive format',
            filename, location, content_type,
        )
        raise InstallationError(
            'Cannot determine archive format of %s' % location
        )
def remove_tracebacks(output):
    """Strip SyntaxError/SyntaxWarning traceback fragments from *output*.

    On Python 3 also removes compileall's "*** Error compiling ..." lines,
    which it prints to stdout instead of a traceback.
    """
    syntax_pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
                      r'Syntax(?:Error|Warning): (?:.*)')
    cleaned = re.sub(syntax_pattern, '', output)
    if PY2:
        return cleaned
    # compileall.compile_dir() prints different messages to stdout
    # in Python 3
    return re.sub(r"\*\*\* Error compiling (?:.*)", '', cleaned)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True,
                    command_level=logging.DEBUG, command_desc=None,
                    extra_environ=None):
    """Run *cmd* (a list of arguments), logging its output.

    When ``show_stdout`` is True the child inherits this process's stdout;
    otherwise stdout is captured, cleaned of syntax-error tracebacks, logged
    line by line (optionally reclassified via ``filter_stdout``) and the
    cleaned text is returned.  stderr is always merged into stdout.
    A non-zero exit raises InstallationError unless ``raise_on_returncode``
    is False, in which case a warning is logged instead.
    """
    if command_desc is None:
        # Build a shell-style description of the command for log messages.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.log(command_level, "Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception as exc:
        logger.critical(
            "Error %s while executing command %s", exc, command_desc,
        )
        raise
    all_output = []
    if stdout is not None:
        # Capturing: read everything, scrub tracebacks, then re-emit the
        # lines through the logger.
        stdout = remove_tracebacks(console_to_str(proc.stdout.read()))
        stdout = cStringIO(stdout)
        while 1:
            line = stdout.readline()
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + '\n')
            if filter_stdout:
                # filter_stdout may return a level, or (level, rewritten line).
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                # if not logger.stdout_level_matches(level) and False:
                #    # TODO(dstufft): Handle progress bar.
                #    logger.show_progress()
            else:
                logger.debug(line)
    else:
        # Not capturing: just wait for the process and keep whatever
        # communicate() returns for error reporting.
        returned_stdout, returned_stderr = proc.communicate()
        all_output = [returned_stdout or '']
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.info(
                    'Complete output from command %s:', command_desc,
                )
                logger.info(
                    '\n'.join(all_output) +
                    '\n----------------------------------------'
                )
            raise InstallationError(
                'Command "%s" failed with error code %s in %s'
                % (command_desc, proc.returncode, cwd))
        else:
            logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc, proc.returncode, cwd,
            )
    if stdout is not None:
        # Returns None when show_stdout was True.
        return remove_tracebacks(''.join(all_output))
def read_text_file(filename):
    """Return the contents of *filename*.

    Try to decode the file contents with utf-8, the preferred system encoding
    (e.g., cp1252 on some Windows machines), and latin1, in that order.
    Decoding a byte string with latin1 will never raise an error. In the worst
    case, the returned string will contain some garbage characters.
    """
    with open(filename, 'rb') as fp:
        raw = fp.read()
    for encoding in ('utf-8', locale.getpreferredencoding(False), 'latin1'):
        try:
            raw = raw.decode(encoding)
        except UnicodeDecodeError:
            continue
        break
    assert type(raw) != bytes  # Latin1 should have worked.
    return raw
def _make_build_dir(build_dir):
    # Create the build directory and drop the delete-marker file in it so
    # later runs know the directory may be safely removed.
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
class FakeFile(object):
    """Wrap a list of lines in an object with readline() to make
    ConfigParser happy."""
    def __init__(self, lines):
        self._gen = iter(lines)

    def readline(self):
        """Return the next line, or '' once the lines are exhausted."""
        try:
            try:
                return next(self._gen)
            except NameError:
                # Python < 2.6 has no next() builtin.
                return self._gen.next()
        except StopIteration:
            return ''

    def __iter__(self):
        return self._gen
class StreamWrapper(StringIO):
    """In-memory stream that reports the encoding of the stream it replaces.

    Used by captured_output() to stand in for sys.stdout/sys.stderr;
    compileall.compile_dir() needs stdout.encoding to print to stdout.
    """
    @classmethod
    def from_stream(cls, orig_stream):
        """Create a wrapper that remembers *orig_stream* for encoding lookups."""
        wrapper = cls()
        # Store on the instance: assigning to `cls` (as before) shared the
        # original stream across every StreamWrapper instance.
        wrapper.orig_stream = orig_stream
        return wrapper

    # compileall.compile_dir() needs stdout.encoding to print to stdout
    @property
    def encoding(self):
        return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.
    Taken from Lib/support/__init__.py in the CPython repo.
    """
    orig_stdout = getattr(sys, stream_name)
    # StreamWrapper keeps the original stream's encoding visible to callers.
    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
    try:
        yield getattr(sys, stream_name)
    finally:
        # Always restore the real stream, even if the body raised.
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Capture the output of sys.stdout:
       with captured_stdout() as stdout:
           print('hello')
       self.assertEqual(stdout.getvalue(), 'hello\n')
    Taken from Lib/support/__init__.py in the CPython repo.
    """
    return captured_output('stdout')
class cached_property(object):
    """A property that is only computed once per instance and then replaces
    itself with an ordinary attribute. Deleting the attribute resets the
    property.
    Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
    """
    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        # Compute once; storing in the instance __dict__ shadows this
        # non-data descriptor on subsequent lookups.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.test;
/**
 * Minimal application used by the plugin's integration tests: running it
 * prints a well-known marker line to standard output.
 */
public class SampleApplication {

    /** Marker line a test harness can look for to confirm the app ran. */
    private static final String MESSAGE = "I haz been run";

    public static void main(String[] args) {
        System.out.println(MESSAGE);
    }

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
build-plugin/spring-boot-maven-plugin/src/intTest/projects/run/src/main/java/org/test/SampleApplication.java
|
# Solution to Good Fences Make Sad and Disgusted Neighbors
# Using Algorithm X with multi cover (generalized exact cover)
# 2018 MIT Mystery Hunt
# Runs in 2-3 minutes.
import grf
from collections import Counter
layouts = """
3222310000320343141036633541146451103533135534530423523533101
3431536632403430342000124433012553521345204152330313453102344
5510012101103300000363000010330012110000000000012123000032011
2134513013355343554336634254534353334200142521342343533350544
1234520123431015454233230143554310145534243355314445432234425
1233330363234033004440001453322010202223225000020412103400015
5453542050451011133544421053256415541055103413635502333354445
5014142011232443234245342215232412534215200330133002036310033
"""
layouts = [line.strip() for line in layouts.splitlines() if line.strip()]
# Cells have (x, y) coordinates in a tilted coordinate system.
# Top left corner is (0, 0). Top right corner is (n, 0). Left corner is (0, n).
linelengths = 5, 6, 7, 8, 9, 8, 7, 6, 5
linestarts = 0, 0, 0, 0, 0, 1, 2, 3, 4
cells = [(x, y) for y, (l, s) in enumerate(zip(linelengths, linestarts)) for x in range(s, s + l)]
# Two cells are adjacent if their coordinate differences are in this list.
eways = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, -1)]
def isneighbor(cell0, cell1):
    """True when *cell1* is one of the six hex neighbors of *cell0*."""
    dx = cell1[0] - cell0[0]
    dy = cell1[1] - cell0[1]
    return (dx, dy) in eways
def allneighbors(cell):
    """All six neighboring coordinates of *cell* (may fall off the grid)."""
    x, y = cell
    return [(x + dx, y + dy) for dx, dy in eways]
# An edge is defined by the pair of cells it's between. For edges on the outside of the grid, one
# of these cells will not be in the cells list.
# Sorting each pair gives a canonical key, so each edge appears once.
edges = set(
    tuple(sorted([cell0, cell1]))
    for cell0 in cells
    for cell1 in allneighbors(cell0)
)
# A vertex is defined by the three cells it's between.
# The cell1 < cell2 ordering plus sorting makes each triple canonical.
vertices = set(
    tuple(sorted([cell0, cell1, cell2]))
    for cell0 in cells
    for cell1 in allneighbors(cell0)
    for cell2 in allneighbors(cell0)
    if cell1 < cell2 and isneighbor(cell1, cell2)
)
def vertexon(vertex, edge):
    """True when every cell of *edge* is also a corner cell of *vertex*."""
    for cell in edge:
        if cell not in vertex:
            return False
    return True
# Can't think of any way to express the constraint that all edges must be connected. Just filter
# solutions based on this check.
def singlefence(solution):
    # Two fence edges are connected when they share a vertex; the fence is
    # valid only if the resulting graph is a single connected component.
    edges = [edge for ctype, edge in solution if ctype == "fenceat"]
    fconnect = lambda edge0, edge1: any(vertexon(vertex, edge0) and vertexon(vertex, edge1) for vertex in vertices)
    graph = grf.fconnect_to_graph(edges, fconnect)
    return grf.is_connected(graph)
def printsolution(solution):
    """Render one solution as ASCII art: S/D for moods, # for fence pieces."""
    # 40x40 character canvas, addressed by (x, y).
    layout = { (x, y): " " for x in range(40) for y in range(40) }
    def cellpos(cell):
        # Map tilted hex coordinates onto the character grid.
        x, y = cell
        return 4 * x - 2 * y + 12, 3 + 3 * y
    def edgeposes(edge):
        # Characters covering an edge: both roundings of the midpoint of its
        # two cells, plus the positions of the vertices it touches.
        xs, ys = zip(*[cellpos(cell) for cell in edge])
        yield sum(xs) // len(xs), sum(ys) // len(ys)
        yield (sum(xs) + 1) // len(xs), (sum(ys) + 1) // len(ys)
        for vertex in vertices:
            if vertexon(vertex, edge):
                yield vertexpos(vertex)
    def vertexpos(vertex):
        # A vertex is drawn at the centroid of its three cells.
        xs, ys = zip(*[cellpos(cell) for cell in vertex])
        return sum(xs) // len(xs), sum(ys) // len(ys)
    for ctype, value in solution:
        if ctype == "sad":
            layout[cellpos(value)] = "S"
        if ctype == "disgusted":
            layout[cellpos(value)] = "D"
        if ctype == "fenceat":
            for pos in edgeposes(value):
                layout[pos] = "#"
    for y in range(40):
        print(*[layout[(x, y)] for x in range(40)])
def solve(layout, title):
    """Encode one puzzle as a generalized exact cover and print its solutions.

    *layout* is a digit string (one digit per cell); *title* is printed
    before the solutions.
    """
    grid = { cell: int(c) for cell, c in zip(cells, layout) }
    node_ranges = {}
    subsets = {}
    for cell, n in grid.items():
        # Each cell must have a mood assigned.
        node_ranges[("mood", cell)] = 1
        # This is a trick to express the disgust condition. A cell gets +1 lofence and +1 hifence
        # for every fence that borders it. If the cell's mood is disgust, then these must both total
        # to n. In that case, the cell gives itself +(6-n) of each, bringing the total lofence and
        # total hifence both to n. If the cell's mood is sad, then these can be anything. In that
        # case the cell gives itself +6 hifence and +0 lofence, so no matter how many fences it has
        # they both fall into the right range.
        node_ranges[("lofence", cell)] = 0, 6
        node_ranges[("hifence", cell)] = 6, 12
        # The same trick expresses the sadness condition via the neighbors.
        node_ranges[("losad", cell)] = 0, 6
        node_ranges[("hisad", cell)] = 6, 12
        subsets[("sad", cell)] = (
            [("mood", cell)] +
            [("hifence", cell)] * 6 +
            [("losad", cell)] * (6 - n) +
            [("hisad", cell)] * (6 - n) +
            [("losad", cell1) for cell1 in grid if isneighbor(cell, cell1)] +
            [("hisad", cell1) for cell1 in grid if isneighbor(cell, cell1)]
        )
        subsets[("disgusted", cell)] = (
            [("mood", cell)] +
            [("hisad", cell)] * 6 +
            [("lofence", cell)] * (6 - n) +
            [("hifence", cell)] * (6 - n)
        )
    for edge in edges:
        # Placing a fence on an edge contributes to both bordering cells.
        subsets[("fenceat", edge)] = [("lofence", cell) for cell in edge if cell in grid] + [("hifence", cell) for cell in edge if cell in grid]
    for vertex in vertices:
        # We require every vertex to have fencesto = 2. For vertices that are on the fence, this
        # means that exactly two of the three edges that connect at the vertex must be on the fence.
        # For vertices that are not on the fence, the nofence condition means that none of the edges
        # may be on the fence.
        node_ranges[("fencesto", vertex)] = 2
        subsets[("nofence", vertex)] = [("fencesto", vertex)] * 2
        for edge in edges:
            if vertexon(vertex, edge):
                subsets[("fenceat", edge)].append(("fencesto", vertex))
    print(title)
    for solution in grf.multi_covers(subsets, node_ranges=node_ranges):
        # Connectivity cannot be encoded in the cover; filter here.
        if singlefence(solution):
            printsolution(solution)
# Solve every puzzle in the input, numbering them from 1.
for jlayout, layout in enumerate(layouts, 1):
    solve(layout, "Puzzle #{}".format(jlayout))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import print_function
import sys
import os
import optparse
import textwrap
from .modulegraph import ModuleGraph
def main():
    """Command-line entry point: build a ModuleGraph for the given scripts
    or module names and emit a report, .dot graph, or HTML cross-reference.
    Exits with status 1 when no script is given, 0 otherwise."""
    # Parse command line
    usage = textwrap.dedent('''\
        Usage:
            modulegraph [options] scriptfile ...
        Valid options:
        * -d: Increase debug level
        * -q: Clear debug level
        * -m: arguments are module names, not script files
        * -x name: Add 'name' to the excludes list
        * -p name: Add 'name' to the module search path
        * -g: Output a .dot graph
        * -h: Output a html file
    ''')
    # add_help_option=False because -h is repurposed for HTML output.
    parser = optparse.OptionParser(usage=usage, add_help_option=False)
    parser.add_option('-d', action='count', dest='debug', default=1)
    parser.add_option('-q', action='store_const', dest='debug', const=0)
    parser.add_option('-m', action='store_true', dest='domods', default=False)
    parser.add_option('-x', action='append', dest='excludes', default=[])
    parser.add_option('-p', action='append', dest='addpath', default=[])
    parser.add_option('-g', action='store_const', dest='output', const='dot')
    parser.add_option('-h', action='store_const', dest='output', const='html')
    opts, args = parser.parse_args()
    if not args:
        print("No script specified", file=sys.stderr)
        print(usage, file=sys.stderr)
        sys.exit(1)
    script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = opts.addpath + path
    if opts.debug > 1:
        print("path:", file=sys.stderr)
        for item in path:
            print("   ", repr(item), file=sys.stderr)
    # Create the module finder and turn its crank
    mf = ModuleGraph(path, excludes=opts.excludes, debug=opts.debug)
    for arg in args:
        if opts.domods:
            # 'pkg.*' imports everything under the package.
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.run_script(arg)
    if opts.output == 'dot':
        mf.graphreport()
    elif opts.output == 'html':
        mf.create_xref()
    else:
        mf.report()
    sys.exit(0)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        print("\n[interrupt]")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
from neutron.db import model_base
class NetworkState(model_base.BASEV2):
    """Represents state of vlan_id on physical network."""
    __tablename__ = 'network_states'
    # Composite primary key: each VLAN id is tracked at most once per
    # physical network.
    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)
    vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                        autoincrement=False)
    # Whether this VLAN id has been handed out.
    allocated = sa.Column(sa.Boolean, nullable=False)
    def __init__(self, physical_network, vlan_id):
        # New states always start out unallocated.
        self.physical_network = physical_network
        self.vlan_id = vlan_id
        self.allocated = False
    def __repr__(self):
        return "<NetworkState(%s,%d,%s)>" % (self.physical_network,
                                             self.vlan_id, self.allocated)
class NetworkBinding(model_base.BASEV2):
    """Represents binding of virtual network to physical network and vlan."""
    __tablename__ = 'network_bindings'
    # One binding row per virtual network; rows disappear with the network.
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    physical_network = sa.Column(sa.String(64))
    vlan_id = sa.Column(sa.Integer, nullable=False)
    def __init__(self, network_id, physical_network, vlan_id):
        self.network_id = network_id
        self.physical_network = physical_network
        self.vlan_id = vlan_id
    def __repr__(self):
        return "<NetworkBinding(%s,%s,%d)>" % (self.network_id,
                                               self.physical_network,
                                               self.vlan_id)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .common import *
# ---- Dev-mode switches: verbose errors, relaxed HTTPS, extra features ----
DEBUG = True
TEMPLATE_DEBUG = True
HTTPS = 'off'
FEATURES['DISABLE_START_DATES'] = False
FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False  # Enable to test subdomains--otherwise, want all courses to show up
FEATURES['SUBDOMAIN_BRANDING'] = True
FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None  # show all university courses if in dev (ie don't use HTTP_HOST)
FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
FEATURES['ENABLE_PSYCHOMETRICS'] = False  # real-time psychometrics (eg item response theory analysis in instructor dashboard)
FEATURES['ENABLE_SERVICE_STATUS'] = True
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True  # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False  # Give all courses email (don't require django-admin perms)
FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
FEATURES['ENABLE_INSTRUCTOR_LEGACY_DASHBOARD'] = True
FEATURES['MULTIPLE_ENROLLMENT_ROLES'] = True
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_S3_GRADE_DOWNLOADS'] = True
FEATURES['IS_EDX_DOMAIN'] = True  # Is this an edX-owned domain? (used on instructor dashboard)
FEATURES['ENABLE_PAYMENT_FAKE'] = True
FEEDBACK_SUBMISSION_EMAIL = "dummy@example.com"
WIKI_ENABLED = True
# Local filesystem backend for django-pyfs static assets.
DJFS = {
    'type': 'osfs',
    'directory_root': 'lms/static/djpyfs',
    'url_root': '/static/djpyfs'
}
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "edx.db",
    }
}
CACHES = {
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    'mongo_metadata_inheritance': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/mongo_metadata_inheritance',
        'TIMEOUT': 300,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    'loc_cache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_location_mem_cache',
    },
    'course_structure_cache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_course_structure_mem_cache',
    },
}
# External grading queue credentials (sandbox; password scrubbed).
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    "basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
# Per-subdomain course listings used when SUBDOMAIN_COURSE_LISTINGS is on.
COURSE_LISTINGS = {
    'default': ['BerkeleyX/CS169.1x/2012_Fall',
                'BerkeleyX/CS188.1x/2012_Fall',
                'HarvardX/CS50x/2012',
                'HarvardX/PH207x/2012_Fall',
                'MITx/3.091x/2012_Fall',
                'MITx/6.002x/2012_Fall',
                'MITx/6.00x/2012_Fall'],
    'berkeley': ['BerkeleyX/CS169/fa12',
                 'BerkeleyX/CS188/fa12'],
    'harvard': ['HarvardX/CS50x/2012H'],
    'mit': ['MITx/3.091/MIT_2012_Fall'],
    'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
# Maps a subdomain to the university brand shown for it.
SUBDOMAIN_BRANDING = {
    'sjsu': 'MITx',
    'mit': 'MITx',
    'berkeley': 'BerkeleyX',
    'harvard': 'HarvardX',
    'openedx': 'openedx',
    'edge': 'edge',
}
# List of `university` landing pages to display, even though they may not
# have an actual course with that org set
VIRTUAL_UNIVERSITIES = []
# Organization that contain other organizations
META_UNIVERSITIES = {'UTx': ['UTAustinX']}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
############################## Course static files ##########################
if os.path.isdir(DATA_DIR):
    # Add the full course repo if there is no static directory
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir)
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir) and
            not os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
    # Otherwise, add only the static directory from the course dir
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir / 'static')
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
################################# edx-platform revision string #####################
EDX_PLATFORM_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip()
############################ Open ended grading config #####################
OPEN_ENDED_GRADING_INTERFACE = {
    'url': 'http://127.0.0.1:3033/',
    'username': 'lms',
    'password': 'abcd',
    'staff_grading': 'staff_grading',
    'peer_grading': 'peer_grading',
    'grading_controller': 'grading_controller'
}
############################## LMS Migration ##################################
FEATURES['ENABLE_LMS_MIGRATION'] = True
FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False  # require that user be in the staff_* group to be able to enroll
FEATURES['XQA_SERVER'] = 'http://xqa:server@content-qa.edX.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
# Migration endpoints are restricted to localhost.
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
FEATURES['AUTH_USE_OPENID'] = True
FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id'  # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
############################## OAUTH2 Provider ################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
######################## MIT Certificates SSL Auth ############################
FEATURES['AUTH_USE_CERTIFICATES'] = False
########################### External REST APIs #################################
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'djpyfs',)
MIDDLEWARE_CLASSES += (
    'django_comment_client.utils.QueryCountDebugMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
# The toolbar only renders for these client IPs.
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
)
#################### FILE UPLOADS (for discussion forums) #####################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Shibboleth external auth and registration-method enrollment restriction.
FEATURES['AUTH_USE_SHIB'] = True
FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
##### Segment.io ######
# If there's an environment variable set, grab it and turn on Segment.io
SEGMENT_IO_LMS_KEY = os.environ.get('SEGMENT_IO_LMS_KEY')
if SEGMENT_IO_LMS_KEY:
    FEATURES['SEGMENT_IO_LMS'] = True
###################### Payment ######################
# Credit-card processor credentials come from the environment; the purchase
# endpoints point at the local payment_fake app for testing.
CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = os.environ.get('CYBERSOURCE_SHARED_SECRET', '')
CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = os.environ.get('CYBERSOURCE_MERCHANT_ID', '')
CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = os.environ.get('CYBERSOURCE_SERIAL_NUMBER', '')
CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = '/shoppingcart/payment_fake/'
CC_PROCESSOR['CyberSource2']['SECRET_KEY'] = os.environ.get('CYBERSOURCE_SECRET_KEY', '')
CC_PROCESSOR['CyberSource2']['ACCESS_KEY'] = os.environ.get('CYBERSOURCE_ACCESS_KEY', '')
CC_PROCESSOR['CyberSource2']['PROFILE_ID'] = os.environ.get('CYBERSOURCE_PROFILE_ID', '')
CC_PROCESSOR['CyberSource2']['PURCHASE_ENDPOINT'] = '/shoppingcart/payment_fake/'
########################## USER API ##########################
EDX_API_KEY = None
####################### Shoppingcart ###########################
FEATURES['ENABLE_SHOPPING_CART'] = True
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = True
### This settings is for the course registration code length ############
REGISTRATION_CODE_LENGTH = 8
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
    from .private import *  # pylint: disable=import-error
except ImportError:
    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
//go:build linux
package bridge
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/moby/moby/v2/internal/testutil/netnsutils"
"github.com/vishvananda/netlink"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
// TestMirroredWSL2Workaround verifies that WSL2 mirrored-networking mode is
// only detected when a loopback0 interface exists AND an executable wslinfo
// file is present; either condition missing must yield "not mirrored".
func TestMirroredWSL2Workaround(t *testing.T) {
	type testCase struct {
		desc        string
		loopback0   bool
		wslinfoPerm os.FileMode // 0 for no-file
		expMirrored bool
	}
	testCases := []testCase{
		{desc: "No loopback0"},
		{desc: "WSL2 mirrored", loopback0: true, wslinfoPerm: 0o777, expMirrored: true},
		{desc: "loopback0 but wslinfo not executable", loopback0: true, wslinfoPerm: 0o666},
		{desc: "loopback0 but no wslinfo", loopback0: true},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			// Each subtest runs in its own network namespace so the dummy
			// interface does not leak into other tests.
			defer netnsutils.SetupTestOSContext(t)()
			simulateWSL2MirroredMode(t, tc.loopback0, tc.wslinfoPerm)
			got := isRunningUnderWSL2MirroredMode(context.Background())
			assert.Check(t, is.Equal(got, tc.expMirrored))
		})
	}
}
// simulateWSL2MirroredMode simulates the WSL2 mirrored mode by creating a
// loopback0 interface and optionally creating a wslinfo file with the given
// permissions.
// simulateWSL2MirroredMode simulates the WSL2 mirrored mode by creating a
// loopback0 interface and optionally creating a wslinfo file with the given
// permissions.
//
// It mutates the package-level wslinfoPath variable when wslinfoPerm is
// nonzero, and registers a t.Cleanup that restores the original value, so
// subtests must not run this in parallel.
func simulateWSL2MirroredMode(t *testing.T, loopback0 bool, wslinfoPerm os.FileMode) {
	if loopback0 {
		// A dummy link named "loopback0" stands in for the interface WSL2
		// creates in mirrored-networking mode.
		iface := &netlink.Dummy{
			LinkAttrs: netlink.LinkAttrs{
				Name: "loopback0",
			},
		}
		err := netlink.LinkAdd(iface)
		assert.NilError(t, err)
	}
	// Save the original path BEFORE any conditional override so the cleanup
	// below always restores a valid value.
	wslinfoPathOrig := wslinfoPath
	if wslinfoPerm != 0 {
		tmpdir := t.TempDir()
		p := filepath.Join(tmpdir, "wslinfo")
		err := os.WriteFile(p, []byte("#!/bin/sh\necho dummy file\n"), wslinfoPerm)
		assert.NilError(t, err)
		wslinfoPath = p
	}
	t.Cleanup(func() {
		wslinfoPath = wslinfoPathOrig
	})
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/libnetwork/drivers/bridge/wsl2_linux_test.go
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
import rl_data
import sym
import argparse
import logging
import os
import gym
from datetime import datetime
import time
import sys
try:
from importlib import reload
except ImportError:
pass
# Command-line interface for the A3C training/testing script.
# Fix: "Traing" -> "Training" in the user-visible description.
parser = argparse.ArgumentParser(description='Training A3C with OpenAI Gym')
parser.add_argument('--test', action='store_true', help='run testing', default=False)
parser.add_argument('--log-file', type=str, help='the name of log file')
parser.add_argument('--log-dir', type=str, default="./log", help='directory of the log file')
parser.add_argument('--model-prefix', type=str, help='the prefix of the model to load')
parser.add_argument('--save-model-prefix', type=str, help='the prefix of the model to save')
parser.add_argument('--load-epoch', type=int, help="load the model on an epoch using the model-prefix")
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--gpus', type=str, help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--num-epochs', type=int, default=120, help='the number of training epochs')
parser.add_argument('--num-examples', type=int, default=1000000, help='the number of training examples')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--input-length', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--wd', type=float, default=0)
# A3C hyper-parameters: rollout length, discount, entropy-bonus weight.
parser.add_argument('--t-max', type=int, default=4)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--beta', type=float, default=0.08)
# Parsed at import time: this module is a script, not a library.
args = parser.parse_args()
def log_config(log_dir=None, log_file=None, prefix=None, rank=0):
    """Configure root logging for this worker.

    Re-imports ``logging`` so repeated calls start from a clean module
    state, then logs to the console and, when *log_dir* is given, also to
    a file inside it (created on demand, with an auto-generated
    timestamped name unless *log_file* is supplied).

    :param log_dir: directory for the log file; console-only when falsy.
    :param log_file: explicit log file name; auto-generated when falsy.
    :param prefix: prefix for the auto-generated file name.
    :param rank: worker rank embedded in every log line.
    """
    reload(logging)
    fmt = '%(asctime)-15s Node[' + str(rank) + '] %(message)s'
    # Console handler is installed in both branches, so do it once here.
    logging.basicConfig(level=logging.DEBUG, format=fmt)
    if log_dir:
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if not log_file:
            stamp = datetime.now().strftime('_%Y_%m_%d-%H_%M.log')
            # '/' would be interpreted as a path separator in the file name.
            log_file = ((prefix if prefix else '') + stamp).replace('/', '-')
        file_handler = logging.FileHandler(os.path.join(log_dir, log_file), mode='w')
        file_handler.setFormatter(logging.Formatter(fmt))
        logging.getLogger().addHandler(file_handler)
    logging.info('start with arguments %s', args)
def train():
    """Train the A3C agent on Breakout-v0 with MXNet.

    Reads all hyper-parameters from the module-level ``args``. Runs
    ``args.num_epochs`` epochs of ``t_max``-step rollouts, accumulating
    policy/value gradients per rollout and applying them with Adam.
    Saves parameters once per epoch when a save prefix is configured.
    """
    # kvstore
    kv = mx.kvstore.create(args.kv_store)
    model_prefix = args.model_prefix
    if model_prefix is not None:
        model_prefix += "-%d" % (kv.rank)
    save_model_prefix = args.save_model_prefix
    if save_model_prefix is None:
        save_model_prefix = model_prefix
    log_config(args.log_dir, args.log_file, save_model_prefix, kv.rank)
    devs = mx.cpu() if args.gpus is None else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]
    epoch_size = args.num_examples / args.batch_size
    if args.kv_store == 'dist_sync':
        epoch_size /= kv.num_workers
    # disable kvstore for single device
    # (fixed: was "is 1", an identity comparison that only worked through
    # CPython's small-int caching; equality is the correct test)
    if 'local' in kv.type and (
            args.gpus is None or len(args.gpus.split(',')) == 1):
        kv = None
    # module
    dataiter = rl_data.GymDataIter('Breakout-v0', args.batch_size, args.input_length, web_viz=True)
    net = sym.get_symbol_atari(dataiter.act_dim)
    module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
    # grad_req='add' accumulates gradients over the whole t_max rollout;
    # they are zeroed manually at the start of each rollout below.
    module.bind(data_shapes=dataiter.provide_data,
                label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
                grad_req='add')
    # load model
    if args.load_epoch is not None:
        assert model_prefix is not None
        _, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.load_epoch)
    else:
        arg_params = aux_params = None
    # save model
    checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)
    # Small uniform init for the output heads, Xavier for everything else.
    init = mx.init.Mixed(['fc_value_weight|fc_policy_weight', '.*'],
                         [mx.init.Uniform(0.001), mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)])
    module.init_params(initializer=init,
                       arg_params=arg_params, aux_params=aux_params)
    # optimizer
    module.init_optimizer(kvstore=kv, optimizer='adam',
                          optimizer_params={'learning_rate': args.lr, 'wd': args.wd, 'epsilon': 1e-3})
    # logging
    np.set_printoptions(precision=3, suppress=True)
    T = 0  # total number of finished episodes across the batch
    dataiter.reset()
    score = np.zeros((args.batch_size, 1))        # running episode scores
    final_score = np.zeros((args.batch_size, 1))  # score of last finished episode
    for epoch in range(args.num_epochs):
        if save_model_prefix:
            module.save_params('%s-%04d.params'%(save_model_prefix, epoch))
        for _ in range(int(epoch_size/args.t_max)):
            tic = time.time()
            # clear gradients (required because grad_req='add' above)
            for exe in module._exec_group.grad_arrays:
                for g in exe:
                    g[:] = 0
            # Roll out t_max steps; the extra iteration only records the
            # bootstrap value V[t_max] and takes no action.
            S, A, V, r, D = [], [], [], [], []
            for t in range(args.t_max + 1):
                data = dataiter.data()
                module.forward(mx.io.DataBatch(data=data, label=None), is_train=False)
                act, _, val = module.get_outputs()
                V.append(val.asnumpy())
                if t < args.t_max:
                    act = act.asnumpy()
                    # Sample actions from the policy distribution.
                    act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
                    reward, done = dataiter.act(act)
                    S.append(data)
                    A.append(act)
                    r.append(reward.reshape((-1, 1)))
                    D.append(done.reshape((-1, 1)))
            err = 0
            # Backwards pass over the rollout: n-step discounted returns,
            # with (1 - done) masking resets at episode boundaries.
            R = V[args.t_max]
            for i in reversed(range(args.t_max)):
                R = r[i] + args.gamma * (1 - D[i]) * R
                adv = np.tile(R - V[i], (1, dataiter.act_dim))
                batch = mx.io.DataBatch(data=S[i], label=[mx.nd.array(A[i]), mx.nd.array(R)])
                module.forward(batch, is_train=True)
                pi = module.get_outputs()[1]
                # Entropy bonus, weighted by beta, encourages exploration.
                h = -args.beta*(mx.nd.log(pi+1e-7)*pi)
                out_acts = np.amax(pi.asnumpy(), 1)
                out_acts=np.reshape(out_acts,(-1,1))
                out_acts_tile=np.tile(-np.log(out_acts + 1e-7),(1, dataiter.act_dim))
                module.backward([mx.nd.array(out_acts_tile*adv), h])
                print('pi', pi[0].asnumpy())
                print('h', h[0].asnumpy())
                err += (adv**2).mean()
                score += r[i]
                final_score *= (1-D[i])
                final_score += score * D[i]
                score *= 1-D[i]
                T += D[i].sum()
            module.update()
            logging.info('fps: %f err: %f score: %f final: %f T: %f'%(args.batch_size/(time.time()-tic), err/args.t_max, score.mean(), final_score.mean(), T))
            print(score.squeeze())
            print(final_score.squeeze())
def test():
    """Evaluate a saved model by sampling from its policy.

    Requires --model-prefix and --load-epoch. Logs the running mean
    episode score every 100 steps.
    """
    log_config()
    devs = mx.cpu() if args.gpus is None else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]
    # module
    # NOTE(review): robo_data is not imported anywhere in this file, so this
    # line raises NameError as written — presumably a sibling module that
    # was dropped from the imports; confirm and restore the import.
    dataiter = robo_data.RobosimsDataIter('scenes', args.batch_size, args.input_length, web_viz=True)
    print(dataiter.provide_data)
    net = sym.get_symbol_thor(dataiter.act_dim)
    module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
    module.bind(data_shapes=dataiter.provide_data,
                label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
                for_training=False)
    # load model
    assert args.load_epoch is not None
    assert args.model_prefix is not None
    module.load_params('%s-%04d.params'%(args.model_prefix, args.load_epoch))
    # int() fix: true division yields a float on Python 3 and range()
    # requires an integer.
    N = int(args.num_epochs * args.num_examples / args.batch_size)
    R = 0        # sum of finished-episode scores
    T = 1e-20    # episode count (epsilon avoids division by zero)
    score = np.zeros((args.batch_size,))
    for t in range(N):
        dataiter.clear_history()
        data = dataiter.next()
        module.forward(data, is_train=False)
        act = module.get_outputs()[0].asnumpy()
        # Sample actions from the policy distribution.
        act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
        dataiter.act(act)
        time.sleep(0.05)
        _, reward, _, done = dataiter.history[0]
        T += done.sum()
        score += reward
        R += (done*score).sum()
        score *= (1-done)
        if t % 100 == 0:
            logging.info('n %d score: %f T: %f'%(t, R/T, T))
# Entry point: evaluate a saved model when --test is passed, else train.
if __name__ == '__main__':
    if args.test:
        test()
    else:
        train()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""M2Crypto wrapper for OpenSSL X509 API.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved.
Portions created by Open Source Applications Foundation (OSAF) are
Copyright (C) 2004-2007 OSAF. All Rights Reserved.
Author: Heikki Toivonen
"""
# M2Crypto
from M2Crypto import ASN1, BIO, Err, EVP, util
import m2
# Certificate (de)serialization formats accepted by the load/save helpers.
FORMAT_DER = 0
FORMAT_PEM = 1
class X509Error(Exception): pass
# Register the Python exception type the m2 C layer raises on X509 errors.
m2.x509_init(X509Error)
V_OK = m2.X509_V_OK  # verification-succeeded result code
def new_extension(name, value, critical=0, _pyfree=1):
    """
    Create new X509_Extension instance.

    @param name: extension name, e.g. 'subjectAltName'.
    @param value: extension value, e.g. 'DNS:www.example.com'.
    @param critical: nonzero marks the extension critical.
    @param _pyfree: nonzero makes Python free the underlying C object.
    @return: X509_Extension
    """
    # OpenSSL cannot compute the subject key hash itself here, so the caller
    # must supply a precomputed colon-separated hex digest.
    # Fixed: was "is not ''", an identity comparison that only behaved by
    # virtue of CPython's interning of the empty string; use equality.
    if name == 'subjectKeyIdentifier' and \
        value.strip('0123456789abcdefABCDEF:') != '':
        raise ValueError('value must be precomputed hash')
    lhash = m2.x509v3_lhash()
    ctx = m2.x509v3_set_conf_lhash(lhash)
    x509_ext_ptr = m2.x509v3_ext_conf(lhash, ctx, name, value)
    x509_ext = X509_Extension(x509_ext_ptr, _pyfree)
    x509_ext.set_critical(critical)
    return x509_ext
class X509_Extension:
    """
    X509 Extension

    Thin wrapper over an OpenSSL X509_EXTENSION* pointer.
    """
    # Bound at class-definition time so __del__ can still free the C object
    # during interpreter shutdown, when the m2 module global may already be
    # torn down.
    m2_x509_extension_free = m2.x509_extension_free
    def __init__(self, x509_ext_ptr=None, _pyfree=1):
        # x509_ext_ptr: raw C pointer (or None). _pyfree: nonzero means
        # Python owns the pointer and frees it in __del__.
        self.x509_ext = x509_ext_ptr
        self._pyfree = _pyfree
    def __del__(self):
        # getattr() guard: __init__ may have raised before _pyfree was set.
        if getattr(self, '_pyfree', 0) and self.x509_ext:
            self.m2_x509_extension_free(self.x509_ext)
    def _ptr(self):
        # Expose the raw pointer for the sibling wrappers in this module.
        return self.x509_ext
    def set_critical(self, critical=1):
        """
        Mark this extension critical or noncritical. By default an
        extension is not critical.
        @type critical: int
        @param critical: Nonzero sets this extension as critical.
        Calling this method without arguments will
        set this extension to critical.
        """
        return m2.x509_extension_set_critical(self.x509_ext, critical)
    def get_critical(self):
        """
        Return whether or not this is a critical extension.
        @rtype: int
        @return: Nonzero if this is a critical extension.
        """
        return m2.x509_extension_get_critical(self.x509_ext)
    def get_name(self):
        """
        Get the extension name, for example 'subjectAltName'.
        """
        return m2.x509_extension_get_name(self.x509_ext)
    def get_value(self, flag=0, indent=0):
        """
        Get the extension value, for example 'DNS:www.example.com'.
        @param flag: Flag to control what and how to print.
        @param indent: How many spaces to print before actual value.
        """
        buf=BIO.MemoryBuffer()
        m2.x509_ext_print(buf.bio_ptr(), self.x509_ext, flag, indent)
        return buf.read_all()
class X509_Extension_Stack:
    """
    X509 Extension Stack
    @warning: Do not modify the underlying OpenSSL stack
    except through this interface, or use any OpenSSL functions that do so
    indirectly. Doing so will get the OpenSSL stack and the internal pystack
    of this class out of sync, leading to python memory leaks, exceptions
    or even python crashes!
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown, when the m2 module global may already be torn down.
    m2_sk_x509_extension_free = m2.sk_x509_extension_free
    def __init__(self, stack=None, _pyfree=0):
        # Wrap an existing stack pointer, or allocate a fresh empty stack
        # (which Python then owns and must free).
        if stack is not None:
            self.stack = stack
            self._pyfree = _pyfree
            # Bugfix: pystack must be created BEFORE the mirroring loop
            # below; previously it was only initialized in the else-branch,
            # so wrapping a non-empty existing stack raised AttributeError.
            # (Mirrors X509_Stack.__init__.)
            self.pystack = [] # This must be kept in sync with self.stack
            num = m2.sk_x509_extension_num(self.stack)
            for i in range(num):
                self.pystack.append(X509_Extension(m2.sk_x509_extension_value(self.stack, i),
                                                   _pyfree=_pyfree))
        else:
            self.stack = m2.sk_x509_extension_new_null()
            self._pyfree = 1
            self.pystack = [] # This must be kept in sync with self.stack
    def __del__(self):
        if getattr(self, '_pyfree', 0):
            self.m2_sk_x509_extension_free(self.stack)
    def __len__(self):
        assert m2.sk_x509_extension_num(self.stack) == len(self.pystack)
        return len(self.pystack)
    def __getitem__(self, idx):
        return self.pystack[idx]
    def __iter__(self):
        return iter(self.pystack)
    def _ptr(self):
        return self.stack
    def push(self, x509_ext):
        """
        Push X509_Extension object onto the stack.
        @type x509_ext: M2Crypto.X509.X509_Extension
        @param x509_ext: X509_Extension object to be pushed onto the stack.
        @return: The number of extensions on the stack.
        """
        self.pystack.append(x509_ext)
        ret = m2.sk_x509_extension_push(self.stack, x509_ext._ptr())
        assert ret == len(self.pystack)
        return ret
    def pop(self):
        """
        Pop X509_Extension object from the stack.
        @return: X509_Extension popped, or None if the stack is empty.
        """
        x509_ext_ptr = m2.sk_x509_extension_pop(self.stack)
        if x509_ext_ptr is None:
            assert len(self.pystack) == 0
            return None
        return self.pystack.pop()
class X509_Name_Entry:
    """
    X509 Name Entry

    Wrapper over an OpenSSL X509_NAME_ENTRY* pointer.
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown.
    m2_x509_name_entry_free = m2.x509_name_entry_free
    def __init__(self, x509_name_entry, _pyfree=0):
        # x509_name_entry is the raw C pointer; _pyfree nonzero means
        # Python owns it and frees it in __del__.
        self.x509_name_entry = x509_name_entry
        self._pyfree = _pyfree
    def __del__(self):
        if getattr(self, '_pyfree', 0):
            self.m2_x509_name_entry_free(self.x509_name_entry)
    def _ptr(self):
        return self.x509_name_entry
    def set_object(self, asn1obj):
        # Set the entry's field OID from an ASN1_Object wrapper.
        return m2.x509_name_entry_set_object(self.x509_name_entry,
                                             asn1obj._ptr())
    def set_data(self, data, type=ASN1.MBSTRING_ASC):
        # Set the entry's value; `type` is the ASN.1 string encoding.
        return m2.x509_name_entry_set_data(self.x509_name_entry,
                                           type, data)
    def get_object(self):
        return ASN1.ASN1_Object(m2.x509_name_entry_get_object(self.x509_name_entry))
    def get_data(self):
        return ASN1.ASN1_String(m2.x509_name_entry_get_data(self.x509_name_entry))
    def create_by_txt(self, field, type, entry, len):
        # Bugfix: self.x509_name_entry is already the raw C pointer and has
        # no _ptr() method — the old self.x509_name_entry._ptr() call raised
        # AttributeError on every invocation.
        return m2.x509_name_entry_create_by_txt(self.x509_name_entry,
                                                field, type, entry, len)
class X509_Name:
    """
    X509 Name

    Wrapper over an OpenSSL X509_NAME* pointer. Well-known RDN fields are
    exposed as attributes (e.g. name.CN, name.emailAddress) through the
    __getattr__/__setattr__ hooks below, keyed by the `nid` table.
    """
    # Maps attribute names (both short and long forms) to OpenSSL NIDs.
    nid = {'C' : m2.NID_countryName,
           'SP' : m2.NID_stateOrProvinceName,
           'ST' : m2.NID_stateOrProvinceName,
           'stateOrProvinceName' : m2.NID_stateOrProvinceName,
           'L' : m2.NID_localityName,
           'localityName' : m2.NID_localityName,
           'O' : m2.NID_organizationName,
           'organizationName' : m2.NID_organizationName,
           'OU' : m2.NID_organizationalUnitName,
           'organizationUnitName' : m2.NID_organizationalUnitName,
           'CN' : m2.NID_commonName,
           'commonName' : m2.NID_commonName,
           'Email' : m2.NID_pkcs9_emailAddress,
           'emailAddress' : m2.NID_pkcs9_emailAddress,
           'serialNumber' : m2.NID_serialNumber,
           'SN' : m2.NID_surname,
           'surname' : m2.NID_surname,
           'GN' : m2.NID_givenName,
           'givenName' : m2.NID_givenName
           }
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown.
    m2_x509_name_free = m2.x509_name_free
    def __init__(self, x509_name=None, _pyfree=0):
        # Wrap an existing pointer, or allocate a fresh empty name (which
        # Python then owns and must free).
        if x509_name is not None:
            assert m2.x509_name_type_check(x509_name), "'x509_name' type error"
            self.x509_name = x509_name
            self._pyfree = _pyfree
        else:
            self.x509_name = m2.x509_name_new ()
            self._pyfree = 1
    def __del__(self):
        if getattr(self, '_pyfree', 0):
            self.m2_x509_name_free(self.x509_name)
    def __str__(self):
        assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
        return m2.x509_name_oneline(self.x509_name)
    def __getattr__(self, attr):
        # Known RDN fields are looked up in the C structure; everything
        # else falls back to the instance dict.
        if attr in self.nid:
            assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
            return m2.x509_name_by_nid(self.x509_name, self.nid[attr])
        if attr in self.__dict__:
            return self.__dict__[attr]
        # Python 2 raise syntax; this module targets Python 2.
        raise AttributeError, (self, attr)
    def __setattr__(self, attr, value):
        # Assignments to known RDN fields go straight into the C structure;
        # other attributes are stored normally.
        if attr in self.nid:
            assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
            return m2.x509_name_set_by_nid(self.x509_name, self.nid[attr], value)
        self.__dict__[attr] = value
    def __len__(self):
        return m2.x509_name_entry_count(self.x509_name)
    def __getitem__(self, idx):
        if not 0 <= idx < self.entry_count():
            raise IndexError("index out of range")
        return X509_Name_Entry(m2.x509_name_get_entry(self.x509_name, idx))
    def __iter__(self):
        for i in xrange(self.entry_count()):
            yield self[i]
    def _ptr(self):
        #assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
        return self.x509_name
    def add_entry_by_txt(self, field, type, entry, len, loc, set):
        # Thin pass-through to X509_NAME_add_entry_by_txt; see OpenSSL docs
        # for the meaning of loc/set.
        return m2.x509_name_add_entry_by_txt(self.x509_name, field, type,
                                             entry, len, loc, set )
    def entry_count( self ):
        return m2.x509_name_entry_count( self.x509_name )
    def get_entries_by_nid(self, nid):
        # Collect every entry matching `nid`, walking the index cursor the
        # way X509_NAME_get_index_by_NID expects (-1 starts the scan).
        ret = []
        lastpos = -1
        while True:
            lastpos = m2.x509_name_get_index_by_nid(self.x509_name, nid,
                                                    lastpos)
            if lastpos == -1:
                break
            ret.append(self[lastpos])
        return ret
    def as_text(self, indent=0, flags=m2.XN_FLAG_COMPAT):
        """
        as_text returns the name as a string.
        @param indent: Each line in multiline format is indented
        by this many spaces.
        @param flags: Flags that control how the output should be formatted.
        """
        assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
        buf=BIO.MemoryBuffer()
        m2.x509_name_print_ex(buf.bio_ptr(), self.x509_name, indent, flags)
        return buf.read_all()
    def as_der(self):
        assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
        return m2.x509_name_get_der(self.x509_name)
    def as_hash(self):
        assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
        return m2.x509_name_hash(self.x509_name)
class X509:
    """
    X.509 Certificate

    Wrapper over an OpenSSL X509* pointer. The repeated
    m2.x509_type_check() assertions guard against a stale or foreign SWIG
    pointer being passed in.
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown, when the m2 module global may already be torn down.
    m2_x509_free = m2.x509_free
    def __init__(self, x509=None, _pyfree=0):
        # Wrap an existing certificate pointer, or allocate a fresh empty
        # certificate (which Python then owns and must free).
        if x509 is not None:
            assert m2.x509_type_check(x509), "'x509' type error"
            self.x509 = x509
            self._pyfree = _pyfree
        else:
            self.x509 = m2.x509_new ()
            self._pyfree = 1
    def __del__(self):
        # getattr() guard: __init__ may have raised before _pyfree was set.
        if getattr(self, '_pyfree', 0):
            self.m2_x509_free(self.x509)
    def _ptr(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return self.x509
    def as_text(self):
        """Return a human-readable text rendering of the certificate."""
        assert m2.x509_type_check(self.x509), "'x509' type error"
        buf=BIO.MemoryBuffer()
        m2.x509_print(buf.bio_ptr(), self.x509)
        return buf.read_all()
    def as_der(self):
        """Return the certificate DER-encoded as a byte string."""
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.i2d_x509(self.x509)
    def as_pem(self):
        """Return the certificate PEM-encoded as a string."""
        buf=BIO.MemoryBuffer()
        m2.x509_write_pem(buf.bio_ptr(), self.x509)
        return buf.read_all()
    def save_pem(self, filename):
        """
        Save the certificate to a file in PEM format.
        """
        bio=BIO.openfile(filename, 'wb')
        return m2.x509_write_pem(bio.bio_ptr(), self.x509)
    def save(self, filename, format=FORMAT_PEM):
        """
        Saves X.509 certificate to a file. Default output
        format is PEM.
        @type filename: string
        @param filename: Name of the file the cert will be saved to.
        @type format: int
        @param format: Controls what output format is used to save the cert.
        Either FORMAT_PEM or FORMAT_DER to save in PEM or DER format.
        Raises a ValueError if an unknow format is used.
        """
        bio = BIO.openfile(filename, 'wb')
        if format == FORMAT_PEM:
            return m2.x509_write_pem(bio.bio_ptr(), self.x509)
        elif format == FORMAT_DER:
            return m2.i2d_x509_bio(bio.bio_ptr(), self.x509)
        else:
            raise ValueError("Unknown filetype. Must be either FORMAT_PEM or FORMAT_DER")
    def set_version(self, version):
        """
        Set version.
        @type version: int
        @param version: Version number.
        @rtype: int
        @return: Returns 0 on failure.
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_version(self.x509, version)
    def set_not_before(self, asn1_utctime):
        # asn1_utctime: ASN1.ASN1_UTCTIME wrapper.
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_not_before(self.x509, asn1_utctime._ptr())
    def set_not_after(self, asn1_utctime):
        # asn1_utctime: ASN1.ASN1_UTCTIME wrapper.
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_not_after(self.x509, asn1_utctime._ptr())
    def set_subject_name(self, name):
        # name: X509_Name wrapper.
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_subject_name(self.x509, name.x509_name)
    def set_issuer_name(self, name):
        # name: X509_Name wrapper.
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_issuer_name(self.x509, name.x509_name)
    def get_version(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_get_version(self.x509)
    def get_serial_number(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        asn1_integer = m2.x509_get_serial_number(self.x509)
        return m2.asn1_integer_get(asn1_integer)
    def set_serial_number(self, serial):
        """
        Set serial number.
        @type serial: int
        @param serial: Serial number.
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        # This "magically" changes serial since asn1_integer
        # is C pointer to x509's internal serial number.
        asn1_integer = m2.x509_get_serial_number(self.x509)
        return m2.asn1_integer_set(asn1_integer, serial)
        # XXX Or should I do this?
        #asn1_integer = m2.asn1_integer_new()
        #m2.asn1_integer_set(asn1_integer, serial)
        #return m2.x509_set_serial_number(self.x509, asn1_integer)
    def get_not_before(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return ASN1.ASN1_UTCTIME(m2.x509_get_not_before(self.x509))
    def get_not_after(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return ASN1.ASN1_UTCTIME(m2.x509_get_not_after(self.x509))
    def get_pubkey(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return EVP.PKey(m2.x509_get_pubkey(self.x509), _pyfree=1)
    def set_pubkey(self, pkey):
        """
        Set the public key for the certificate
        @type pkey: EVP_PKEY
        @param pkey: Public key
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_pubkey(self.x509, pkey.pkey)
    def get_issuer(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return X509_Name(m2.x509_get_issuer_name(self.x509))
    def set_issuer(self, name):
        """
        Set issuer name.
        @type name: X509_Name
        @param name: subjectName field.
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_issuer_name(self.x509, name.x509_name)
    def get_subject(self):
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return X509_Name(m2.x509_get_subject_name(self.x509))
    def set_subject(self, name):
        """
        Set subject name.
        @type name: X509_Name
        @param name: subjectName field.
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_set_subject_name(self.x509, name.x509_name)
    def add_ext(self, ext):
        """
        Add X509 extension to this certificate.
        @type ext: X509_Extension
        @param ext: Extension
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        return m2.x509_add_ext(self.x509, ext.x509_ext, -1)
    def get_ext(self, name):
        """
        Get X509 extension by name.
        @type name: Name of the extension
        @param name: str
        @return: X509_Extension
        @raise LookupError: if no extension with that name exists.
        """
        # Optimizations to reduce attribute accesses
        m2x509_get_ext = m2.x509_get_ext
        m2x509_extension_get_name = m2.x509_extension_get_name
        x509 = self.x509
        # Linear scan; returns the first extension whose name matches.
        for i in range(m2.x509_get_ext_count(x509)):
            extPtr = m2x509_get_ext(x509, i)
            if m2x509_extension_get_name(extPtr) == name:
                return X509_Extension(extPtr, _pyfree=0)
        raise LookupError
    def get_ext_at(self, index):
        """
        Get X509 extension by index.
        @type index: Name of the extension
        @param index: int
        @return: X509_Extension
        @raise IndexError: if index is out of range.
        """
        if index < 0 or index >= self.get_ext_count():
            raise IndexError
        return X509_Extension(m2.x509_get_ext(self.x509, index),
                              _pyfree=0)
    def get_ext_count(self):
        """
        Get X509 extension count.
        """
        return m2.x509_get_ext_count(self.x509)
    def sign(self, pkey, md):
        """
        Sign the certificate.
        @type pkey: EVP_PKEY
        @param pkey: Public key
        @type md: str
        @param md: Message digest algorithm to use for signing,
        for example 'sha1'.
        """
        assert m2.x509_type_check(self.x509), "'x509' type error"
        # The digest name must match an m2 factory function (e.g. m2.sha1).
        mda = getattr(m2, md, None)
        if mda is None:
            # Python 2 raise syntax; this module targets Python 2.
            raise ValueError, ('unknown message digest', md)
        return m2.x509_sign(self.x509, pkey.pkey, mda())
    def verify(self, pkey=None):
        # Verify the signature with the given key, or with the
        # certificate's own public key (self-signed check) when omitted.
        assert m2.x509_type_check(self.x509), "'x509' type error"
        if pkey:
            return m2.x509_verify(self.x509, pkey.pkey)
        else:
            return m2.x509_verify(self.x509, self.get_pubkey().pkey)
    def check_ca(self):
        """
        Check if the certificate is a Certificate Authority (CA) certificate.
        @return: 0 if the certificate is not CA, nonzero otherwise.
        @requires: OpenSSL 0.9.8 or newer
        """
        return m2.x509_check_ca(self.x509)
    def check_purpose(self, id, ca):
        """
        Check if the certificate's purpose matches the asked purpose.
        @param id: Purpose id. See X509_PURPOSE_* constants.
        @param ca: 1 if the certificate should be CA, 0 otherwise.
        @return: 0 if the certificate purpose does not match, nonzero otherwise.
        """
        return m2.x509_check_purpose(self.x509, id, ca)
    def get_fingerprint(self, md='md5'):
        """
        Get the fingerprint of the certificate.
        @param md: Message digest algorithm to use.
        @return: String containing the fingerprint in hex format.
        """
        der = self.as_der()
        md = EVP.MessageDigest(md)
        md.update(der)
        digest = md.final()
        # [2:-1] strips the '0x' prefix and the trailing 'L' that hex() on a
        # Python 2 long produces.
        return hex(util.octx_to_num(digest))[2:-1].upper()
def load_cert(file, format=FORMAT_PEM):
    """
    Load certificate from file.
    @type file: string
    @param file: Name of file containing certificate in either DER or PEM format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the file to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.X509
    @return: M2Crypto.X509.X509 object.
    """
    bio = BIO.openfile(file)
    # PEM input is delegated to the BIO-based loader; DER is decoded here.
    if format == FORMAT_PEM:
        return load_cert_bio(bio)
    if format != FORMAT_DER:
        raise ValueError("Unknown format. Must be either FORMAT_DER or FORMAT_PEM")
    cptr = m2.d2i_x509(bio._ptr())
    if cptr is None:
        raise X509Error(Err.get_error())
    return X509(cptr, _pyfree=1)
def load_cert_bio(bio, format=FORMAT_PEM):
    """
    Load certificate from a bio.
    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO pointing at a certificate in either DER or PEM format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the cert to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.X509
    @return: M2Crypto.X509.X509 object.
    """
    # Dispatch to the matching m2 reader for the requested format.
    readers = {FORMAT_PEM: m2.x509_read_pem, FORMAT_DER: m2.d2i_x509}
    if format not in readers:
        raise ValueError("Unknown format. Must be either FORMAT_DER or FORMAT_PEM")
    cptr = readers[format](bio._ptr())
    if cptr is None:
        raise X509Error(Err.get_error())
    return X509(cptr, _pyfree=1)
def load_cert_string(string, format=FORMAT_PEM):
    """
    Load certificate from a string.
    @type string: string
    @param string: String containing a certificate in either DER or PEM format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the cert to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.X509
    @return: M2Crypto.X509.X509 object.
    """
    # Wrap the string in a memory BIO and reuse the BIO-based loader.
    bio = BIO.MemoryBuffer(string)
    return load_cert_bio(bio, format)
def load_cert_der_string(string):
    """
    Load certificate from a string.
    @type string: string
    @param string: String containing a certificate in DER format.
    @rtype: M2Crypto.X509.X509
    @return: M2Crypto.X509.X509 object.
    @raise X509Error: if the string cannot be decoded as DER.
    """
    bio = BIO.MemoryBuffer(string)
    cptr = m2.d2i_x509(bio._ptr())
    if cptr is None:
        raise X509Error(Err.get_error())
    return X509(cptr, _pyfree=1)
class X509_Store_Context:
    """
    X509 Store Context

    Wrapper over an OpenSSL X509_STORE_CTX* pointer.
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown, when the m2 module global may already be torn down.
    m2_x509_store_ctx_free = m2.x509_store_ctx_free
    def __init__(self, x509_store_ctx, _pyfree=0):
        self.ctx = x509_store_ctx
        self._pyfree = _pyfree
    def __del__(self):
        # Consistency fix: use the getattr guard like every other wrapper
        # class in this module, so a failed __init__ (no _pyfree attribute)
        # cannot raise AttributeError from the destructor.
        if getattr(self, '_pyfree', 0):
            self.m2_x509_store_ctx_free(self.ctx)
    def _ptr(self):
        return self.ctx
    def get_current_cert(self):
        """
        Get current X.509 certificate.
        @warning: The returned certificate is NOT refcounted, so you can not
        rely on it being valid once the store context goes away or is modified.
        """
        return X509(m2.x509_store_ctx_get_current_cert(self.ctx), _pyfree=0)
    def get_error(self):
        """
        Get error code.
        """
        return m2.x509_store_ctx_get_error(self.ctx)
    def get_error_depth(self):
        """
        Get error depth.
        """
        return m2.x509_store_ctx_get_error_depth(self.ctx)
    def get1_chain(self):
        """
        Get certificate chain.
        @return: Reference counted (i.e. safe to use even after the store
        context goes away) stack of certificates in the chain.
        @rtype: X509_Stack
        """
        return X509_Stack(m2.x509_store_ctx_get1_chain(self.ctx), 1, 1)
class X509_Store:
    """
    X509 Store

    Wrapper over an OpenSSL X509_STORE* pointer, holding trusted
    certificates for verification.
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown.
    m2_x509_store_free = m2.x509_store_free
    def __init__(self, store=None, _pyfree=0):
        # Wrap an existing store pointer, or allocate a fresh empty store
        # (which Python then owns and must free).
        if store is not None:
            self.store = store
            self._pyfree = _pyfree
        else:
            self.store = m2.x509_store_new()
            self._pyfree = 1
    def __del__(self):
        if getattr(self, '_pyfree', 0):
            self.m2_x509_store_free(self.store)
    def _ptr(self):
        return self.store
    def load_info(self, file):
        # Load trusted certificates from a file; raises X509Error on
        # failure (the C call returns < 1).
        ret = m2.x509_store_load_locations(self.store, file)
        if ret < 1:
            raise X509Error(Err.get_error())
        return ret
    # Backwards-compatible alias.
    load_locations = load_info
    def add_x509(self, x509):
        # Add a trusted certificate (an X509 wrapper) to the store.
        assert isinstance(x509, X509)
        return m2.x509_store_add_cert(self.store, x509._ptr())
    # Backwards-compatible alias.
    add_cert = add_x509
class X509_Stack:
    """
    X509 Stack
    @warning: Do not modify the underlying OpenSSL stack
    except through this interface, or use any OpenSSL functions that do so
    indirectly. Doing so will get the OpenSSL stack and the internal pystack
    of this class out of sync, leading to python memory leaks, exceptions
    or even python crashes!
    """
    # Bound at class-definition time so __del__ works during interpreter
    # shutdown.
    m2_sk_x509_free = m2.sk_x509_free
    def __init__(self, stack=None, _pyfree=0, _pyfree_x509=0):
        # Wrap an existing stack (mirroring each element into pystack), or
        # allocate a fresh empty stack that Python owns.
        if stack is not None:
            self.stack = stack
            self._pyfree = _pyfree
            self.pystack = [] # This must be kept in sync with self.stack
            num = m2.sk_x509_num(self.stack)
            for i in range(num):
                self.pystack.append(X509(m2.sk_x509_value(self.stack, i),
                                         _pyfree=_pyfree_x509))
        else:
            self.stack = m2.sk_x509_new_null()
            self._pyfree = 1
            self.pystack = [] # This must be kept in sync with self.stack
    def __del__(self):
        if getattr(self, '_pyfree', 0):
            self.m2_sk_x509_free(self.stack)
    def __len__(self):
        assert m2.sk_x509_num(self.stack) == len(self.pystack)
        return len(self.pystack)
    def __getitem__(self, idx):
        return self.pystack[idx]
    def __iter__(self):
        return iter(self.pystack)
    def _ptr(self):
        return self.stack
    def push(self, x509):
        """
        push an X509 certificate onto the stack.
        @param x509: X509 object.
        @return: The number of X509 objects currently on the stack.
        """
        assert isinstance(x509, X509)
        self.pystack.append(x509)
        ret = m2.sk_x509_push(self.stack, x509._ptr())
        assert ret == len(self.pystack)
        return ret
    def pop(self):
        """
        pop a certificate from the stack.
        @return: X509 object that was popped, or None if there is nothing
        to pop.
        """
        x509_ptr = m2.sk_x509_pop(self.stack)
        if x509_ptr is None:
            assert len(self.pystack) == 0
            return None
        return self.pystack.pop()
    def as_der(self):
        """
        Return the stack as a DER encoded string
        """
        return m2.get_der_encoding_stack(self.stack)
def new_stack_from_der(der_string):
    """
    Create a new X509_Stack from DER string.
    @param der_string: DER-encoded SEQUENCE of certificates.
    @return: X509_Stack
    @raise X509Error: if the string cannot be decoded.
    """
    ptr = m2.make_stack_from_der_sequence(der_string)
    if ptr is not None:
        # Both the stack and its certificates become Python-owned.
        return X509_Stack(ptr, 1, 1)
    raise X509Error(Err.get_error())
class Request:
"""
X509 Certificate Request.
"""
m2_x509_req_free = m2.x509_req_free
    def __init__(self, req=None, _pyfree=0):
        # Wrap an existing X509_REQ pointer, or allocate a fresh empty
        # request (which Python then owns and must free).
        if req is not None:
            self.req = req
            self._pyfree = _pyfree
        else:
            self.req = m2.x509_req_new()
            self._pyfree = 1
    def __del__(self):
        # Free the underlying C object only when Python owns it; the
        # getattr guard tolerates a failed __init__.
        if getattr(self, '_pyfree', 0):
            self.m2_x509_req_free(self.req)
    def as_text(self):
        """Return a human-readable text rendering of the request."""
        buf=BIO.MemoryBuffer()
        m2.x509_req_print(buf.bio_ptr(), self.req)
        return buf.read_all()
    def as_pem(self):
        """Return the request PEM-encoded as a string."""
        buf=BIO.MemoryBuffer()
        m2.x509_req_write_pem(buf.bio_ptr(), self.req)
        return buf.read_all()
    def as_der(self):
        """Return the request DER-encoded as a byte string."""
        buf = BIO.MemoryBuffer()
        m2.i2d_x509_req_bio(buf.bio_ptr(), self.req)
        return buf.read_all()
def save_pem(self, filename):
bio=BIO.openfile(filename, 'wb')
return m2.x509_req_write_pem(bio.bio_ptr(), self.req)
def save(self, filename, format=FORMAT_PEM):
"""
Saves X.509 certificate request to a file. Default output
format is PEM.
@type filename: string
@param filename: Name of the file the request will be saved to.
@type format: int
@param format: Controls what output format is used to save the request.
Either FORMAT_PEM or FORMAT_DER to save in PEM or DER format.
Raises ValueError if an unknown format is used.
"""
bio = BIO.openfile(filename, 'wb')
if format == FORMAT_PEM:
return m2.x509_req_write_pem(bio.bio_ptr(), self.req)
elif format == FORMAT_DER:
return m2.i2d_x509_req_bio(bio.bio_ptr(), self.req)
else:
raise ValueError("Unknown filetype. Must be either FORMAT_DER or FORMAT_PEM")
def get_pubkey(self):
"""
Get the public key for the request.
@rtype: EVP_PKEY
@return: Public key from the request.
"""
return EVP.PKey(m2.x509_req_get_pubkey(self.req), _pyfree=1)
def set_pubkey(self, pkey):
"""
Set the public key for the request.
@type pkey: EVP_PKEY
@param pkey: Public key
@rtype: int
@return: Return 1 for success and 0 for failure.
"""
return m2.x509_req_set_pubkey( self.req, pkey.pkey )
def get_version(self):
"""
Get version.
@rtype: int
@return: Returns version.
"""
return m2.x509_req_get_version(self.req)
def set_version(self, version):
"""
Set version.
@type version: int
@param version: Version number.
@rtype: int
@return: Returns 0 on failure.
"""
return m2.x509_req_set_version( self.req, version )
def get_subject(self):
return X509_Name(m2.x509_req_get_subject_name( self.req ))
def set_subject_name(self, name):
"""
Set subject name.
@type name: X509_Name
@param name: subjectName field.
"""
return m2.x509_req_set_subject_name( self.req, name.x509_name )
set_subject = set_subject_name
def add_extensions(self, ext_stack):
"""
Add X509 extensions to this request.
@type ext_stack: X509_Extension_Stack
@param ext_stack: Stack of extensions to add.
"""
return m2.x509_req_add_extensions(self.req, ext_stack._ptr())
def verify(self, pkey):
return m2.x509_req_verify(self.req, pkey.pkey)
def sign(self, pkey, md):
mda = getattr(m2, md, None)
if mda is None:
raise ValueError, ('unknown message digest', md)
return m2.x509_req_sign(self.req, pkey.pkey, mda())
def load_request(file, format=FORMAT_PEM):
    """
    Load certificate request from file.

    @type file: string
    @param file: Name of file containing certificate request in either PEM or DER format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the file to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.Request
    @return: M2Crypto.X509.Request object.
    """
    f = BIO.openfile(file)
    try:
        if format == FORMAT_PEM:
            cptr = m2.x509_req_read_pem(f.bio_ptr())
        elif format == FORMAT_DER:
            cptr = m2.d2i_x509_req(f.bio_ptr())
        else:
            raise ValueError("Unknown filetype. Must be either FORMAT_PEM or FORMAT_DER")
    finally:
        # Close the BIO on every path; the previous code leaked the open
        # file when an unknown format was given.
        f.close()
    if cptr is None:
        raise X509Error(Err.get_error())
    return Request(cptr, 1)
def load_request_bio(bio, format=FORMAT_PEM):
    """
    Load certificate request from a bio.

    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO pointing at a certificate request in either DER or PEM format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the request to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.Request
    @return: M2Crypto.X509.Request object.
    """
    # Select the m2-level reader for the requested encoding.
    if format == FORMAT_PEM:
        reader = m2.x509_req_read_pem
    elif format == FORMAT_DER:
        reader = m2.d2i_x509_req
    else:
        raise ValueError("Unknown format. Must be either FORMAT_DER or FORMAT_PEM")
    cptr = reader(bio._ptr())
    if cptr is None:
        raise X509Error(Err.get_error())
    return Request(cptr, _pyfree=1)
def load_request_string(string, format=FORMAT_PEM):
    """
    Load certificate request from a string.

    @type string: string
    @param string: String containing a certificate request in either DER or PEM format.
    @type format: int, either FORMAT_PEM or FORMAT_DER
    @param format: Describes the format of the request to be loaded, either PEM or DER.
    @rtype: M2Crypto.X509.Request
    @return: M2Crypto.X509.Request object.
    """
    # Wrap the string in a memory BIO and delegate.
    return load_request_bio(BIO.MemoryBuffer(string), format)
def load_request_der_string(string):
    """
    Load certificate request from a string.

    @type string: string
    @param string: String containing a certificate request in DER format.
    @rtype: M2Crypto.X509.Request
    @return: M2Crypto.X509.Request object.
    """
    # DER-only convenience wrapper around load_request_bio().
    return load_request_bio(BIO.MemoryBuffer(string), FORMAT_DER)
class CRL:
    """
    X509 Certificate Revocation List
    """

    m2_x509_crl_free = m2.x509_crl_free

    def __init__(self, crl=None, _pyfree=0):
        """Wrap an existing X509_CRL pointer or allocate a fresh one."""
        if crl is None:
            self.crl = m2.x509_crl_new()
            self._pyfree = 1
        else:
            self.crl = crl
            self._pyfree = _pyfree

    def __del__(self):
        # getattr() guards against partially-constructed instances.
        if getattr(self, '_pyfree', 0):
            self.m2_x509_crl_free(self.crl)

    def as_text(self):
        """
        Return CRL in PEM format in a string.

        @rtype: string
        @return: String containing the CRL in PEM format.
        """
        buf = BIO.MemoryBuffer()
        m2.x509_crl_print(buf.bio_ptr(), self.crl)
        return buf.read_all()
def load_crl(file):
    """
    Load CRL from file.

    @type file: string
    @param file: Name of file containing CRL in PEM format.
    @rtype: M2Crypto.X509.CRL
    @return: M2Crypto.X509.CRL object.
    """
    bio = BIO.openfile(file)
    cptr = m2.x509_crl_read_pem(bio.bio_ptr())
    bio.close()
    if cptr is None:
        raise X509Error(Err.get_error())
    # Ownership flag 1: free the C structure on GC.
    return CRL(cptr, 1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/UpSample.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#endif
#include <c10/util/irange.h>
#include <cstring>
namespace at::native {
// Define a typedef to dispatch to nearest_idx or nearest_exact_idx
typedef int64_t (*nn_compute_source_index_fn_t)(const float, int64_t, int64_t);
// at::native functions for the native_functions.yaml
// Nearest-neighbor 3D upsampling over a contiguous (NCDHW) tensor.
// scalar_t is a quantized type (see AT_DISPATCH_QINT_TYPES at the call
// site); all work happens on the raw scalar_t::underlying representation,
// so no dequantization is performed here.
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest3d_out_frame(
    scalar_t* odata,
    scalar_t* idata,
    int64_t input_depth,
    int64_t input_height,
    int64_t input_width,
    int64_t output_depth,
    int64_t output_height,
    int64_t output_width,
    int64_t nbatch,
    int64_t channels,
    std::optional<double> scales_d,
    std::optional<double> scales_h,
    std::optional<double> scales_w) {
  float depth_scale = compute_scales_value<float>(scales_d, input_depth, output_depth);
  float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
  float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
  // Batch and channel dims are interchangeable for this layout: fold them.
  channels = channels * nbatch;
  // Nothing to do for empty outputs.
  if (channels == 0 || output_depth == 0 || output_height == 0 || output_width == 0) {
    return;
  }
  auto* i_p = reinterpret_cast<typename scalar_t::underlying*>(idata);
  auto* o_p = reinterpret_cast<typename scalar_t::underlying*>(odata);
  // special case: just copy (same spatial size; note channels already
  // includes nbatch, so this covers the whole tensor)
  if (input_depth == output_depth && input_height == output_height && input_width == output_width) {
    std::memcpy(o_p, i_p, channels * input_depth * input_height * input_width * sizeof(typename scalar_t::underlying));
    return;
  }
  for (const auto d2 : c10::irange(output_depth)) {
    const int64_t d1 =
        nn_compute_source_index_fn(depth_scale, d2, input_depth);
    for (const auto h2 : c10::irange(output_height)) {
      const int64_t h1 =
          nn_compute_source_index_fn(height_scale, h2, input_height);
      for (const auto w2 : c10::irange(output_width)) {
        const int64_t w1 =
            nn_compute_source_index_fn(width_scale, w2, input_width);
        // pos1/pos2 start at the source/destination voxel and step one
        // full spatial plane per (folded) channel iteration.
        const auto* pos1 = &i_p[d1 * input_height * input_width + h1 * input_width + w1];
        auto* pos2 = &o_p[d2 * output_height * output_width + h2 * output_width + w2];
        for ([[maybe_unused]] const auto c : c10::irange(channels)) {
          pos2[0] = pos1[0];
          pos1 += input_depth * input_height * input_width;
          pos2 += output_depth * output_height * output_width;
        }
      }
    }
  }
}
// Nearest-neighbor 3D upsampling over a channels-last (NDHWC) tensor.
// scalar_t is a quantized type; each (d, h, w) output voxel copies the
// entire contiguous channel vector from its nearest source voxel.
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest3d_out_frame_nhwc(
    scalar_t* odata,
    scalar_t* idata,
    int64_t input_depth,
    int64_t input_height,
    int64_t input_width,
    int64_t output_depth,
    int64_t output_height,
    int64_t output_width,
    int64_t nbatch,
    int64_t channels,
    std::optional<double> scales_d,
    std::optional<double> scales_h,
    std::optional<double> scales_w) {
  float depth_scale = compute_scales_value<float>(scales_d, input_depth, output_depth);
  float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
  float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
  // special case: same spatial size -> copy the whole tensor at once.
  // BUGFIX: the previous code did this memcpy inside the batch loop and
  // then returned, which copied only batch 0 when nbatch > 1.
  if (input_depth == output_depth && input_height == output_height && input_width == output_width) {
    std::memcpy(
        reinterpret_cast<typename scalar_t::underlying*>(odata),
        reinterpret_cast<typename scalar_t::underlying*>(idata),
        nbatch * channels * input_depth * input_height * input_width *
            sizeof(typename scalar_t::underlying));
    return;
  }
  for (const auto b : c10::irange(nbatch)) {
    auto* i_p = reinterpret_cast<typename scalar_t::underlying*>(idata + b * input_depth * input_height * input_width * channels);
    auto* o_p = reinterpret_cast<typename scalar_t::underlying*>(odata + b * output_depth * output_height * output_width * channels);
    for (const auto d2 : c10::irange(output_depth)) {
      const int64_t d1 =
          nn_compute_source_index_fn(depth_scale, d2, input_depth);
      for (const auto h2 : c10::irange(output_height)) {
        const int64_t h1 =
            nn_compute_source_index_fn(height_scale, h2, input_height);
        for (const auto w2 : c10::irange(output_width)) {
          const int64_t w1 =
              nn_compute_source_index_fn(width_scale, w2, input_width);
          // Channel vectors are contiguous in NDHWC: one memcpy per voxel.
          const auto* pos1 = &i_p[(d1 * input_height * input_width + h1 * input_width + w1)*channels];
          auto* pos2 = &o_p[(d2 * output_height * output_width + h2 * output_width + w2)*channels];
          std::memcpy(pos2, pos1, channels * sizeof(typename scalar_t::underlying));
        }
      }
    }
  }
}
// Shared implementation for both nearest variants: validates shapes,
// allocates a quantized output with the input's scale/zero-point, and
// dispatches to the layout-appropriate kernel.
template <nn_compute_source_index_fn_t nn_compute_source_index_fn>
static Tensor _upsample_nearest3d_quantized_cpu(
    const Tensor& input,
    IntArrayRef output_size,
    std::optional<double> scales_d,
    std::optional<double> scales_h,
    std::optional<double> scales_w) {
  TORCH_CHECK(
      output_size.size() == 3,
      "It is expected output_size equals to 3, but got size ",
      output_size.size());
  TORCH_CHECK(
      input.numel() != 0 && input.dim() == 5,
      "Non-empty 5D data tensor expected but got a tensor with sizes ",
      input.sizes());
  int64_t output_depth = output_size[0];
  int64_t output_height = output_size[1];
  int64_t output_width = output_size[2];
  // Input is NCDHW per the 5D check above.
  int64_t nbatch = input.size(0);
  int64_t channels = input.size(1);
  int64_t input_depth = input.size(2);
  int64_t input_height = input.size(3);
  int64_t input_width = input.size(4);
  AT_ASSERT(input_width > 0 && output_width > 0);
  if (input.is_contiguous(c10::MemoryFormat::ChannelsLast3d)) {
    // Channels-last path: keep the input's memory format for the output.
    Tensor output = at::_empty_affine_quantized(
        {nbatch, channels, output_depth, output_height, output_width},
        input.options().memory_format(input.suggest_memory_format()),
        input.q_scale(),
        input.q_zero_point(),
        std::nullopt);
    AT_DISPATCH_QINT_TYPES(input.scalar_type(), "upsample_nearest3d", [&] {
      auto* idata = static_cast<scalar_t*>(input.data_ptr());
      auto* odata = static_cast<scalar_t*>(output.data_ptr());
      upsample_nearest3d_out_frame_nhwc<scalar_t, nn_compute_source_index_fn>(
          odata,
          idata,
          input_depth,
          input_height,
          input_width,
          output_depth,
          output_height,
          output_width,
          nbatch,
          channels,
          scales_d,
          scales_h,
          scales_w);
    });
    return output;
  } else {
    // Contiguous path: force a contiguous copy of the input first.
    Tensor output = at::_empty_affine_quantized(
        {nbatch, channels, output_depth, output_height, output_width},
        input.options(),
        input.q_scale(),
        input.q_zero_point());
    auto input_contig = input.contiguous();
    AT_DISPATCH_QINT_TYPES(input_contig.scalar_type(), "upsample_nearest3d", [&] {
      auto* idata = static_cast<scalar_t*>(input_contig.data_ptr());
      auto* odata = static_cast<scalar_t*>(output.data_ptr());
      upsample_nearest3d_out_frame<scalar_t, nn_compute_source_index_fn>(
          odata,
          idata,
          input_depth,
          input_height,
          input_width,
          output_depth,
          output_height,
          output_width,
          nbatch,
          channels,
          scales_d,
          scales_h,
          scales_w);
    });
    return output;
  }
}
// Public entry point: "nearest" (floor) index variant of quantized 3D
// upsampling. Pure delegation to the shared template implementation.
Tensor upsample_nearest3d_quantized_cpu(
    const Tensor& input,
    IntArrayRef osize,
    std::optional<double> scale_d,
    std::optional<double> scale_h,
    std::optional<double> scale_w) {
  return _upsample_nearest3d_quantized_cpu<nearest_neighbor_compute_source_index>(
      input, osize, scale_d, scale_h, scale_w);
}
// Public entry point: "nearest-exact" index variant of quantized 3D
// upsampling. Pure delegation to the shared template implementation.
Tensor _upsample_nearest_exact3d_quantized_cpu(
    const Tensor& input,
    IntArrayRef osize,
    std::optional<double> scale_d,
    std::optional<double> scale_h,
    std::optional<double> scale_w) {
  return _upsample_nearest3d_quantized_cpu<nearest_neighbor_exact_compute_source_index>(
      input, osize, scale_d, scale_h, scale_w);
}
} // namespace at::native
|
cpp
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp
|
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"context"
"fmt"
"reflect"
"testing"
"github.com/coreos/go-semver/semver"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/api/v3/version"
)
// TestMemberMinimalVersion checks that the monitor reports the lowest
// server version among members, and nil when any member is unknown.
func TestMemberMinimalVersion(t *testing.T) {
	cases := []struct {
		memberVersions map[string]*version.Versions
		wantVersion    *semver.Version
	}{
		{
			map[string]*version.Versions{"a": {Server: "2.0.0"}},
			semver.Must(semver.NewVersion("2.0.0")),
		},
		// unknown
		{
			map[string]*version.Versions{"a": nil},
			nil,
		},
		{
			map[string]*version.Versions{"a": {Server: "2.0.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
			semver.Must(semver.NewVersion("2.0.0")),
		},
		{
			map[string]*version.Versions{"a": {Server: "2.1.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
			semver.Must(semver.NewVersion("2.1.0")),
		},
		{
			map[string]*version.Versions{"a": nil, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
			nil,
		},
	}
	for i, tc := range cases {
		m := NewMonitor(zaptest.NewLogger(t), &storageMock{
			memberVersions: tc.memberVersions,
		})
		got := m.membersMinimalServerVersion()
		if !reflect.DeepEqual(got, tc.wantVersion) {
			t.Errorf("#%d: ver = %+v, want %+v", i, got, tc.wantVersion)
		}
	}
}
// TestDecideStorageVersion checks how UpdateStorageVersionIfNeeded derives
// the storage version from the cluster version.
// NOTE(review): this largely duplicates TestUpdateStorageVersionIfNeeded
// below — consider consolidating.
func TestDecideStorageVersion(t *testing.T) {
	cases := []struct {
		name                 string
		clusterVersion       *semver.Version
		storageVersion       *semver.Version
		expectStorageVersion *semver.Version
	}{
		{
			name: "No action if cluster version is nil",
		},
		{
			name:                 "Should set storage version if cluster version is set",
			clusterVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "No action if storage version was already set",
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "No action if storage version equals cluster version",
			clusterVersion:       &version.V3_5,
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "Should set storage version to cluster version",
			clusterVersion:       &version.V3_6,
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_6,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			mock := &storageMock{
				clusterVersion: tc.clusterVersion,
				storageVersion: tc.storageVersion,
			}
			m := NewMonitor(zaptest.NewLogger(t), mock)
			m.UpdateStorageVersionIfNeeded()
			if !reflect.DeepEqual(mock.storageVersion, tc.expectStorageVersion) {
				t.Errorf("Unexpected storage version value, got = %+v, want %+v", mock.storageVersion, tc.expectStorageVersion)
			}
		})
	}
}
// TestVersionMatchTarget checks whether all member versions are considered
// to match a downgrade target version.
func TestVersionMatchTarget(t *testing.T) {
	cases := []struct {
		name             string
		targetVersion    *semver.Version
		versionMap       map[string]*version.Versions
		expectedFinished bool
	}{
		{
			"When downgrade finished",
			&semver.Version{Major: 3, Minor: 4},
			map[string]*version.Versions{
				"mem1": {Server: "3.4.1", Cluster: "3.4.0"},
				"mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
				"mem3": {Server: "3.4.2", Cluster: "3.4.0"},
			},
			true,
		},
		{
			"When cannot parse peer version",
			&semver.Version{Major: 3, Minor: 4},
			map[string]*version.Versions{
				"mem1": {Server: "3.4", Cluster: "3.4.0"},
				"mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
				"mem3": {Server: "3.4.2", Cluster: "3.4.0"},
			},
			false,
		},
		{
			"When downgrade not finished",
			&semver.Version{Major: 3, Minor: 4},
			map[string]*version.Versions{
				"mem1": {Server: "3.4.1", Cluster: "3.4.0"},
				"mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
				"mem3": {Server: "3.5.2", Cluster: "3.5.0"},
			},
			false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			m := NewMonitor(zaptest.NewLogger(t), &storageMock{
				memberVersions: tc.versionMap,
			})
			got := m.versionsMatchTarget(tc.targetVersion)
			if got != tc.expectedFinished {
				t.Errorf("expected downgrade finished is %v; got %v", tc.expectedFinished, got)
			}
		})
	}
}
// TestUpdateClusterVersionIfNeeded exercises UpdateClusterVersionIfNeeded
// against combinations of member versions, current cluster version and
// downgrade state, checking the resulting cluster version and error.
func TestUpdateClusterVersionIfNeeded(t *testing.T) {
	tests := []struct {
		name                 string
		clusterVersion       *semver.Version
		memberVersions       map[string]*version.Versions
		downgrade            *DowngradeInfo
		expectClusterVersion *semver.Version
		expectError          error
	}{
		{
			name:                 "Default to 3.0 if there are no members",
			expectClusterVersion: &version.V3_0,
		},
		{
			name: "Should pick lowest server version from members",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.6.0"},
				"b": {Server: "3.5.0"},
			},
			expectClusterVersion: &version.V3_5,
		},
		{
			name: "Should support not full releases",
			memberVersions: map[string]*version.Versions{
				"b": {Server: "3.5.0-alpha.0"},
			},
			expectClusterVersion: &version.V3_5,
		},
		{
			name: "Sets minimal version when member has broken version",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.6.0"},
				"b": {Server: "yyyy"},
			},
			expectClusterVersion: &version.V3_0,
		},
		{
			name: "Should not downgrade cluster version without explicit downgrade request",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.5.0"},
				"b": {Server: "3.6.0"},
			},
			clusterVersion:       &version.V3_6,
			expectClusterVersion: &version.V3_6,
		},
		{
			name: "Should not upgrade cluster version if there is still member old member",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.5.0"},
				"b": {Server: "3.6.0"},
			},
			clusterVersion:       &version.V3_5,
			expectClusterVersion: &version.V3_5,
		},
		{
			name: "Should upgrade cluster version if all members have upgraded (have higher server version)",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.6.0"},
				"b": {Server: "3.6.0"},
			},
			clusterVersion:       &version.V3_5,
			expectClusterVersion: &version.V3_6,
		},
		{
			name: "Should downgrade cluster version if downgrade is set to allow older members to join",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.6.0"},
				"b": {Server: "3.6.0"},
			},
			clusterVersion:       &version.V3_6,
			downgrade:            &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
			expectClusterVersion: &version.V3_5,
		},
		{
			name: "Don't downgrade below supported range",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.6.0"},
				"b": {Server: "3.6.0"},
			},
			clusterVersion:       &version.V3_5,
			downgrade:            &DowngradeInfo{TargetVersion: "3.4.0", Enabled: true},
			expectClusterVersion: &version.V3_5,
			expectError:          fmt.Errorf("invalid downgrade target"),
		},
		{
			name: "Don't downgrade above cluster version",
			memberVersions: map[string]*version.Versions{
				"a": {Server: "3.5.0"},
				"b": {Server: "3.5.0"},
			},
			clusterVersion:       &version.V3_5,
			downgrade:            &DowngradeInfo{TargetVersion: "3.6.0", Enabled: true},
			expectClusterVersion: &version.V3_5,
			expectError:          fmt.Errorf("invalid downgrade target"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &storageMock{
				clusterVersion: tt.clusterVersion,
				memberVersions: tt.memberVersions,
				downgradeInfo:  tt.downgrade,
			}
			monitor := NewMonitor(zaptest.NewLogger(t), s)
			err := monitor.UpdateClusterVersionIfNeeded()
			assert.Equal(t, tt.expectClusterVersion, s.clusterVersion)
			assert.Equal(t, tt.expectError, err)
			// Ensure results are stable
			// (a second decision right after the update should be a no-op).
			newVersion, err := monitor.decideClusterVersion()
			assert.Nil(t, newVersion)
			assert.Equal(t, tt.expectError, err)
		})
	}
}
// TestCancelDowngradeIfNeeded checks that an in-progress downgrade is
// cancelled exactly when all members have reached the target version
// (or there are no members), and is otherwise left untouched.
func TestCancelDowngradeIfNeeded(t *testing.T) {
	tests := []struct {
		name            string
		memberVersions  map[string]*version.Versions
		downgrade       *DowngradeInfo
		expectDowngrade *DowngradeInfo
	}{
		{
			name: "No action if there no downgrade in progress",
		},
		{
			name:            "Cancel downgrade if there are no members",
			downgrade:       &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
			expectDowngrade: nil,
		},
		// Next entries go through all states that should happen during downgrade
		{
			name: "No action if downgrade was not started",
			memberVersions: map[string]*version.Versions{
				"a": {Cluster: "3.6.0", Server: "3.6.1"},
				"b": {Cluster: "3.6.0", Server: "3.6.2"},
			},
		},
		{
			name: "Continue downgrade if just started",
			memberVersions: map[string]*version.Versions{
				"a": {Cluster: "3.5.0", Server: "3.6.1"},
				"b": {Cluster: "3.5.0", Server: "3.6.2"},
			},
			downgrade:       &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
			expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
		},
		{
			name: "Continue downgrade if there is at least one member with not matching",
			memberVersions: map[string]*version.Versions{
				"a": {Cluster: "3.5.0", Server: "3.5.1"},
				"b": {Cluster: "3.5.0", Server: "3.6.2"},
			},
			downgrade:       &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
			expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
		},
		{
			name: "Cancel downgrade if all members have downgraded",
			memberVersions: map[string]*version.Versions{
				"a": {Cluster: "3.5.0", Server: "3.5.1"},
				"b": {Cluster: "3.5.0", Server: "3.5.2"},
			},
			downgrade:       &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
			expectDowngrade: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &storageMock{
				memberVersions: tt.memberVersions,
				downgradeInfo:  tt.downgrade,
			}
			monitor := NewMonitor(zaptest.NewLogger(t), s)
			// Run multiple times to ensure that results are stable
			for i := 0; i < 3; i++ {
				monitor.CancelDowngradeIfNeeded()
				assert.Equal(t, tt.expectDowngrade, s.downgradeInfo)
			}
		})
	}
}
// TestUpdateStorageVersionIfNeeded checks that the storage version is
// synchronized with the cluster version, idempotently.
func TestUpdateStorageVersionIfNeeded(t *testing.T) {
	cases := []struct {
		name                 string
		clusterVersion       *semver.Version
		storageVersion       *semver.Version
		expectStorageVersion *semver.Version
	}{
		{
			name: "No action if cluster version is nil",
		},
		{
			name:                 "Should set storage version if cluster version is set",
			clusterVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "No action if storage version was already set",
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "No action if storage version equals cluster version",
			clusterVersion:       &version.V3_5,
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_5,
		},
		{
			name:                 "Should set storage version to cluster version",
			clusterVersion:       &version.V3_6,
			storageVersion:       &version.V3_5,
			expectStorageVersion: &version.V3_6,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			mock := &storageMock{
				clusterVersion: tc.clusterVersion,
				storageVersion: tc.storageVersion,
			}
			m := NewMonitor(zaptest.NewLogger(t), mock)
			// Run multiple times to ensure that results are stable
			for i := 0; i < 3; i++ {
				m.UpdateStorageVersionIfNeeded()
				assert.Equal(t, tc.expectStorageVersion, mock.storageVersion)
			}
		})
	}
}
// storageMock is an in-memory stand-in for the monitor's Server dependency,
// letting the tests above observe what the monitor reads and writes.
type storageMock struct {
	memberVersions map[string]*version.Versions
	clusterVersion *semver.Version
	storageVersion *semver.Version
	downgradeInfo  *DowngradeInfo
}

// Compile-time check that storageMock implements Server.
var _ Server = (*storageMock)(nil)

func (s *storageMock) UpdateClusterVersion(version string) {
	s.clusterVersion = semver.New(version)
}

// LinearizableReadNotify is a no-op for the mock.
func (s *storageMock) LinearizableReadNotify(ctx context.Context) error {
	return nil
}

// DowngradeEnable is a no-op for the mock.
func (s *storageMock) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
	return nil
}

// DowngradeCancel clears the recorded downgrade state.
func (s *storageMock) DowngradeCancel(ctx context.Context) error {
	s.downgradeInfo = nil
	return nil
}

func (s *storageMock) GetClusterVersion() *semver.Version {
	return s.clusterVersion
}

func (s *storageMock) GetDowngradeInfo() *DowngradeInfo {
	return s.downgradeInfo
}

func (s *storageMock) GetMembersVersions() map[string]*version.Versions {
	return s.memberVersions
}

func (s *storageMock) GetStorageVersion() *semver.Version {
	return s.storageVersion
}

func (s *storageMock) UpdateStorageVersion(v semver.Version) error {
	s.storageVersion = &v
	return nil
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
server/etcdserver/version/monitor_test.go
|
# -*- Mode: Python; -*-
#
# Copyright 2010 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import ns.core
import ns.csma
import ns.internet
import ns.network
import ns.tap_bridge
def main(argv):
    """Build a two-node CSMA network and bridge each node to a host tap
    device ('tap-left'/'tap-right'), then run in real time for 600 s."""
    #
    # We are interacting with the outside, real, world.  This means we have to
    # interact in real-time and therefore we have to use the real-time simulator
    # and take the time to calculate checksums.
    #
    ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
    ns.core.GlobalValue.Bind("ChecksumEnabled", ns.core.BooleanValue("true"))
    #
    # Create two ghost nodes.  The first will represent the virtual machine host
    # on the left side of the network; and the second will represent the VM on
    # the right side.
    #
    nodes = ns.network.NodeContainer()
    nodes.Create (2)
    #
    # Use a CsmaHelper to get a CSMA channel created, and the needed net
    # devices installed on both of the nodes.  The data rate and delay for the
    # channel can be set through the command-line parser.
    #
    csma = ns.csma.CsmaHelper()
    devices = csma.Install(nodes)
    #
    # Use the TapBridgeHelper to connect to the pre-configured tap devices for
    # the left side.  We go with "UseLocal" mode since the wifi devices do not
    # support promiscuous mode (because of their natures).  This is a special
    # case mode that allows us to extend a linux bridge into ns-3 IFF we will
    # only see traffic from one other device on that bridge.  That is the case
    # for this configuration.
    #
    tapBridge = ns.tap_bridge.TapBridgeHelper()
    tapBridge.SetAttribute ("Mode", ns.core.StringValue ("UseLocal"))
    tapBridge.SetAttribute ("DeviceName", ns.core.StringValue ("tap-left"))
    tapBridge.Install (nodes.Get (0), devices.Get (0))
    #
    # Connect the right side tap to the right side wifi device on the right-side
    # ghost node.
    #
    tapBridge.SetAttribute ("DeviceName", ns.core.StringValue ("tap-right"))
    tapBridge.Install (nodes.Get (1), devices.Get (1))
    #
    # Run the simulation for ten minutes to give the user time to play around
    #
    ns.core.Simulator.Stop (ns.core.Seconds (600))
    ns.core.Simulator.Run(signal_check_frequency = -1)
    ns.core.Simulator.Destroy()
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Module that wraps all OpenSSL hash algorithms */
/*
* Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
* Licensed to PSF under a Contributor Agreement.
*
* Derived from a skeleton of shamodule.c containing work performed by:
*
* Andrew Kuchling (amk@amk.ca)
* Greg Stein (gstein@lyra.org)
*
*/
/* Don't warn about deprecated functions, */
#ifndef OPENSSL_API_COMPAT
// 0x10101000L == 1.1.1, 30000 == 3.0.0
#define OPENSSL_API_COMPAT 0x10101000L
#endif
#define OPENSSL_NO_DEPRECATED 1
#ifndef Py_BUILD_CORE_BUILTIN
# define Py_BUILD_CORE_MODULE 1
#endif
#include "Python.h"
#include "pycore_hashtable.h"
#include "pycore_strhex.h" // _Py_strhex()
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_LOAD_PTR_RELAXED
#include "hashlib.h"
/* EVP is the preferred interface to hashing in OpenSSL */
#include <openssl/evp.h>
#include <openssl/crypto.h> // FIPS_mode()
/* We use the object interface to discover what hashes OpenSSL supports. */
#include <openssl/objects.h>
#include <openssl/err.h>
#include <stdbool.h>
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
# define Py_HAS_OPENSSL3_SUPPORT
# include <openssl/core_names.h> // OSSL_MAC_PARAM_DIGEST
# include <openssl/params.h> // OSSL_PARAM_*()
#else
# include <openssl/hmac.h> // HMAC()
#endif
#ifndef OPENSSL_THREADS
# error "OPENSSL_THREADS is not defined, Python requires thread-safe OpenSSL"
#endif
#define MUNCH_SIZE INT_MAX
#if defined(NID_sha3_224) && defined(NID_sha3_256) && defined(NID_sha3_384) && defined(NID_sha3_512)
#define PY_OPENSSL_HAS_SHA3 1
#endif
#if defined(NID_shake128) || defined(NID_shake256)
#define PY_OPENSSL_HAS_SHAKE 1
#endif
#if defined(NID_blake2s256) || defined(NID_blake2b512)
#define PY_OPENSSL_HAS_BLAKE2 1
#endif
#ifdef Py_HAS_OPENSSL3_SUPPORT
#define PY_EVP_MD EVP_MD
#define PY_EVP_MD_fetch(algorithm, properties) EVP_MD_fetch(NULL, algorithm, properties)
#define PY_EVP_MD_up_ref(md) EVP_MD_up_ref(md)
#define PY_EVP_MD_free(md) EVP_MD_free(md)
#define PY_EVP_MD_CTX_md(CTX) EVP_MD_CTX_get0_md(CTX)
#define PY_HMAC_CTX_TYPE EVP_MAC_CTX
#define PY_HMAC_CTX_free EVP_MAC_CTX_free
#define PY_HMAC_update EVP_MAC_update
#else
#define PY_EVP_MD const EVP_MD
#define PY_EVP_MD_fetch(algorithm, properties) EVP_get_digestbyname(algorithm)
#define PY_EVP_MD_up_ref(md) do {} while(0)
#define PY_EVP_MD_free(md) do {} while(0)
#define PY_EVP_MD_CTX_md(CTX) EVP_MD_CTX_md(CTX)
#define PY_HMAC_CTX_TYPE HMAC_CTX
#define PY_HMAC_CTX_free HMAC_CTX_free
#define PY_HMAC_update HMAC_Update
#endif
/*
* Return 1 if *md* is an extendable-output Function (XOF) and 0 otherwise.
* SHAKE128 and SHAKE256 are XOF functions but not BLAKE2B algorithms.
*
* This is a backport of the EVP_MD_xof() helper added in OpenSSL 3.4.
*/
static inline int
PY_EVP_MD_xof(PY_EVP_MD *md)
{
    /* A missing digest is never an XOF. */
    if (md == NULL) {
        return 0;
    }
    return (EVP_MD_flags(md) & EVP_MD_FLAG_XOF) != 0;
}
/* hash alias map and fast lookup
*
* Map between Python's preferred names and OpenSSL internal names. Maintain
* cache of fetched EVP MD objects. The EVP_get_digestbyname() and
* EVP_MD_fetch() API calls have a performance impact.
*
* The py_hashentry_t items are stored in a _Py_hashtable_t with py_name and
* py_alias as keys.
*/
/* Hash usage context: selects which security policy applies when a digest
   is requested. */
typedef enum Py_hash_type {
    Py_ht_evp,            // usedforsecurity=True / default
    Py_ht_evp_nosecurity, // usedforsecurity=False
    Py_ht_mac,            // HMAC
    Py_ht_pbkdf2,         // PBKDF2
} Py_hash_type;

/* One entry of the hash-name table: maps Python's preferred name (and an
   optional alias) to OpenSSL's short name/NID, plus cached EVP_MD handles
   for both security policies. */
typedef struct {
    const char *py_name;       // Python's preferred algorithm name
    const char *py_alias;      // optional alias (NULL if none)
    const char *ossl_name;     // OpenSSL short name (SN_*)
    int ossl_nid;              // OpenSSL NID
    int refcnt;                // NOTE(review): presumably tracks fetches; managed elsewhere — confirm
    PY_EVP_MD *evp;            // cached digest, usedforsecurity=True
    PY_EVP_MD *evp_nosecurity; // cached digest, usedforsecurity=False
} py_hashentry_t;
// Fundamental to TLS, assumed always present in any libcrypto:
#define Py_hash_md5 "md5"
#define Py_hash_sha1 "sha1"
#define Py_hash_sha224 "sha224"
#define Py_hash_sha256 "sha256"
#define Py_hash_sha384 "sha384"
#define Py_hash_sha512 "sha512"
// Not all OpenSSL-like libcrypto libraries provide these:
// each Py_hash_* macro is defined only when the matching NID exists, so the
// table entries below can be compiled out together with the algorithm.
#if defined(NID_sha512_224)
# define Py_hash_sha512_224 "sha512_224"
#endif
#if defined(NID_sha512_256)
# define Py_hash_sha512_256 "sha512_256"
#endif
#if defined(NID_sha3_224)
# define Py_hash_sha3_224 "sha3_224"
#endif
#if defined(NID_sha3_256)
# define Py_hash_sha3_256 "sha3_256"
#endif
#if defined(NID_sha3_384)
# define Py_hash_sha3_384 "sha3_384"
#endif
#if defined(NID_sha3_512)
# define Py_hash_sha3_512 "sha3_512"
#endif
#if defined(NID_shake128)
# define Py_hash_shake_128 "shake_128"
#endif
#if defined(NID_shake256)
# define Py_hash_shake_256 "shake_256"
#endif
#if defined(NID_blake2s256)
# define Py_hash_blake2s "blake2s"
#endif
#if defined(NID_blake2b512)
# define Py_hash_blake2b "blake2b"
#endif
/* Build a py_hashentry_t initializer with zeroed refcount and cache slots. */
#define PY_HASH_ENTRY(py_name, py_alias, ossl_name, ossl_nid) \
    {py_name, py_alias, ossl_name, ossl_nid, 0, NULL, NULL}
/* Digest name lookup table, terminated by an all-NULL sentinel entry. */
static const py_hashentry_t py_hashes[] = {
    /* md5 */
    PY_HASH_ENTRY(Py_hash_md5, "MD5", SN_md5, NID_md5),
    /* sha1 */
    PY_HASH_ENTRY(Py_hash_sha1, "SHA1", SN_sha1, NID_sha1),
    /* sha2 family */
    PY_HASH_ENTRY(Py_hash_sha224, "SHA224", SN_sha224, NID_sha224),
    PY_HASH_ENTRY(Py_hash_sha256, "SHA256", SN_sha256, NID_sha256),
    PY_HASH_ENTRY(Py_hash_sha384, "SHA384", SN_sha384, NID_sha384),
    PY_HASH_ENTRY(Py_hash_sha512, "SHA512", SN_sha512, NID_sha512),
    /* truncated sha2 */
#ifdef Py_hash_sha512_224
    PY_HASH_ENTRY(Py_hash_sha512_224, "SHA512_224", SN_sha512_224, NID_sha512_224),
#endif
#ifdef Py_hash_sha512_256
    PY_HASH_ENTRY(Py_hash_sha512_256, "SHA512_256", SN_sha512_256, NID_sha512_256),
#endif
    /* sha3 */
#ifdef Py_hash_sha3_224
    PY_HASH_ENTRY(Py_hash_sha3_224, NULL, SN_sha3_224, NID_sha3_224),
#endif
#ifdef Py_hash_sha3_256
    PY_HASH_ENTRY(Py_hash_sha3_256, NULL, SN_sha3_256, NID_sha3_256),
#endif
#ifdef Py_hash_sha3_384
    PY_HASH_ENTRY(Py_hash_sha3_384, NULL, SN_sha3_384, NID_sha3_384),
#endif
#ifdef Py_hash_sha3_512
    PY_HASH_ENTRY(Py_hash_sha3_512, NULL, SN_sha3_512, NID_sha3_512),
#endif
    /* sha3 shake */
#ifdef Py_hash_shake_128
    PY_HASH_ENTRY(Py_hash_shake_128, NULL, SN_shake128, NID_shake128),
#endif
#ifdef Py_hash_shake_256
    PY_HASH_ENTRY(Py_hash_shake_256, NULL, SN_shake256, NID_shake256),
#endif
    /* blake2 digest */
#ifdef Py_hash_blake2s
    PY_HASH_ENTRY(Py_hash_blake2s, "blake2s256", SN_blake2s256, NID_blake2s256),
#endif
#ifdef Py_hash_blake2b
    PY_HASH_ENTRY(Py_hash_blake2b, "blake2b512", SN_blake2b512, NID_blake2b512),
#endif
    PY_HASH_ENTRY(NULL, NULL, NULL, 0),
};
/* Key hash function: hash the NUL-terminated digest name. */
static Py_uhash_t
py_hashentry_t_hash_name(const void *key) {
    const char *name = (const char *)key;
    return Py_HashBuffer(name, strlen(name));
}
/* Key equality function: NUL-terminated string comparison (1 if equal). */
static int
py_hashentry_t_compare_name(const void *key1, const void *key2) {
    const char *name1 = (const char *)key1;
    const char *name2 = (const char *)key2;
    return strcmp(name1, name2) == 0;
}
/* Value destructor for the hash table.
 *
 * An entry may be stored under up to two keys (name and alias); it is
 * released, together with any cached EVP_MD objects, only when the last
 * key referencing it is destroyed.
 */
static void
py_hashentry_t_destroy_value(void *entry) {
    py_hashentry_t *h = (py_hashentry_t *)entry;
    if (--(h->refcnt) == 0) {
        if (h->evp != NULL) {
            PY_EVP_MD_free(h->evp);
            h->evp = NULL;
        }
        if (h->evp_nosecurity != NULL) {
            PY_EVP_MD_free(h->evp_nosecurity);
            h->evp_nosecurity = NULL;
        }
        PyMem_Free(entry);
    }
}
/*
 * Build the name/alias -> py_hashentry_t lookup table.
 *
 * Both the Python name and (when present) the alias map to one shared
 * heap-allocated entry; 'refcnt' counts how many keys reference it so the
 * destroy callback frees it exactly once.
 *
 * Returns NULL on allocation/insertion failure (no Python exception set).
 */
static _Py_hashtable_t *
py_hashentry_table_new(void) {
    _Py_hashtable_t *ht = _Py_hashtable_new_full(
        py_hashentry_t_hash_name,
        py_hashentry_t_compare_name,
        NULL,
        py_hashentry_t_destroy_value,
        NULL
    );
    if (ht == NULL) {
        return NULL;
    }
    for (const py_hashentry_t *h = py_hashes; h->py_name != NULL; h++) {
        py_hashentry_t *entry = (py_hashentry_t *)PyMem_Malloc(sizeof(py_hashentry_t));
        if (entry == NULL) {
            goto error;
        }
        memcpy(entry, h, sizeof(py_hashentry_t));
        if (_Py_hashtable_set(ht, (const void*)entry->py_name, (void*)entry) < 0) {
            /* Not yet owned by the table: free it here. */
            PyMem_Free(entry);
            goto error;
        }
        entry->refcnt = 1;
        if (h->py_alias != NULL) {
            if (_Py_hashtable_set(ht, (const void*)entry->py_alias, (void*)entry) < 0) {
                /* BUGFIX: do NOT PyMem_Free(entry) here. The table already
                 * owns the entry through the py_name key (refcnt == 1), so
                 * freeing it now would make py_hashentry_t_destroy_value()
                 * operate on freed memory during _Py_hashtable_destroy()
                 * below (use-after-free / double free). Let the destroy
                 * callback release it. */
                goto error;
            }
            entry->refcnt++;
        }
    }
    return ht;
  error:
    _Py_hashtable_destroy(ht);
    return NULL;
}
// --- Module state -----------------------------------------------------------
static PyModuleDef _hashlibmodule;
/* Per-interpreter module state. */
typedef struct {
    PyTypeObject *HASH_type;    // based on EVP_MD
    PyTypeObject *HMAC_type;
#ifdef PY_OPENSSL_HAS_SHAKE
    PyTypeObject *HASHXOF_type; // based on EVP_MD
#endif
    PyObject *constructs;               // maps constructor callables to digest names
    PyObject *unsupported_digestmod_error;
    _Py_hashtable_t *hashtable;         // name/alias -> py_hashentry_t cache
#ifdef Py_HAS_OPENSSL3_SUPPORT
    EVP_MAC *evp_hmac;  // EVP_MAC used for HMAC (presumably fetched once -- confirm at init site)
#endif
} _hashlibstate;
/* Return the module state attached to 'module' (never NULL once initialized). */
static inline _hashlibstate*
get_hashlib_state(PyObject *module)
{
    _hashlibstate *state = (_hashlibstate *)PyModule_GetState(module);
    assert(state != NULL);
    return state;
}
// --- Module objects ---------------------------------------------------------
/* Instance layout for _hashlib.HASH and _hashlib.HASHXOF. */
typedef struct {
    HASHLIB_OBJECT_HEAD
    EVP_MD_CTX *ctx; /* OpenSSL message digest context */
} HASHobject;
#define HASHobject_CAST(op) ((HASHobject *)(op))
/* Instance layout for _hashlib.HMAC; representation differs by OpenSSL version. */
typedef struct {
    HASHLIB_OBJECT_HEAD
#ifdef Py_HAS_OPENSSL3_SUPPORT
    EVP_MAC_CTX *ctx; /* OpenSSL HMAC EVP-based context */
    int evp_md_nid; /* needed to find the message digest name */
#else
    HMAC_CTX *ctx; /* OpenSSL HMAC plain context */
#endif
} HMACobject;
#define HMACobject_CAST(op) ((HMACobject *)(op))
// --- Module clinic configuration --------------------------------------------
/*[clinic input]
module _hashlib
class _hashlib.HASH "HASHobject *" "&PyType_Type"
class _hashlib.HASHXOF "HASHobject *" "&PyType_Type"
class _hashlib.HMAC "HMACobject *" "&PyType_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=6b5c9ce5c28bdc58]*/
#include "clinic/_hashopenssl.c.h"
/* LCOV_EXCL_START */
/* Thin wrapper around ERR_reason_error_string() returning non-NULL text. */
static const char *
py_wrapper_ERR_reason_error_string(unsigned long errcode)
{
const char *reason = ERR_reason_error_string(errcode);
return reason ? reason : "no reason";
}
#ifdef Py_HAS_OPENSSL3_SUPPORT
/*
 * Set an exception with additional information.
 *
 * This is only useful in OpenSSL 3.0 and later as the default reason
 * usually lacks information and function locations are no longer encoded
 * in the error code.
 *
 * 'lib' and 'reason' may each be NULL; the message degrades gracefully
 * to whichever parts are available.
 */
static void
set_exception_with_ssl_errinfo(PyObject *exc_type, PyObject *exc_text,
                               const char *lib, const char *reason)
{
    assert(exc_type != NULL);
    assert(exc_text != NULL);
    if (lib && reason) {
        PyErr_Format(exc_type, "[%s] %U (reason: %s)", lib, exc_text, reason);
    }
    else if (lib) {
        PyErr_Format(exc_type, "[%s] %U", lib, exc_text);
    }
    else if (reason) {
        PyErr_Format(exc_type, "%U (reason: %s)", exc_text, reason);
    }
    else {
        PyErr_SetObject(exc_type, exc_text);
    }
}
#endif
/* Set an exception of given type using the given OpenSSL error code.
 *
 * The message combines the library name, function name (pre-3.0 only)
 * and reason string extracted from 'errcode'.
 */
static void
set_ssl_exception_from_errcode(PyObject *exc_type, unsigned long errcode)
{
    assert(exc_type != NULL);
    assert(errcode != 0);
    /* ERR_ERROR_STRING(3) ensures that the messages below are ASCII */
    const char *lib = ERR_lib_error_string(errcode);
#ifdef Py_HAS_OPENSSL3_SUPPORT
    // Since OpenSSL 3.0, ERR_func_error_string() always returns NULL.
    const char *func = NULL;
#else
    const char *func = ERR_func_error_string(errcode);
#endif
    // reason is never NULL here (falls back to "no reason").
    const char *reason = py_wrapper_ERR_reason_error_string(errcode);
    if (lib && func) {
        PyErr_Format(exc_type, "[%s: %s] %s", lib, func, reason);
    }
    else if (lib) {
        PyErr_Format(exc_type, "[%s] %s", lib, reason);
    }
    else {
        PyErr_SetString(exc_type, reason);
    }
}
/*
 * Get an appropriate exception type for the given OpenSSL error code.
 *
 * Allocation failures map to MemoryError; anything else falls back to
 * the caller-supplied default exception type.
 */
static PyObject *
get_smart_ssl_exception_type(unsigned long errcode, PyObject *default_exc_type)
{
    if (ERR_GET_REASON(errcode) == ERR_R_MALLOC_FAILURE) {
        return PyExc_MemoryError;
    }
    return default_exc_type;
}
/*
 * Set an exception of given type.
 *
 * The message is built from the last pending SSL error; when no SSL error
 * occurred, 'fallback_message' is used instead.
 */
static void
raise_ssl_error(PyObject *exc_type, const char *fallback_message)
{
    assert(fallback_message != NULL);
    unsigned long errcode = ERR_peek_last_error();
    if (errcode == 0) {
        PyErr_SetString(exc_type, fallback_message);
        return;
    }
    ERR_clear_error();
    set_ssl_exception_from_errcode(exc_type, errcode);
}
/* Same as raise_ssl_error() but with a C-style formatted fallback message. */
static void
raise_ssl_error_f(PyObject *exc_type, const char *fallback_format, ...)
{
    assert(fallback_format != NULL);
    unsigned long errcode = ERR_peek_last_error();
    if (errcode != 0) {
        ERR_clear_error();
        set_ssl_exception_from_errcode(exc_type, errcode);
        return;
    }
    va_list vargs;
    va_start(vargs, fallback_format);
    PyErr_FormatV(exc_type, fallback_format, vargs);
    va_end(vargs);
}
/* Same as raise_ssl_error_f() but with smart exception types. */
static void
raise_smart_ssl_error_f(PyObject *exc_type, const char *fallback_format, ...)
{
    unsigned long errcode = ERR_peek_last_error();
    if (errcode != 0) {
        ERR_clear_error();
        PyObject *smart_type = get_smart_ssl_exception_type(errcode, exc_type);
        set_ssl_exception_from_errcode(smart_type, errcode);
        return;
    }
    va_list vargs;
    va_start(vargs, fallback_format);
    PyErr_FormatV(exc_type, fallback_format, vargs);
    va_end(vargs);
}
/*
 * Raise a ValueError with a default message after an error occurred.
 * It can also be used without previous calls to SSL built-in functions.
 */
static inline void
notify_ssl_error_occurred(const char *message)
{
    raise_ssl_error(PyExc_ValueError, message);
}
/* Same as notify_ssl_error_occurred() for failed OpenSSL functions.
 * 'funcname' is interpolated into the fallback message only. */
static inline void
notify_ssl_error_occurred_in(const char *funcname)
{
    raise_ssl_error_f(PyExc_ValueError,
                      "error in OpenSSL function %s()", funcname);
}
/* Same as notify_ssl_error_occurred_in() with smart exception types
 * (e.g. MemoryError for OpenSSL allocation failures). */
static inline void
notify_smart_ssl_error_occurred_in(const char *funcname)
{
    raise_smart_ssl_error_f(PyExc_ValueError,
                            "error in OpenSSL function %s()", funcname);
}
#ifdef Py_HAS_OPENSSL3_SUPPORT
/*
 * Raise an "unsupported algorithm" exception, enriching the message with
 * OpenSSL library/reason details when the last error indicates an
 * unsupported algorithm or a failed fetch.
 */
static void
raise_unsupported_algorithm_impl(PyObject *exc_type,
                                 const char *fallback_format,
                                 const void *format_arg)
{
    // Since OpenSSL 3.0, if the algorithm is not supported or fetching fails,
    // the reason lacks the algorithm name.
    //
    // BUGFIX: ERR_peek_last_error() returns 'unsigned long'; storing it in
    // an 'int' could truncate the packed error code. Use 'unsigned long',
    // matching every other errcode variable in this file.
    unsigned long errcode = ERR_peek_last_error();
    switch (ERR_GET_REASON(errcode)) {
        case ERR_R_UNSUPPORTED: {
            PyObject *text = PyUnicode_FromFormat(fallback_format, format_arg);
            if (text != NULL) {
                const char *lib = ERR_lib_error_string(errcode);
                set_exception_with_ssl_errinfo(exc_type, text, lib, NULL);
                Py_DECREF(text);
            }
            break;
        }
        case ERR_R_FETCH_FAILED: {
            PyObject *text = PyUnicode_FromFormat(fallback_format, format_arg);
            if (text != NULL) {
                const char *lib = ERR_lib_error_string(errcode);
                const char *reason = ERR_reason_error_string(errcode);
                set_exception_with_ssl_errinfo(exc_type, text, lib, reason);
                Py_DECREF(text);
            }
            break;
        }
        default:
            raise_ssl_error_f(exc_type, fallback_format, format_arg);
            break;
    }
    assert(PyErr_Occurred());
}
#else
/* Before OpenSSL 3.0, error messages included enough information. */
#define raise_unsupported_algorithm_impl raise_ssl_error_f
#endif
/* Raise the module's "unsupported digestmod" error for an arbitrary object. */
static inline void
raise_unsupported_algorithm_error(_hashlibstate *state, PyObject *digestmod)
{
    raise_unsupported_algorithm_impl(
        state->unsupported_digestmod_error,
        HASHLIB_UNSUPPORTED_ALGORITHM,
        digestmod
    );
}
/* Same as raise_unsupported_algorithm_error() for a C string digest name. */
static inline void
raise_unsupported_str_algorithm_error(_hashlibstate *state, const char *name)
{
    raise_unsupported_algorithm_impl(
        state->unsupported_digestmod_error,
        HASHLIB_UNSUPPORTED_STR_ALGORITHM,
        name
    );
}
/* The macro alias (pre-3.0 build) must not leak past its last use. */
#undef raise_unsupported_algorithm_impl
/* LCOV_EXCL_STOP */
/*
* OpenSSL provides a way to go from NIDs to digest names for hash functions
* but lacks this granularity for MAC objects where it is not possible to get
* the underlying digest name (only the block size and digest size are allowed
* to be recovered).
*
* In addition, OpenSSL aliases pollute the list of known digest names
* as OpenSSL appears to have its own definition of alias. In particular,
* the resulting list still contains duplicate and alternate names for several
* algorithms.
*
* Therefore, digest names, whether they are used by hash functions or HMAC,
* are handled through EVP_MD objects or directly by using some NID.
*/
/* Get a cached entry by OpenSSL NID, or NULL if the NID is not indexed. */
static const py_hashentry_t *
get_hashentry_by_nid(int nid)
{
    for (size_t i = 0; py_hashes[i].py_name != NULL; i++) {
        if (py_hashes[i].ossl_nid == nid) {
            return &py_hashes[i];
        }
    }
    return NULL;
}
/*
 * Convert the NID to a string via OBJ_nid2*() functions.
 *
 * Prefers the long name; falls back to the short name when the long-name
 * lookup fails benignly (unknown NID).
 *
 * If 'nid' cannot be resolved, set an exception and return NULL.
 */
static const char *
get_asn1_utf8name_by_nid(int nid)
{
    const char *name = OBJ_nid2ln(nid);
    if (name == NULL) {
        /* In OpenSSL 3.0 and later, OBJ_nid*() are thread-safe and may raise.
         * However, not all versions of OpenSSL set a last error, so we simply
         * ignore the last error if none exists.
         *
         * See https://github.com/python/cpython/issues/142451.
         */
        unsigned long errcode = ERR_peek_last_error();
        if (errcode && ERR_GET_REASON(errcode) != OBJ_R_UNKNOWN_NID) {
            goto error;
        }
        // fallback to short name and unconditionally propagate errors
        name = OBJ_nid2sn(nid);
        if (name == NULL) {
            goto error;
        }
    }
    return name;
error:
    raise_ssl_error_f(PyExc_ValueError, "cannot resolve NID %d", nid);
    return NULL;
}
/*
* Convert the NID to an OpenSSL digest name.
*
* On error, set an exception and return NULL.
*/
static const char *
get_hashlib_utf8name_by_nid(int nid)
{
const py_hashentry_t *e = get_hashentry_by_nid(nid);
return e ? e->py_name : get_asn1_utf8name_by_nid(nid);
}
#ifdef Py_HAS_OPENSSL3_SUPPORT
/*
 * Convert the NID to an OpenSSL "canonical" cached, SN_* or LN_* digest name.
 *
 * On error, set an exception and return NULL.
 */
static const char *
get_openssl_utf8name_by_nid(int nid)
{
    const py_hashentry_t *entry = get_hashentry_by_nid(nid);
    if (entry != NULL) {
        return entry->ossl_name;
    }
    return get_asn1_utf8name_by_nid(nid);
}
#endif
/* Same as get_hashlib_utf8name_by_nid() but using an EVP_MD object.
 * On error, set an exception and return NULL. */
static const char *
get_hashlib_utf8name_by_evp_md(const EVP_MD *md)
{
    assert(md != NULL);
    return get_hashlib_utf8name_by_nid(EVP_MD_nid(md));
}
/*
 * Return 1 if the property query clause [1] must be "-fips" and 0 otherwise.
 *
 * [1] https://docs.openssl.org/master/man7/property
 */
static inline int
disable_fips_property(Py_hash_type py_ht)
{
    if (py_ht == Py_ht_evp_nosecurity) {
        return 1;
    }
    if (py_ht == Py_ht_evp || py_ht == Py_ht_mac || py_ht == Py_ht_pbkdf2) {
        return 0;
    }
    /* unreachable for valid Py_hash_type values */
    Py_FatalError("unsupported hash type");
}
/*
 * Get a new reference to an EVP_MD object described by name and purpose.
 *
 * If 'name' is an OpenSSL indexed name, the return value is cached in the
 * module-state hashtable entry (one slot per security purpose).
 */
static PY_EVP_MD *
get_openssl_evp_md_by_utf8name(_hashlibstate *state, const char *name,
                               Py_hash_type py_ht)
{
    PY_EVP_MD *digest = NULL, *other_digest = NULL;
    py_hashentry_t *entry = _Py_hashtable_get(state->hashtable, name);
    if (entry != NULL) {
        if (!disable_fips_property(py_ht)) {
            // Default (usedforsecurity=True) cache slot.
            digest = FT_ATOMIC_LOAD_PTR_RELAXED(entry->evp);
            if (digest == NULL) {
                digest = PY_EVP_MD_fetch(entry->ossl_name, NULL);
#ifdef Py_GIL_DISABLED
                // exchange in case another thread did the same thing at the same time
                other_digest = _Py_atomic_exchange_ptr(&entry->evp, (void *)digest);
#else
                entry->evp = digest;
#endif
            }
        }
        else {
            // usedforsecurity=False: fetch with the "-fips" property query.
            digest = FT_ATOMIC_LOAD_PTR_RELAXED(entry->evp_nosecurity);
            if (digest == NULL) {
                digest = PY_EVP_MD_fetch(entry->ossl_name, "-fips");
#ifdef Py_GIL_DISABLED
                // exchange in case another thread did the same thing at the same time
                other_digest = _Py_atomic_exchange_ptr(&entry->evp_nosecurity, (void *)digest);
#else
                entry->evp_nosecurity = digest;
#endif
            }
        }
        // If another thread raced us, both fetches must have produced the
        // same cached pointer.
        assert(other_digest == NULL || other_digest == digest);
        if (digest != NULL && other_digest == NULL) {
            PY_EVP_MD_up_ref(digest);
        }
    }
    else {
        // Fall back for looking up an unindexed OpenSSL specific name.
        const char *props = disable_fips_property(py_ht) ? "-fips" : NULL;
        (void)props; // will only be used in OpenSSL 3.0 and later
        digest = PY_EVP_MD_fetch(name, props);
    }
    if (digest == NULL) {
        raise_unsupported_str_algorithm_error(state, name);
        return NULL;
    }
    return digest;
}
/*
 * Get a new reference to an EVP_MD described by 'digestmod' and purpose.
 *
 * On error, set an exception and return NULL.
 *
 * Parameters
 *
 *      digestmod   A digest name or a _hashopenssl builtin function
 *      py_ht       The message digest purpose.
 */
static PY_EVP_MD *
get_openssl_evp_md(_hashlibstate *state, PyObject *digestmod, Py_hash_type py_ht)
{
    const char *name = NULL;
    if (PyUnicode_Check(digestmod)) {
        name = PyUnicode_AsUTF8(digestmod);
    }
    else {
        // Builtin constructors (e.g. _hashlib.openssl_sha256) are mapped to
        // their digest name through the module-state 'constructs' dict.
        assert(state->constructs != NULL);
        PyObject *mapped = PyDict_GetItemWithError(state->constructs,
                                                   digestmod);
        if (mapped != NULL) {
            name = PyUnicode_AsUTF8(mapped);
        }
    }
    if (name == NULL) {
        if (!PyErr_Occurred()) {
            raise_unsupported_algorithm_error(state, digestmod);
        }
        return NULL;
    }
    return get_openssl_evp_md_by_utf8name(state, name, py_ht);
}
#ifdef Py_HAS_OPENSSL3_SUPPORT
/*
 * Get the "canonical" name of an EVP_MD described by 'digestmod' and purpose.
 *
 * On error, set an exception and return NULL.
 *
 * This function should not be used to construct the exposed Python name,
 * but rather to invoke OpenSSL EVP_* functions.
 *
 * When 'evp_md' is non-NULL, on success it receives a new reference that
 * the caller must release with PY_EVP_MD_free(); on failure it is set to
 * NULL.
 */
static const char *
get_openssl_digest_name(_hashlibstate *state,
                        PyObject *digestmod, Py_hash_type py_ht,
                        EVP_MD **evp_md)
{
    PY_EVP_MD *md = get_openssl_evp_md(state, digestmod, py_ht);
    if (md == NULL) {
        if (evp_md != NULL) {
            *evp_md = NULL;
        }
        return NULL;
    }
    int nid = EVP_MD_nid(md);
    const char *name = get_openssl_utf8name_by_nid(nid);
    if (name == NULL) {
        if (evp_md != NULL) {
            *evp_md = NULL;
        }
        PY_EVP_MD_free(md);
        raise_unsupported_algorithm_error(state, digestmod);
        return NULL;
    }
    if (evp_md != NULL) {
        // Transfer the reference to the caller.
        *evp_md = md;
    }
    else {
        PY_EVP_MD_free(md);
    }
    return name;
}
#endif
// --- OpenSSL HASH wrappers --------------------------------------------------
/* Thin wrapper around EVP_MD_CTX_new() which sets an exception on failure. */
static EVP_MD_CTX *
py_wrapper_EVP_MD_CTX_new(void)
{
    EVP_MD_CTX *ctx = EVP_MD_CTX_new();
    if (ctx == NULL) {
        (void)PyErr_NoMemory();
    }
    return ctx;
}
// --- HASH interface ---------------------------------------------------------
/* Allocate a HASHobject with a fresh EVP_MD_CTX.
 * Returns NULL with an exception set on failure. */
static HASHobject *
new_hash_object(PyTypeObject *type)
{
    HASHobject *self = PyObject_New(HASHobject, type);
    if (self == NULL) {
        return NULL;
    }
    HASHLIB_INIT_MUTEX(self);
    self->ctx = py_wrapper_EVP_MD_CTX_new();
    if (self->ctx == NULL) {
        Py_DECREF(self);
        return NULL;
    }
    return self;
}
/* Feed 'len' bytes at 'vp' into the digest, in MUNCH_SIZE chunks so that
 * each EVP_DigestUpdate() call stays within 'unsigned int' range.
 * Returns 0 on success, -1 (with an exception set) on failure. */
static int
_hashlib_HASH_hash(HASHobject *self, const void *vp, Py_ssize_t len)
{
    const unsigned char *buf = (const unsigned char *)vp;
    while (len > 0) {
        unsigned int chunk;
        if (len > (Py_ssize_t)MUNCH_SIZE) {
            chunk = MUNCH_SIZE;
        }
        else {
            chunk = Py_SAFE_DOWNCAST(len, Py_ssize_t, unsigned int);
        }
        if (!EVP_DigestUpdate(self->ctx, (const void *)buf, chunk)) {
            notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestUpdate));
            return -1;
        }
        buf += chunk;
        len -= chunk;
    }
    return 0;
}
/* Internal methods for a hash object */
/* Release the OpenSSL context, the instance, then the heap type reference. */
static void
_hashlib_HASH_dealloc(PyObject *op)
{
    HASHobject *self = HASHobject_CAST(op);
    PyTypeObject *type = Py_TYPE(self);
    EVP_MD_CTX_free(self->ctx);
    PyObject_Free(self);
    Py_DECREF(type);
}
/* Copy self's digest state into 'new_ctx_p' while holding self's lock.
 * Returns 0 on success, -1 (with an exception set) on failure. */
static int
_hashlib_HASH_copy_locked(HASHobject *self, EVP_MD_CTX *new_ctx_p)
{
    HASHLIB_ACQUIRE_LOCK(self);
    int ok = EVP_MD_CTX_copy(new_ctx_p, self->ctx);
    HASHLIB_RELEASE_LOCK(self);
    if (ok) {
        return 0;
    }
    notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MD_CTX_copy));
    return -1;
}
/* External methods for a hash object */
/*[clinic input]
_hashlib.HASH.copy
Return a copy of the hash object.
[clinic start generated code]*/
static PyObject *
_hashlib_HASH_copy_impl(HASHobject *self)
/*[clinic end generated code: output=2545541af18d53d7 input=814b19202cd08a26]*/
{
    HASHobject *copy = new_hash_object(Py_TYPE(self));
    if (copy == NULL) {
        return NULL;
    }
    if (_hashlib_HASH_copy_locked(self, copy->ctx) < 0) {
        Py_DECREF(copy);
        return NULL;
    }
    return (PyObject *)copy;
}
/* Finalize a copy of the current state into 'digest' (caller provides at
 * least EVP_MAX_MD_SIZE bytes) without disturbing self.
 * Returns the digest size, or -1 with an exception set. */
static Py_ssize_t
_hashlib_HASH_digest_compute(HASHobject *self, unsigned char *digest)
{
    EVP_MD_CTX *tmp_ctx = py_wrapper_EVP_MD_CTX_new();
    if (tmp_ctx == NULL) {
        return -1;
    }
    if (_hashlib_HASH_copy_locked(self, tmp_ctx) < 0) {
        EVP_MD_CTX_free(tmp_ctx);
        return -1;
    }
    // Query the size before finalizing, as in the original flow.
    Py_ssize_t digest_size = EVP_MD_CTX_size(tmp_ctx);
    int ok = EVP_DigestFinal(tmp_ctx, digest, NULL);
    if (!ok) {
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinal));
        EVP_MD_CTX_free(tmp_ctx);
        return -1;
    }
    EVP_MD_CTX_free(tmp_ctx);
    return digest_size;
}
/*[clinic input]
_hashlib.HASH.digest
Return the digest value as a bytes object.
[clinic start generated code]*/
static PyObject *
_hashlib_HASH_digest_impl(HASHobject *self)
/*[clinic end generated code: output=3fc6f9671d712850 input=d8d528d6e50af0de]*/
{
    unsigned char buffer[EVP_MAX_MD_SIZE];
    Py_ssize_t size = _hashlib_HASH_digest_compute(self, buffer);
    if (size < 0) {
        return NULL;
    }
    return PyBytes_FromStringAndSize((const char *)buffer, size);
}
/*[clinic input]
_hashlib.HASH.hexdigest
Return the digest value as a string of hexadecimal digits.
[clinic start generated code]*/
static PyObject *
_hashlib_HASH_hexdigest_impl(HASHobject *self)
/*[clinic end generated code: output=1b8e60d9711e7f4d input=ae7553f78f8372d8]*/
{
    unsigned char buffer[EVP_MAX_MD_SIZE];
    Py_ssize_t size = _hashlib_HASH_digest_compute(self, buffer);
    if (size < 0) {
        return NULL;
    }
    return _Py_strhex((const char *)buffer, size);
}
/*[clinic input]
_hashlib.HASH.update
obj: object
/
Update this hash object's state with the provided string.
[clinic start generated code]*/
static PyObject *
_hashlib_HASH_update_impl(HASHobject *self, PyObject *obj)
/*[clinic end generated code: output=62ad989754946b86 input=aa1ce20e3f92ceb6]*/
{
    int result;
    Py_buffer view;
    GET_BUFFER_VIEW_OR_ERROUT(obj, &view);
    // Hash under the object's lock; the macro presumably drops the GIL for
    // large buffers (see its definition -- not visible here).
    HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
        self, view.len,
        result = _hashlib_HASH_hash(self, view.buf, view.len)
    );
    PyBuffer_Release(&view);
    // NOTE(review): returns Py_None without an incref; safe only because
    // Py_None is immortal in this CPython version -- confirm target version.
    return result < 0 ? NULL : Py_None;
}
/* Method table for _hashlib.HASH (clinic-generated METHODDEF entries). */
static PyMethodDef HASH_methods[] = {
    _HASHLIB_HASH_COPY_METHODDEF
    _HASHLIB_HASH_DIGEST_METHODDEF
    _HASHLIB_HASH_HEXDIGEST_METHODDEF
    _HASHLIB_HASH_UPDATE_METHODDEF
    {NULL, NULL} /* sentinel */
};
/* Getter for the 'block_size' attribute. */
static PyObject *
_hashlib_HASH_get_blocksize(PyObject *op, void *Py_UNUSED(closure))
{
    HASHobject *self = HASHobject_CAST(op);
    return PyLong_FromLong(EVP_MD_CTX_block_size(self->ctx));
}
/* Getter for the 'digest_size' attribute. */
static PyObject *
_hashlib_HASH_get_digestsize(PyObject *op, void *Py_UNUSED(closure))
{
    HASHobject *self = HASHobject_CAST(op);
    return PyLong_FromLong(EVP_MD_CTX_size(self->ctx));
}
/* Getter for the 'name' attribute: the Python-facing algorithm name. */
static PyObject *
_hashlib_HASH_get_name(PyObject *op, void *Py_UNUSED(closure))
{
    HASHobject *self = HASHobject_CAST(op);
    const EVP_MD *md = PY_EVP_MD_CTX_md(self->ctx);
    if (md == NULL) {
        notify_ssl_error_occurred("missing EVP_MD for HASH context");
        return NULL;
    }
    const char *name = get_hashlib_utf8name_by_evp_md(md);
    assert(name != NULL || PyErr_Occurred());
    return name == NULL ? NULL : PyUnicode_FromString(name);
}
/* Attribute descriptors for _hashlib.HASH (all read-only). */
static PyGetSetDef HASH_getsets[] = {
    {"digest_size", _hashlib_HASH_get_digestsize, NULL, NULL, NULL},
    {"block_size", _hashlib_HASH_get_blocksize, NULL, NULL, NULL},
    {"name", _hashlib_HASH_get_name, NULL, NULL, PyDoc_STR("algorithm name.")},
    {NULL} /* Sentinel */
};
/* repr(): "<name TypeName object @ address>". */
static PyObject *
_hashlib_HASH_repr(PyObject *self)
{
    PyObject *name = _hashlib_HASH_get_name(self, NULL);
    if (name == NULL) {
        return NULL;
    }
    PyObject *repr = PyUnicode_FromFormat("<%U %T object @ %p>",
                                          name, self, self);
    Py_DECREF(name);
    return repr;
}
/* Python-visible docstring for the HASH type (runtime string, unchanged). */
PyDoc_STRVAR(HASHobject_type_doc,
"HASH(name, string=b\'\')\n"
"--\n"
"\n"
"A hash is an object used to calculate a checksum of a string of information.\n"
"\n"
"Methods:\n"
"\n"
"update() -- updates the current digest with an additional string\n"
"digest() -- return the current digest value\n"
"hexdigest() -- return the current digest as a string of hexadecimal digits\n"
"copy() -- return a copy of the current hash object\n"
"\n"
"Attributes:\n"
"\n"
"name -- the hash algorithm being used by this object\n"
"digest_size -- number of bytes in this hashes output");
/* Heap-type slot table and spec for _hashlib.HASH. */
static PyType_Slot HASHobject_type_slots[] = {
    {Py_tp_dealloc, _hashlib_HASH_dealloc},
    {Py_tp_repr, _hashlib_HASH_repr},
    {Py_tp_doc, (char *)HASHobject_type_doc},
    {Py_tp_methods, HASH_methods},
    {Py_tp_getset, HASH_getsets},
    {0, 0},
};
static PyType_Spec HASHobject_type_spec = {
    .name = "_hashlib.HASH",
    .basicsize = sizeof(HASHobject),
    .flags = (
        Py_TPFLAGS_DEFAULT
        | Py_TPFLAGS_BASETYPE
        | Py_TPFLAGS_DISALLOW_INSTANTIATION
        | Py_TPFLAGS_IMMUTABLETYPE
    ),
    .slots = HASHobject_type_slots
};
#ifdef PY_OPENSSL_HAS_SHAKE
/*[clinic input]
_hashlib.HASHXOF.digest
length: Py_ssize_t(allow_negative=False)
Return the digest value as a bytes object.
[clinic start generated code]*/
static PyObject *
_hashlib_HASHXOF_digest_impl(HASHobject *self, Py_ssize_t length)
/*[clinic end generated code: output=dcb09335dd2fe908 input=224d047da2c12a42]*/
{
    EVP_MD_CTX *temp_ctx;
    if (length == 0) {
        return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES);
    }
    // Write the XOF output directly into the bytes object under construction.
    PyBytesWriter *writer = PyBytesWriter_Create(length);
    if (writer == NULL) {
        return NULL;
    }
    temp_ctx = py_wrapper_EVP_MD_CTX_new();
    if (temp_ctx == NULL) {
        PyBytesWriter_Discard(writer);
        return NULL;
    }
    // Finalize a copy so this object can keep being updated afterwards.
    if (_hashlib_HASH_copy_locked(self, temp_ctx) < 0) {
        goto error;
    }
    if (!EVP_DigestFinalXOF(temp_ctx,
                            (unsigned char*)PyBytesWriter_GetData(writer),
                            length))
    {
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinalXOF));
        goto error;
    }
    EVP_MD_CTX_free(temp_ctx);
    return PyBytesWriter_Finish(writer);
error:
    PyBytesWriter_Discard(writer);
    EVP_MD_CTX_free(temp_ctx);
    return NULL;
}
/*[clinic input]
_hashlib.HASHXOF.hexdigest
length: Py_ssize_t(allow_negative=False)
Return the digest value as a string of hexadecimal digits.
[clinic start generated code]*/
static PyObject *
_hashlib_HASHXOF_hexdigest_impl(HASHobject *self, Py_ssize_t length)
/*[clinic end generated code: output=519431cafa014f39 input=4a41b8ab5d3bfee2]*/
{
    unsigned char *digest;
    EVP_MD_CTX *temp_ctx;
    PyObject *retval;
    if (length == 0) {
        return Py_GetConstant(Py_CONSTANT_EMPTY_STR);
    }
    // Temporary raw buffer; hex-encoded by _Py_strhex() below.
    digest = (unsigned char*)PyMem_Malloc(length);
    if (digest == NULL) {
        (void)PyErr_NoMemory();
        return NULL;
    }
    temp_ctx = py_wrapper_EVP_MD_CTX_new();
    if (temp_ctx == NULL) {
        PyMem_Free(digest);
        return NULL;
    }
    /* Get the raw (binary) digest value from a copy of the state. */
    if (_hashlib_HASH_copy_locked(self, temp_ctx) < 0) {
        goto error;
    }
    if (!EVP_DigestFinalXOF(temp_ctx, digest, length)) {
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinalXOF));
        goto error;
    }
    EVP_MD_CTX_free(temp_ctx);
    retval = _Py_strhex((const char *)digest, length);
    PyMem_Free(digest);
    return retval;
error:
    PyMem_Free(digest);
    EVP_MD_CTX_free(temp_ctx);
    return NULL;
}
/* Method table for _hashlib.HASHXOF (digest/hexdigest take a length). */
static PyMethodDef HASHXOFobject_methods[] = {
    _HASHLIB_HASHXOF_DIGEST_METHODDEF
    _HASHLIB_HASHXOF_HEXDIGEST_METHODDEF
    {NULL, NULL} /* sentinel */
};
/* XOF digests have no fixed output size; expose digest_size == 0. */
static PyObject *
_hashlib_HASHXOF_digest_size(PyObject *Py_UNUSED(self),
                             void *Py_UNUSED(closure))
{
    return PyLong_FromLong(0);
}
static PyGetSetDef HASHXOFobject_getsets[] = {
    {"digest_size", _hashlib_HASHXOF_digest_size, NULL, NULL, NULL},
    {NULL} /* Sentinel */
};
/* Python-visible docstring for the HASHXOF type (runtime string, unchanged). */
PyDoc_STRVAR(HASHXOFobject_type_doc,
"HASHXOF(name, string=b\'\')\n"
"--\n"
"\n"
"A hash is an object used to calculate a checksum of a string of information.\n"
"\n"
"Methods:\n"
"\n"
"update() -- updates the current digest with an additional string\n"
"digest(length) -- return the current digest value\n"
"hexdigest(length) -- return the current digest as a string of hexadecimal digits\n"
"copy() -- return a copy of the current hash object\n"
"\n"
"Attributes:\n"
"\n"
"name -- the hash algorithm being used by this object\n"
"digest_size -- number of bytes in this hashes output");
/* HASHXOF is created as a subtype of HASH (shares HASHobject layout);
 * only the length-taking digest methods and digest_size differ. */
static PyType_Slot HASHXOFobject_type_slots[] = {
    {Py_tp_doc, (char *)HASHXOFobject_type_doc},
    {Py_tp_methods, HASHXOFobject_methods},
    {Py_tp_getset, HASHXOFobject_getsets},
    {0, 0},
};
static PyType_Spec HASHXOFobject_type_spec = {
    .name = "_hashlib.HASHXOF",
    .basicsize = sizeof(HASHobject),
    .flags = (
        Py_TPFLAGS_DEFAULT
        | Py_TPFLAGS_BASETYPE
        | Py_TPFLAGS_DISALLOW_INSTANTIATION
        | Py_TPFLAGS_IMMUTABLETYPE
    ),
    .slots = HASHXOFobject_type_slots
};
#endif
/*
 * Shared constructor backend for new() and the openssl_*() builtins.
 *
 * Resolves 'digestname' (honoring 'usedforsecurity'), picks HASHXOF_type
 * for XOF digests, initializes the context and optionally hashes
 * 'data_obj'. Returns a new HASH/HASHXOF object or NULL with an
 * exception set.
 */
static PyObject *
_hashlib_HASH(_hashlibstate *state, const char *digestname, PyObject *data_obj,
              int usedforsecurity)
{
    Py_buffer view = { 0 };
    PY_EVP_MD *digest = NULL;
    PyTypeObject *type;
    HASHobject *self = NULL;
    if (data_obj != NULL) {
        GET_BUFFER_VIEW_OR_ERROUT(data_obj, &view);
    }
    Py_hash_type purpose = usedforsecurity ? Py_ht_evp : Py_ht_evp_nosecurity;
    digest = get_openssl_evp_md_by_utf8name(state, digestname, purpose);
    if (digest == NULL) {
        goto exit;
    }
    type = PY_EVP_MD_xof(digest) ? state->HASHXOF_type : state->HASH_type;
    self = new_hash_object(type);
    if (self == NULL) {
        goto exit;
    }
#if defined(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW) && OPENSSL_VERSION_NUMBER < 0x30000000L
    // In OpenSSL 1.1.1 the non FIPS allowed flag is context specific while
    // in 3.0.0 it is a different EVP_MD provider.
    if (!usedforsecurity) {
        EVP_MD_CTX_set_flags(self->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
    }
#endif
    int result = EVP_DigestInit_ex(self->ctx, digest, NULL);
    if (!result) {
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestInit_ex));
        Py_CLEAR(self);
        goto exit;
    }
    if (view.buf && view.len) {
        /* Do not use self->mutex here as this is the constructor
         * where it is not yet possible to have concurrent access. */
        HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
            view.len,
            result = _hashlib_HASH_hash(self, view.buf, view.len)
        );
        if (result == -1) {
            assert(PyErr_Occurred());
            Py_CLEAR(self);
            goto exit;
        }
    }
exit:
    if (data_obj != NULL) {
        PyBuffer_Release(&view);
    }
    if (digest != NULL) {
        PY_EVP_MD_free(digest);
    }
    return (PyObject *)self;
}
// In Python 3.19, we can remove the "STRING" argument and would also be able
// to remove the macro (or keep it as an alias for better naming) since calls
// to _hashlib_HASH_new_impl() would fit on 80 characters.
#define CALL_HASHLIB_NEW(MODULE, NAME, DATA, STRING, USEDFORSECURITY) \
    return _hashlib_HASH_new_impl(MODULE, NAME, DATA, USEDFORSECURITY, STRING)
/* The module-level function: new() */
/*[clinic input]
_hashlib.new as _hashlib_HASH_new
name: str
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Return a new hash object using the named algorithm.
An optional string argument may be provided and will be
automatically hashed.
The MD5 and SHA1 algorithms are always supported.
[clinic start generated code]*/
static PyObject *
_hashlib_HASH_new_impl(PyObject *module, const char *name, PyObject *data,
                       int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=b905aaf9840c1bbd input=c34af6c6e696d44e]*/
{
    /* Reconcile the legacy 'string' keyword with 'data' first. */
    PyObject *data_obj;
    if (_Py_hashlib_data_argument(&data_obj, data, string) < 0) {
        return NULL;
    }
    return _hashlib_HASH(get_hashlib_state(module), name, data_obj,
                         usedforsecurity);
}
/* Direct constructors for MD5/SHA-1/SHA-2; all delegate to
 * _hashlib_HASH_new_impl() via CALL_HASHLIB_NEW. */
/*[clinic input]
_hashlib.openssl_md5
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a md5 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_md5_impl(PyObject *module, PyObject *data,
                          int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=ca8cf184d90f7432 input=e7c0adbd6a867db1]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_md5, data, string, usedforsecurity);
}
/*[clinic input]
_hashlib.openssl_sha1
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a sha1 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_sha1_impl(PyObject *module, PyObject *data,
                           int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=1736fb7b310d64be input=f7e5bb1711e952d8]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha1, data, string, usedforsecurity);
}
/*[clinic input]
_hashlib.openssl_sha224
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a sha224 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_sha224_impl(PyObject *module, PyObject *data,
                             int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=0d6ff57be5e5c140 input=3820fff7ed3a53b8]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha224, data, string, usedforsecurity);
}
/*[clinic input]
_hashlib.openssl_sha256
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a sha256 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_sha256_impl(PyObject *module, PyObject *data,
                             int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=412ea7111555b6e7 input=9a2f115cf1f7e0eb]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha256, data, string, usedforsecurity);
}
/*[clinic input]
_hashlib.openssl_sha384
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a sha384 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_sha384_impl(PyObject *module, PyObject *data,
                             int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=2e0dc395b59ed726 input=1ea48f6f01e77cfb]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha384, data, string, usedforsecurity);
}
/*[clinic input]
_hashlib.openssl_sha512
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string: object(c_default="NULL") = None
Returns a sha512 hash object; optionally initialized with a string
[clinic start generated code]*/
static PyObject *
_hashlib_openssl_sha512_impl(PyObject *module, PyObject *data,
                             int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=4bdd760388dbfc0f input=3cf56903e07d1f5c]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha512, data, string, usedforsecurity);
}
#ifdef PY_OPENSSL_HAS_SHA3
/* SHA-3 named constructors; only compiled when the linked OpenSSL
 * provides the SHA-3 family. Each defers to CALL_HASHLIB_NEW. */
/*[clinic input]
_hashlib.openssl_sha3_224

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a sha3-224 hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_sha3_224_impl(PyObject *module, PyObject *data,
                               int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=6d8dc2a924f3ba35 input=7f14f16a9f6a3158]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha3_224, data, string, usedforsecurity);
}

/*[clinic input]
_hashlib.openssl_sha3_256

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a sha3-256 hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_sha3_256_impl(PyObject *module, PyObject *data,
                               int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=9e520f537b3a4622 input=7987150939d5e352]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha3_256, data, string, usedforsecurity);
}

/*[clinic input]
_hashlib.openssl_sha3_384

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a sha3-384 hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_sha3_384_impl(PyObject *module, PyObject *data,
                               int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=d239ba0463fd6138 input=fc943401f67e3b81]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha3_384, data, string, usedforsecurity);
}

/*[clinic input]
_hashlib.openssl_sha3_512

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a sha3-512 hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_sha3_512_impl(PyObject *module, PyObject *data,
                               int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=17662f21038c2278 input=6601ddd2c6c1516d]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_sha3_512, data, string, usedforsecurity);
}
#endif /* PY_OPENSSL_HAS_SHA3 */
#ifdef PY_OPENSSL_HAS_SHAKE
/* SHAKE (extendable-output) named constructors; only compiled when the
 * linked OpenSSL provides SHAKE. Each defers to CALL_HASHLIB_NEW. */
/*[clinic input]
@permit_long_summary
_hashlib.openssl_shake_128

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a shake-128 variable hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_shake_128_impl(PyObject *module, PyObject *data,
                                int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=4e6afed8d18980ad input=0d2803af1158b23c]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_shake_128, data, string, usedforsecurity);
}

/*[clinic input]
@permit_long_summary
_hashlib.openssl_shake_256

    data: object(c_default="NULL") = b''
    *
    usedforsecurity: bool = True
    string: object(c_default="NULL") = None

Returns a shake-256 variable hash object; optionally initialized with a string

[clinic start generated code]*/

static PyObject *
_hashlib_openssl_shake_256_impl(PyObject *module, PyObject *data,
                                int usedforsecurity, PyObject *string)
/*[clinic end generated code: output=62481bce4a77d16c input=f27b98d9c749f55d]*/
{
    CALL_HASHLIB_NEW(module, Py_hash_shake_256, data, string, usedforsecurity);
}
#endif /* PY_OPENSSL_HAS_SHAKE */
#undef CALL_HASHLIB_NEW
/*[clinic input]
@permit_long_summary
_hashlib.pbkdf2_hmac as pbkdf2_hmac

    hash_name: str
    password: Py_buffer
    salt: Py_buffer
    iterations: long
    dklen as dklen_obj: object = None

Password based key derivation function 2 (PKCS #5 v2.0) with HMAC as pseudorandom function.

[clinic start generated code]*/

static PyObject *
pbkdf2_hmac_impl(PyObject *module, const char *hash_name,
                 Py_buffer *password, Py_buffer *salt, long iterations,
                 PyObject *dklen_obj)
/*[clinic end generated code: output=144b76005416599b input=83417fbd9ec2b8a3]*/
{
    _hashlibstate *state = get_hashlib_state(module);
    PyObject *key_obj = NULL;  /* result; stays NULL on error */
    long dklen;
    int retval;

    /* Resolve the digest name; sets an exception on failure. */
    PY_EVP_MD *digest = get_openssl_evp_md_by_utf8name(state, hash_name,
                                                       Py_ht_pbkdf2);
    if (digest == NULL) {
        goto end;
    }

    /* PKCS5_PBKDF2_HMAC() takes 'int' lengths, so cap the inputs. */
    if (password->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "password is too long.");
        goto end;
    }

    if (salt->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "salt is too long.");
        goto end;
    }

    if (iterations < 1) {
        PyErr_SetString(PyExc_ValueError,
                        "iteration value must be greater than 0.");
        goto end;
    }
    if (iterations > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "iteration value is too great.");
        goto end;
    }

    /* dklen defaults to the digest's output size when not given. */
    if (dklen_obj == Py_None) {
        dklen = EVP_MD_size(digest);
    } else {
        dklen = PyLong_AsLong(dklen_obj);
        if ((dklen == -1) && PyErr_Occurred()) {
            goto end;
        }
    }
    if (dklen < 1) {
        PyErr_SetString(PyExc_ValueError,
                        "key length must be greater than 0.");
        goto end;
    }
    if (dklen > INT_MAX) {
        /* INT_MAX is always smaller than dkLen max (2^32 - 1) * hLen */
        PyErr_SetString(PyExc_OverflowError,
                        "key length is too great.");
        goto end;
    }

    PyBytesWriter *writer = PyBytesWriter_Create(dklen);
    if (writer == NULL) {
        goto end;
    }

    /* Release the GIL: the derivation is CPU-bound and touches no
     * Python objects. */
    Py_BEGIN_ALLOW_THREADS
    retval = PKCS5_PBKDF2_HMAC((const char *)password->buf, (int)password->len,
                               (const unsigned char *)salt->buf, (int)salt->len,
                               iterations, digest, dklen,
                               (unsigned char *)PyBytesWriter_GetData(writer));
    Py_END_ALLOW_THREADS

    if (!retval) {
        PyBytesWriter_Discard(writer);
        notify_ssl_error_occurred_in(Py_STRINGIFY(PKCS5_PBKDF2_HMAC));
        goto end;
    }
    key_obj = PyBytesWriter_Finish(writer);

end:
    if (digest != NULL) {
        PY_EVP_MD_free(digest);
    }
    return key_obj;
}
// --- PBKDF: scrypt (RFC 7914) -----------------------------------------------
/*
* By default, OpenSSL 1.1.0 restricts 'maxmem' in EVP_PBE_scrypt()
* to 32 MiB (1024 * 1024 * 32) but only if 'maxmem = 0' and allows
* for an arbitrary large limit fitting on an uint64_t otherwise.
*
 * For legacy reasons, we limited 'maxmem' to be at most INT_MAX,
* but if users need a more relaxed value, we will revisit this
* limit in the future.
*/
#define HASHLIB_SCRYPT_MAX_MAXMEM INT_MAX
/*
* Limit 'dklen' to INT_MAX even if it can be at most (32 * UINT32_MAX).
*
* See https://datatracker.ietf.org/doc/html/rfc7914.html for details.
*/
#define HASHLIB_SCRYPT_MAX_DKLEN INT_MAX
/*[clinic input]
_hashlib.scrypt

    password: Py_buffer
    *
    salt: Py_buffer
    n: unsigned_long
    r: unsigned_long
    p: unsigned_long
    maxmem: long = 0
    dklen: long = 64

scrypt password-based key derivation function.

[clinic start generated code]*/

static PyObject *
_hashlib_scrypt_impl(PyObject *module, Py_buffer *password, Py_buffer *salt,
                     unsigned long n, unsigned long r, unsigned long p,
                     long maxmem, long dklen)
/*[clinic end generated code: output=d424bc3e8c6b9654 input=bdeac9628d07f7a1]*/
{
    int retval;

    if (password->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError, "password is too long");
        return NULL;
    }
    if (salt->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError, "salt is too long");
        return NULL;
    }
    /* 'n & (n - 1)' is nonzero iff n is not a power of two. */
    if (n < 2 || n & (n - 1)) {
        PyErr_SetString(PyExc_ValueError, "n must be a power of 2");
        return NULL;
    }
    if (maxmem < 0 || maxmem > HASHLIB_SCRYPT_MAX_MAXMEM) {
        PyErr_Format(PyExc_ValueError,
                     "maxmem must be positive and at most %d",
                     HASHLIB_SCRYPT_MAX_MAXMEM);
        return NULL;
    }
    if (dklen < 1 || dklen > HASHLIB_SCRYPT_MAX_DKLEN) {
        PyErr_Format(PyExc_ValueError,
                     "dklen must be at least 1 and at most %d",
                     HASHLIB_SCRYPT_MAX_DKLEN);
        return NULL;
    }

    /* let OpenSSL validate the rest */
    /* A NULL password/key probe call performs parameter validation only. */
    retval = EVP_PBE_scrypt(NULL, 0, NULL, 0, n, r, p,
                            (uint64_t)maxmem, NULL, 0);
    if (!retval) {
        notify_ssl_error_occurred("invalid parameter combination for "
                                  "n, r, p, and maxmem");
        return NULL;
    }

    PyBytesWriter *writer = PyBytesWriter_Create(dklen);
    if (writer == NULL) {
        return NULL;
    }

    /* Release the GIL: scrypt is deliberately memory/CPU expensive. */
    Py_BEGIN_ALLOW_THREADS
    retval = EVP_PBE_scrypt(
        (const char *)password->buf, (size_t)password->len,
        (const unsigned char *)salt->buf, (size_t)salt->len,
        (uint64_t)n, (uint64_t)r, (uint64_t)p, (uint64_t)maxmem,
        (unsigned char *)PyBytesWriter_GetData(writer), (size_t)dklen
    );
    Py_END_ALLOW_THREADS

    if (!retval) {
        PyBytesWriter_Discard(writer);
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_PBE_scrypt));
        return NULL;
    }
    return PyBytesWriter_Finish(writer);
}
#undef HASHLIB_SCRYPT_MAX_DKLEN
#undef HASHLIB_SCRYPT_MAX_MAXMEM
// --- OpenSSL HMAC interface -------------------------------------------------
/*
* Functions prefixed by hashlib_openssl_HMAC_* are wrappers around OpenSSL
* and implement "atomic" operations (e.g., "free"). These functions are used
* by those prefixed by _hashlib_HMAC_* that are methods for HMAC objects, or
* other (local) helper functions prefixed by hashlib_HMAC_*.
*/
#ifdef Py_HAS_OPENSSL3_SUPPORT
/* EVP_MAC_CTX array of parameters specifying the "digest" */
#define HASHLIB_HMAC_OSSL_PARAMS(DIGEST) \
(const OSSL_PARAM []) { \
OSSL_PARAM_utf8_string(OSSL_MAC_PARAM_DIGEST, \
(char *)DIGEST, strlen(DIGEST)), \
OSSL_PARAM_END \
}
#endif
// --- One-shot HMAC interface ------------------------------------------------
/*[clinic input]
_hashlib.hmac_digest as _hashlib_hmac_singleshot

    key: Py_buffer
    msg: Py_buffer
    digest: object

Single-shot HMAC.

[clinic start generated code]*/

static PyObject *
_hashlib_hmac_singleshot_impl(PyObject *module, Py_buffer *key,
                              Py_buffer *msg, PyObject *digest)
/*[clinic end generated code: output=82f19965d12706ac input=0a0790cc3db45c2e]*/
{
    _hashlibstate *state = get_hashlib_state(module);
    unsigned char md[EVP_MAX_MD_SIZE] = {0};
#ifdef Py_HAS_OPENSSL3_SUPPORT
    /* EVP_Q_mac() reports the MAC length as a size_t. */
    size_t md_len = 0;
    const char *digest_name = NULL;
#else
    /* HMAC() reports the MAC length as an unsigned int. */
    unsigned int md_len = 0;
#endif
    unsigned char *result = NULL;
    PY_EVP_MD *evp = NULL;
    int is_xof;

    /* The legacy HMAC() interface takes 'int' lengths; enforce the cap
     * on both code paths for consistent behavior. */
    if (key->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "key is too long.");
        return NULL;
    }
    if (msg->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "msg is too long.");
        return NULL;
    }

#ifdef Py_HAS_OPENSSL3_SUPPORT
    digest_name = get_openssl_digest_name(state, digest, Py_ht_mac, &evp);
    if (digest_name == NULL) {
        assert(evp == NULL);
        return NULL;
    }
    assert(evp != NULL);
    /* Remember whether the digest is an XOF to improve the error message
     * below (XOFs are not usable as HMAC digests). */
    is_xof = PY_EVP_MD_xof(evp);
    Py_BEGIN_ALLOW_THREADS
    result = EVP_Q_mac(
        NULL, OSSL_MAC_NAME_HMAC, NULL, NULL,
        HASHLIB_HMAC_OSSL_PARAMS(digest_name),
        (const void *)key->buf, (size_t)key->len,
        (const unsigned char *)msg->buf, (size_t)msg->len,
        md, sizeof(md), &md_len
    );
    Py_END_ALLOW_THREADS
    PY_EVP_MD_free(evp);
    assert(md_len < (size_t)PY_SSIZE_T_MAX);
#else
    evp = get_openssl_evp_md(state, digest, Py_ht_mac);
    if (evp == NULL) {
        return NULL;
    }
    is_xof = PY_EVP_MD_xof(evp);
    Py_BEGIN_ALLOW_THREADS
    result = HMAC(
        evp,
        (const void *)key->buf, (int)key->len,
        (const unsigned char *)msg->buf, (size_t)msg->len,
        md, &md_len
    );
    Py_END_ALLOW_THREADS
    PY_EVP_MD_free(evp);
#endif

    if (result == NULL) {
        if (is_xof) {
            /* use a better default error message if an XOF is used */
            raise_unsupported_algorithm_error(state, digest);
        }
        else {
#ifdef Py_HAS_OPENSSL3_SUPPORT
            notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_Q_mac));
#else
            notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC));
#endif
        }
        return NULL;
    }
    return PyBytes_FromStringAndSize((const char*)md, md_len);
}
// --- HMAC Object ------------------------------------------------------------
#ifndef Py_HAS_OPENSSL3_SUPPORT
/* Thin wrapper around HMAC_CTX_new() which sets an exception on failure. */
static HMAC_CTX *
py_openssl_wrapper_HMAC_CTX_new(void)
{
    /* Allocate a fresh HMAC_CTX; raise a Python MemoryError on failure
     * so callers can simply propagate NULL. */
    HMAC_CTX *new_ctx = HMAC_CTX_new();
    if (new_ctx != NULL) {
        return new_ctx;
    }
    PyErr_NoMemory();
    return NULL;
}
#endif
static int _hmac_update(HMACobject*, PyObject*);
#ifndef Py_HAS_OPENSSL3_SUPPORT
static const EVP_MD *
_hashlib_hmac_get_md(HMACobject *self)
{
    /* Look up the message digest backing this HMAC context. On failure,
     * record an SSL error and return NULL. */
    assert(self->ctx != NULL);
    const EVP_MD *md = HMAC_CTX_get_md(self->ctx);
    if (md != NULL) {
        return md;
    }
    notify_ssl_error_occurred("missing EVP_MD for HMAC context");
    return NULL;
}
#endif
/*
 * Return hashlib's canonical name for this HMAC object's digest,
 * or NULL with an exception set on failure.
 */
static const char *
hashlib_HMAC_get_hashlib_digest_name(HMACobject *self)
{
#ifdef Py_HAS_OPENSSL3_SUPPORT
    /* EVP_MAC contexts do not expose their digest, so the NID captured
     * at construction time is used instead. */
    return get_hashlib_utf8name_by_nid(self->evp_md_nid);
#else
    const EVP_MD *md = _hashlib_hmac_get_md(self);
    return md == NULL ? NULL : get_hashlib_utf8name_by_evp_md(md);
#endif
}
static int
hashlib_openssl_HMAC_update_once(PY_HMAC_CTX_TYPE *ctx, const Py_buffer *v)
{
    /* Feed one buffer into the HMAC context; 0 on success, -1 (with an
     * SSL error recorded) on failure. */
    const unsigned char *data = (const unsigned char *)v->buf;
    if (PY_HMAC_update(ctx, data, (size_t)v->len)) {
        return 0;
    }
    notify_smart_ssl_error_occurred_in(Py_STRINGIFY(PY_HMAC_update));
    return -1;
}
/* NULL-tolerant wrapper around PY_HMAC_CTX_free: not every OpenSSL
 * version accepted a NULL context, so guard here once. */
static inline void
hashlib_openssl_HMAC_CTX_free(PY_HMAC_CTX_TYPE *ctx)
{
    if (ctx != NULL) {
        PY_HMAC_CTX_free(ctx);
    }
}
/*
 * Duplicate 'self->ctx' while holding the object's lock, so the copy is a
 * consistent snapshot even with concurrent update() calls.
 *
 * Return a new context (caller owns it), or NULL with an exception set.
 */
static PY_HMAC_CTX_TYPE *
hashlib_openssl_HMAC_ctx_copy_with_lock(HMACobject *self)
{
    PY_HMAC_CTX_TYPE *ctx = NULL;
#ifdef Py_HAS_OPENSSL3_SUPPORT
    /* Only the dup itself needs the lock; error reporting happens after. */
    HASHLIB_ACQUIRE_LOCK(self);
    ctx = EVP_MAC_CTX_dup(self->ctx);
    HASHLIB_RELEASE_LOCK(self);
    if (ctx == NULL) {
        notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MAC_CTX_dup));
        goto error;
    }
#else
    int r;
    /* Legacy API: allocate a destination context first, then copy into it. */
    ctx = py_openssl_wrapper_HMAC_CTX_new();
    if (ctx == NULL) {
        return NULL;
    }
    HASHLIB_ACQUIRE_LOCK(self);
    r = HMAC_CTX_copy(ctx, self->ctx);
    HASHLIB_RELEASE_LOCK(self);
    if (r == 0) {
        notify_smart_ssl_error_occurred_in(Py_STRINGIFY(HMAC_CTX_copy));
        goto error;
    }
#endif
    return ctx;

error:
    hashlib_openssl_HMAC_CTX_free(ctx);
    return NULL;
}
/*
 * Create and key a new HMAC context from a DIGESTMOD value.
 *
 * With OpenSSL 3.x support, '*nid' receives the NID of the digest that
 * was used (see the comment below); otherwise 'nid' must be NULL.
 *
 * Return a new context (caller owns it), or NULL with an exception set.
 */
static PY_HMAC_CTX_TYPE *
hashlib_HMAC_CTX_new_from_digestmod(_hashlibstate *state,
                                    Py_buffer *key, PyObject *digestmod,
                                    int *nid)
{
    PY_HMAC_CTX_TYPE *ctx = NULL;
    PY_EVP_MD *md = NULL;
    int is_xof, r;
#ifdef Py_HAS_OPENSSL3_SUPPORT
    const char *digest = NULL;
#endif

#ifdef Py_HAS_OPENSSL3_SUPPORT
    /*
     * OpenSSL 3.0 does not provide a way to extract the NID from an EVP_MAC
     * object and does not expose the underlying digest name. The reason is
     * that OpenSSL 3.0 treats HMAC objects as being the "same", differing
     * only by their *context* parameters. While it is *required* to set
     * the digest name when constructing EVP_MAC_CTX objects, that name
     * is unfortunately not recoverable through EVP_MAC_CTX_get_params().
     *
     * On the other hand, the (deprecated) interface based on HMAC_CTX is
     * based on EVP_MD, which allows to treat HMAC objects as if they were
     * hash functions when querying the digest name.
     *
     * Since HMAC objects are constructed from DIGESTMOD values and since
     * we have a way to map DIGESTMOD to EVP_MD objects, and then to NIDs,
     * HMAC objects based on EVP_MAC will store the NID of the EVP_MD we
     * used to deduce the digest name to pass to EVP_MAC_CTX_set_params().
     */
    assert(nid != NULL);
    digest = get_openssl_digest_name(state, digestmod, Py_ht_mac, &md);
    assert((digest == NULL && md == NULL) || (digest != NULL && md != NULL));
    if (digest == NULL) {
        *nid = NID_undef;
        return NULL;
    }
    *nid = EVP_MD_nid(md);
    /* The EVP_MD is only needed for the NID and the XOF check. */
    is_xof = PY_EVP_MD_xof(md);
    PY_EVP_MD_free(md);
    /*
     * OpenSSL is responsible for managing the EVP_MAC object's ref. count
     * by calling EVP_MAC_up_ref() and EVP_MAC_free() in EVP_MAC_CTX_new()
     * and EVP_MAC_CTX_free() respectively.
     */
    ctx = EVP_MAC_CTX_new(state->evp_hmac);
    if (ctx == NULL) {
        /* EVP_MAC_CTX_new() may also set an ERR_R_EVP_LIB error */
        notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MAC_CTX_new));
        return NULL;
    }
    r = EVP_MAC_init(
        ctx,
        (const unsigned char *)key->buf,
        (size_t)key->len,
        HASHLIB_HMAC_OSSL_PARAMS(digest)
    );
#else
    assert(nid == NULL);
    md = get_openssl_evp_md(state, digestmod, Py_ht_mac);
    if (md == NULL) {
        return NULL;
    }
    is_xof = PY_EVP_MD_xof(md);
    ctx = py_openssl_wrapper_HMAC_CTX_new();
    if (ctx == NULL) {
        PY_EVP_MD_free(md);
        return NULL;
    }
    r = HMAC_Init_ex(ctx, key->buf, (int)key->len, md, NULL /* impl */);
    PY_EVP_MD_free(md);
#endif
    if (r == 0) {
        if (is_xof) {
            /* use a better default error message if an XOF is used */
            raise_unsupported_algorithm_error(state, digestmod);
        }
        else {
#ifdef Py_HAS_OPENSSL3_SUPPORT
            notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_MAC_init));
#else
            notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC_Init_ex));
#endif
        }
        return NULL;
    }
    return ctx;
}
/*[clinic input]
_hashlib.hmac_new

    key: Py_buffer
    msg as msg_obj: object(c_default="NULL") = b''
    digestmod: object(c_default="NULL") = None

Return a new hmac object.

[clinic start generated code]*/

static PyObject *
_hashlib_hmac_new_impl(PyObject *module, Py_buffer *key, PyObject *msg_obj,
                       PyObject *digestmod)
/*[clinic end generated code: output=c20d9e4d9ed6d219 input=5f4071dcc7f34362]*/
{
    _hashlibstate *state = get_hashlib_state(module);
    PY_HMAC_CTX_TYPE *ctx = NULL;
    HMACobject *self = NULL;
#ifdef Py_HAS_OPENSSL3_SUPPORT
    int nid;
#endif

    if (key->len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "key is too long.");
        return NULL;
    }

    if (digestmod == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "Missing required parameter 'digestmod'.");
        return NULL;
    }

#ifdef Py_HAS_OPENSSL3_SUPPORT
    ctx = hashlib_HMAC_CTX_new_from_digestmod(state, key, digestmod, &nid);
#else
    ctx = hashlib_HMAC_CTX_new_from_digestmod(state, key, digestmod, NULL);
#endif
    if (ctx == NULL) {
        assert(PyErr_Occurred());
        return NULL;
    }

    self = PyObject_New(HMACobject, state->HMAC_type);
    if (self == NULL) {
        goto error;
    }
    self->ctx = ctx;
    ctx = NULL; // 'ctx' is now owned by 'self'
#ifdef Py_HAS_OPENSSL3_SUPPORT
    assert(nid != NID_undef);
    self->evp_md_nid = nid;
#endif
    HASHLIB_INIT_MUTEX(self);

    /* feed initial data */
    if ((msg_obj != NULL) && (msg_obj != Py_None)) {
        if (_hmac_update(self, msg_obj) < 0) {
            goto error;
        }
    }
    return (PyObject *)self;

error:
    /* 'ctx' is NULL here if ownership already transferred to 'self'. */
    hashlib_openssl_HMAC_CTX_free(ctx);
    Py_XDECREF(self);
    return NULL;
}
/* helper functions */
/* Sentinel returned by _hashlib_hmac_digest_size() on error; a valid,
 * fully-initialized context never reports a zero digest size. */
#define BAD_DIGEST_SIZE 0

/*
 * Return the digest size in bytes.
 *
 * On error, set an exception and return BAD_DIGEST_SIZE.
 */
static unsigned int
_hashlib_hmac_digest_size(HMACobject *self)
{
    assert(EVP_MAX_MD_SIZE < INT_MAX);
#ifdef Py_HAS_OPENSSL3_SUPPORT
    assert(self->ctx != NULL);
    size_t digest_size = EVP_MAC_CTX_get_mac_size(self->ctx);
    assert(digest_size <= (size_t)EVP_MAX_MD_SIZE);
#else
    const EVP_MD *md = _hashlib_hmac_get_md(self);
    if (md == NULL) {
        return BAD_DIGEST_SIZE;
    }
    int digest_size = EVP_MD_size(md);
    /* digest_size < 0 iff EVP_MD context is NULL (which is impossible here) */
    assert(digest_size >= 0);
    assert(digest_size <= (int)EVP_MAX_MD_SIZE);
#endif
    /* digest_size == 0 means that the context is not entirely initialized */
    if (digest_size == 0) {
        raise_ssl_error(PyExc_ValueError, "missing digest size");
        return BAD_DIGEST_SIZE;
    }
    return (unsigned int)digest_size;
}
/*
 * Hash the bytes-like object 'obj' into the HMAC state.
 *
 * Locking and (for large inputs) GIL release are handled by the
 * HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED macro.
 *
 * Return 0 on success, -1 with an exception set on error.
 */
static int
_hmac_update(HMACobject *self, PyObject *obj)
{
    int r;
    Py_buffer view = {0};

    GET_BUFFER_VIEW_OR_ERROR(obj, &view, return -1);
    HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
        self, view.len,
        r = hashlib_openssl_HMAC_update_once(self->ctx, &view)
    );
    PyBuffer_Release(&view);
    return r;
}
/*[clinic input]
_hashlib.HMAC.copy

Return a copy ("clone") of the HMAC object.

[clinic start generated code]*/

static PyObject *
_hashlib_HMAC_copy_impl(HMACobject *self)
/*[clinic end generated code: output=29aa28b452833127 input=e2fa6a05db61a4d6]*/
{
    HMACobject *retval;

    /* Snapshot the context under the source object's lock. */
    PY_HMAC_CTX_TYPE *ctx = hashlib_openssl_HMAC_ctx_copy_with_lock(self);
    if (ctx == NULL) {
        return NULL;
    }

    retval = PyObject_New(HMACobject, Py_TYPE(self));
    if (retval == NULL) {
        PY_HMAC_CTX_free(ctx);
        return NULL;
    }
    retval->ctx = ctx;
#ifdef Py_HAS_OPENSSL3_SUPPORT
    retval->evp_md_nid = self->evp_md_nid;
#endif
    HASHLIB_INIT_MUTEX(retval);
    return (PyObject *)retval;
}
static void
_hmac_dealloc(PyObject *op)
{
    /* Release the OpenSSL context, then the object itself, and finally
     * the heap type's reference (heap types own their instances' type). */
    HMACobject *self = HMACobject_CAST(op);
    PyTypeObject *tp = Py_TYPE(self);
    PY_HMAC_CTX_TYPE *ctx = self->ctx;
    self->ctx = NULL;
    if (ctx != NULL) {
        PY_HMAC_CTX_free(ctx);
    }
    PyObject_Free(self);
    Py_DECREF(tp);
}
static PyObject *
_hmac_repr(PyObject *op)
{
    /* repr() shows the digest name and the object's address. */
    HMACobject *self = HMACobject_CAST(op);
    const char *name = hashlib_HMAC_get_hashlib_digest_name(self);
    if (name == NULL) {
        assert(PyErr_Occurred());
        return NULL;
    }
    return PyUnicode_FromFormat("<%s HMAC object @ %p>", name, self);
}
/*[clinic input]
_hashlib.HMAC.update

    msg: object

Update the HMAC object with msg.

[clinic start generated code]*/

static PyObject *
_hashlib_HMAC_update_impl(HMACobject *self, PyObject *msg)
/*[clinic end generated code: output=f31f0ace8c625b00 input=1829173bb3cfd4e6]*/
{
    if (_hmac_update(self, msg) < 0) {
        return NULL;
    }
    Py_RETURN_NONE;
}
/*
 * Extract the MAC value to 'buf' and return the digest size.
 *
 * The buffer 'buf' must have at least _hashlib_hmac_digest_size(self)
 * bytes. Smaller buffers lead to undefined behaviors.
 *
 * On error, set an exception and return -1.
 */
static Py_ssize_t
_hmac_digest(HMACobject *self, unsigned char *buf)
{
    unsigned int digest_size = _hashlib_hmac_digest_size(self);
    assert(digest_size <= EVP_MAX_MD_SIZE);
    if (digest_size == BAD_DIGEST_SIZE) {
        assert(PyErr_Occurred());
        return -1;
    }
    /* Finalize a throwaway copy so that the live context can still
     * accept further update() calls afterwards. */
    PY_HMAC_CTX_TYPE *ctx = hashlib_openssl_HMAC_ctx_copy_with_lock(self);
    if (ctx == NULL) {
        return -1;
    }
#ifdef Py_HAS_OPENSSL3_SUPPORT
    int r = EVP_MAC_final(ctx, buf, NULL, digest_size);
#else
    int r = HMAC_Final(ctx, buf, NULL);
#endif
    PY_HMAC_CTX_free(ctx);
    if (r == 0) {
#ifdef Py_HAS_OPENSSL3_SUPPORT
        notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_MAC_final));
#else
        notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC_Final));
#endif
        return -1;
    }
    return digest_size;
}
/*[clinic input]
_hashlib.HMAC.digest

Return the digest of the bytes passed to the update() method so far.

[clinic start generated code]*/

static PyObject *
_hashlib_HMAC_digest_impl(HMACobject *self)
/*[clinic end generated code: output=1b1424355af7a41e input=bff07f74da318fb4]*/
{
    unsigned char buf[EVP_MAX_MD_SIZE];
    Py_ssize_t n = _hmac_digest(self, buf);
    return n < 0 ? NULL : PyBytes_FromStringAndSize((const char *)buf, n);
}
/*[clinic input]
@permit_long_summary
@permit_long_docstring_body
_hashlib.HMAC.hexdigest

Return hexadecimal digest of the bytes passed to the update() method so far.

This may be used to exchange the value safely in email or other non-binary
environments.

[clinic start generated code]*/

static PyObject *
_hashlib_HMAC_hexdigest_impl(HMACobject *self)
/*[clinic end generated code: output=80d825be1eaae6a7 input=5e48db83ab1a4d19]*/
{
    unsigned char buf[EVP_MAX_MD_SIZE];
    Py_ssize_t n = _hmac_digest(self, buf);
    return n < 0 ? NULL : _Py_strhex((const char *)buf, n);
}
static PyObject *
_hashlib_hmac_get_digest_size(PyObject *op, void *Py_UNUSED(closure))
{
    /* Getter for HMAC.digest_size. */
    HMACobject *self = HMACobject_CAST(op);
    unsigned int size = _hashlib_hmac_digest_size(self);
    if (size == BAD_DIGEST_SIZE) {
        return NULL;
    }
    return PyLong_FromLong(size);
}
/* Getter for HMAC.block_size: the underlying digest's block size. */
static PyObject *
_hashlib_hmac_get_block_size(PyObject *op, void *Py_UNUSED(closure))
{
    HMACobject *self = HMACobject_CAST(op);
#ifdef Py_HAS_OPENSSL3_SUPPORT
    assert(self->ctx != NULL);
    return PyLong_FromSize_t(EVP_MAC_CTX_get_block_size(self->ctx));
#else
    const EVP_MD *md = _hashlib_hmac_get_md(self);
    return md == NULL ? NULL : PyLong_FromLong(EVP_MD_block_size(md));
#endif
}
static PyObject *
_hashlib_hmac_get_name(PyObject *op, void *Py_UNUSED(closure))
{
    /* Getter for HMAC.name, e.g. "hmac-sha256". */
    HMACobject *self = HMACobject_CAST(op);
    const char *name = hashlib_HMAC_get_hashlib_digest_name(self);
    if (name == NULL) {
        assert(PyErr_Occurred());
        return NULL;
    }
    return PyUnicode_FromFormat("hmac-%s", name);
}
/* Method table for _hashlib.HMAC instances. */
static PyMethodDef HMAC_methods[] = {
    _HASHLIB_HMAC_UPDATE_METHODDEF
    _HASHLIB_HMAC_DIGEST_METHODDEF
    _HASHLIB_HMAC_HEXDIGEST_METHODDEF
    _HASHLIB_HMAC_COPY_METHODDEF
    {NULL, NULL}  /* sentinel */
};

/* Read-only computed attributes of _hashlib.HMAC instances. */
static PyGetSetDef HMAC_getset[] = {
    {"digest_size", _hashlib_hmac_get_digest_size, NULL, NULL, NULL},
    {"block_size", _hashlib_hmac_get_block_size, NULL, NULL, NULL},
    {"name", _hashlib_hmac_get_name, NULL, NULL, NULL},
    {NULL}  /* Sentinel */
};

/* tp_doc text for the _hashlib.HMAC heap type. */
PyDoc_STRVAR(hmactype_doc,
"The object used to calculate HMAC of a message.\n\
\n\
Methods:\n\
\n\
update() -- updates the current digest with an additional string\n\
digest() -- return the current digest value\n\
hexdigest() -- return the current digest as a string of hexadecimal digits\n\
copy() -- return a copy of the current hash object\n\
\n\
Attributes:\n\
\n\
name -- the name, including the hash algorithm used by this object\n\
digest_size -- number of bytes in digest() output\n");

static PyType_Slot HMACtype_slots[] = {
    {Py_tp_doc, (char *)hmactype_doc},
    {Py_tp_repr, _hmac_repr},
    {Py_tp_dealloc, _hmac_dealloc},
    {Py_tp_methods, HMAC_methods},
    {Py_tp_getset, HMAC_getset},
    {0, NULL}
};

/* Heap type spec; instantiated per-module in hashlib_init_hmactype(). */
PyType_Spec HMACtype_spec = {
    "_hashlib.HMAC",    /* name */
    sizeof(HMACobject),     /* basicsize */
    .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE,
    .slots = HMACtype_slots,
};
/* State for our callback function so that it can accumulate a result. */
typedef struct _internal_name_mapper_state {
    PyObject *set;   /* set of algorithm-name strings being built */
    int error;       /* set to 1 if any callback invocation failed */
} _InternalNameMapperState;


/* A callback function to pass to OpenSSL's OBJ_NAME_do_all(...) */
static void
#ifdef Py_HAS_OPENSSL3_SUPPORT
_openssl_hash_name_mapper(EVP_MD *md, void *arg)
#else
_openssl_hash_name_mapper(const EVP_MD *md, const char *from,
                          const char *to, void *arg)
#endif
{
    _InternalNameMapperState *state = (_InternalNameMapperState *)arg;
    PyObject *py_name;

    assert(state != NULL);
    // ignore all undefined providers
    if ((md == NULL) || (EVP_MD_nid(md) == NID_undef)) {
        return;
    }

    const char *name = get_hashlib_utf8name_by_evp_md(md);
    assert(name != NULL || PyErr_Occurred());
    py_name = name == NULL ? NULL : PyUnicode_FromString(name);
    if (py_name == NULL) {
        /* Cannot propagate an error through the C callback; record it. */
        state->error = 1;
    } else {
        if (PySet_Add(state->set, py_name) != 0) {
            state->error = 1;
        }
        Py_DECREF(py_name);
    }
}
/* Ask OpenSSL for a list of supported message digests, filling in a Python set. */
/*
 * Populate the module attribute 'openssl_md_meth_names' with a frozenset
 * of every digest name OpenSSL makes available.
 *
 * Return 0 on success, -1 with an exception set on error.
 */
static int
hashlib_md_meth_names(PyObject *module)
{
    _InternalNameMapperState state = {
        .set = PyFrozenSet_New(NULL),
        .error = 0
    };
    if (state.set == NULL) {
        return -1;
    }

#ifdef Py_HAS_OPENSSL3_SUPPORT
    // get algorithms from all activated providers in default context
    EVP_MD_do_all_provided(NULL, &_openssl_hash_name_mapper, &state);
#else
    EVP_MD_do_all(&_openssl_hash_name_mapper, &state);
#endif

    if (state.error) {
        Py_DECREF(state.set);
        return -1;
    }

    /* PyModule_Add() steals the reference to 'state.set'. */
    return PyModule_Add(module, "openssl_md_meth_names", state.set);
}
/*[clinic input]
_hashlib.get_fips_mode -> int

Determine the OpenSSL FIPS mode of operation.

For OpenSSL 3.0.0 and newer it returns the state of the default provider
in the default OSSL context. It's not quite the same as FIPS_mode() but good
enough for unittests.

Effectively any non-zero return value indicates FIPS mode;
values other than 1 may have additional significance.

[clinic start generated code]*/

static int
_hashlib_get_fips_mode_impl(PyObject *module)
/*[clinic end generated code: output=87eece1bab4d3fa9 input=2db61538c41c6fef]*/
{
#ifdef Py_HAS_OPENSSL3_SUPPORT
    return EVP_default_properties_is_fips_enabled(NULL);
#else
    /* Clear the error queue first so a stale error is not mistaken for
     * a FIPS_mode() failure below. */
    ERR_clear_error();
    int result = FIPS_mode();
    if (result == 0 && ERR_peek_last_error()) {
        // "If the library was built without support of the FIPS Object Module,
        // then the function will return 0 with an error code of
        // CRYPTO_R_FIPS_MODE_NOT_SUPPORTED (0x0f06d065)."
        // But 0 is also a valid result value.
        notify_ssl_error_occurred_in(Py_STRINGIFY(FIPS_mode));
        return -1;
    }
    return result;
#endif
}
/*
 * Timing-safe comparison of two byte buffers; returns 1 when equal.
 *
 * NOTE: constant-time with respect to the buffer *contents*; do not
 * "simplify" this function, as restructuring could reintroduce a
 * data-dependent timing channel.
 */
static int
_tscmp(const unsigned char *a, const unsigned char *b,
       Py_ssize_t len_a, Py_ssize_t len_b)
{
    /* loop count depends on length of b. Might leak very little timing
     * information if sizes are different.
     */
    Py_ssize_t length = len_b;
    const void *left = a;
    const void *right = b;
    int result = 0;

    /* On a length mismatch, still run the comparison (against b itself)
     * so the work done does not depend on the contents. */
    if (len_a != length) {
        left = b;
        result = 1;
    }

    result |= CRYPTO_memcmp(left, right, length);

    return (result == 0);
}
/* NOTE: Keep in sync with _operator.c implementation. */

/*[clinic input]
_hashlib.compare_digest

    a: object
    b: object
    /

Return 'a == b'.

This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.

a and b must both be of the same type: either str (ASCII only),
or any bytes-like object.

Note: If a and b are of different lengths, or if an error occurs,
a timing attack could theoretically reveal information about the
types and lengths of a and b--but not their values.

[clinic start generated code]*/

static PyObject *
_hashlib_compare_digest_impl(PyObject *module, PyObject *a, PyObject *b)
/*[clinic end generated code: output=6f1c13927480aed9 input=9c40c6e566ca12f5]*/
{
    int rc;

    /* ASCII unicode string */
    if(PyUnicode_Check(a) && PyUnicode_Check(b)) {
        /* Non-ASCII strings are rejected: multi-byte code units would
         * make byte-wise constant-time comparison meaningless. */
        if (!PyUnicode_IS_ASCII(a) || !PyUnicode_IS_ASCII(b)) {
            PyErr_SetString(PyExc_TypeError,
                            "comparing strings with non-ASCII characters is "
                            "not supported");
            return NULL;
        }

        rc = _tscmp(PyUnicode_DATA(a),
                    PyUnicode_DATA(b),
                    PyUnicode_GET_LENGTH(a),
                    PyUnicode_GET_LENGTH(b));
    }
    /* fallback to buffer interface for bytes, bytearray and other */
    else {
        Py_buffer view_a;
        Py_buffer view_b;

        if (PyObject_CheckBuffer(a) == 0 && PyObject_CheckBuffer(b) == 0) {
            PyErr_Format(PyExc_TypeError,
                         "unsupported operand types(s) or combination of types: "
                         "'%.100s' and '%.100s'",
                         Py_TYPE(a)->tp_name, Py_TYPE(b)->tp_name);
            return NULL;
        }

        if (PyObject_GetBuffer(a, &view_a, PyBUF_SIMPLE) == -1) {
            return NULL;
        }
        if (view_a.ndim > 1) {
            PyErr_SetString(PyExc_BufferError,
                            "Buffer must be single dimension");
            PyBuffer_Release(&view_a);
            return NULL;
        }
        if (PyObject_GetBuffer(b, &view_b, PyBUF_SIMPLE) == -1) {
            PyBuffer_Release(&view_a);
            return NULL;
        }
        if (view_b.ndim > 1) {
            PyErr_SetString(PyExc_BufferError,
                            "Buffer must be single dimension");
            PyBuffer_Release(&view_a);
            PyBuffer_Release(&view_b);
            return NULL;
        }

        rc = _tscmp((const unsigned char*)view_a.buf,
                    (const unsigned char*)view_b.buf,
                    view_a.len,
                    view_b.len);

        PyBuffer_Release(&view_a);
        PyBuffer_Release(&view_b);
    }

    return PyBool_FromLong(rc);
}
/* List of functions exported by this module */
static struct PyMethodDef EVP_functions[] = {
    _HASHLIB_HASH_NEW_METHODDEF
    PBKDF2_HMAC_METHODDEF
    _HASHLIB_SCRYPT_METHODDEF
    _HASHLIB_GET_FIPS_MODE_METHODDEF
    _HASHLIB_COMPARE_DIGEST_METHODDEF
    _HASHLIB_HMAC_SINGLESHOT_METHODDEF
    _HASHLIB_HMAC_NEW_METHODDEF
    _HASHLIB_OPENSSL_MD5_METHODDEF
    _HASHLIB_OPENSSL_SHA1_METHODDEF
    _HASHLIB_OPENSSL_SHA224_METHODDEF
    _HASHLIB_OPENSSL_SHA256_METHODDEF
    _HASHLIB_OPENSSL_SHA384_METHODDEF
    _HASHLIB_OPENSSL_SHA512_METHODDEF
    _HASHLIB_OPENSSL_SHA3_224_METHODDEF
    _HASHLIB_OPENSSL_SHA3_256_METHODDEF
    _HASHLIB_OPENSSL_SHA3_384_METHODDEF
    _HASHLIB_OPENSSL_SHA3_512_METHODDEF
    _HASHLIB_OPENSSL_SHAKE_128_METHODDEF
    _HASHLIB_OPENSSL_SHAKE_256_METHODDEF
    {NULL,      NULL}            /* Sentinel */
};
/* Initialize this module. */

/* GC traversal hook: visit every PyObject held in the module state. */
static int
hashlib_traverse(PyObject *m, visitproc visit, void *arg)
{
    _hashlibstate *state = get_hashlib_state(m);
    Py_VISIT(state->HASH_type);
    Py_VISIT(state->HMAC_type);
#ifdef PY_OPENSSL_HAS_SHAKE
    Py_VISIT(state->HASHXOF_type);
#endif
    Py_VISIT(state->constructs);
    Py_VISIT(state->unsupported_digestmod_error);
    return 0;
}

/* GC clear hook: drop every reference (and non-PyObject resources)
 * held in the module state. Must mirror hashlib_traverse(). */
static int
hashlib_clear(PyObject *m)
{
    _hashlibstate *state = get_hashlib_state(m);
    Py_CLEAR(state->HASH_type);
    Py_CLEAR(state->HMAC_type);
#ifdef PY_OPENSSL_HAS_SHAKE
    Py_CLEAR(state->HASHXOF_type);
#endif
    Py_CLEAR(state->constructs);
    Py_CLEAR(state->unsupported_digestmod_error);

    if (state->hashtable != NULL) {
        _Py_hashtable_destroy(state->hashtable);
        state->hashtable = NULL;
    }
#ifdef Py_HAS_OPENSSL3_SUPPORT
    if (state->evp_hmac != NULL) {
        EVP_MAC_free(state->evp_hmac);
        state->evp_hmac = NULL;
    }
#endif
    return 0;
}

/* m_free slot: deallocation reuses the clear logic. */
static void
hashlib_free(void *m)
{
    (void)hashlib_clear((PyObject *)m);
}
/* Py_mod_exec functions */

/* Create the name -> hash-entry lookup table in the module state.
 * Returns 0 on success, -1 (with MemoryError set) on failure. */
static int
hashlib_init_hashtable(PyObject *module)
{
    _hashlibstate *state = get_hashlib_state(module);
    state->hashtable = py_hashentry_table_new();
    if (state->hashtable == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    return 0;
}
/* Create the heap type for HASH objects and expose it on the module.
 * Must run before hashlib_init_HASHXOF_type(), which subclasses it. */
static int
hashlib_init_HASH_type(PyObject *module)
{
    _hashlibstate *state = get_hashlib_state(module);
    state->HASH_type = (PyTypeObject *)PyType_FromSpec(&HASHobject_type_spec);
    if (state->HASH_type == NULL) {
        return -1;
    }
    /* PyModule_AddType also makes the type reachable as an attribute. */
    if (PyModule_AddType(module, state->HASH_type) < 0) {
        return -1;
    }
    return 0;
}
/* Create the HASHXOF heap type (extendable-output functions, i.e. SHAKE)
 * as a subclass of HASH. A no-op when SHAKE support is absent. */
static int
hashlib_init_HASHXOF_type(PyObject *module)
{
#ifdef PY_OPENSSL_HAS_SHAKE
    _hashlibstate *state = get_hashlib_state(module);

    /* Relies on hashlib_init_HASH_type() having run first (slot order). */
    if (state->HASH_type == NULL) {
        return -1;
    }

    state->HASHXOF_type = (PyTypeObject *)PyType_FromSpecWithBases(
        &HASHXOFobject_type_spec, (PyObject *)state->HASH_type
    );
    if (state->HASHXOF_type == NULL) {
        return -1;
    }
    if (PyModule_AddType(module, state->HASHXOF_type) < 0) {
        return -1;
    }
#endif
    return 0;
}
/* Create the HMAC heap type and, on OpenSSL 3, pre-fetch the EVP_MAC
 * implementation for "HMAC" so per-object creation does not have to. */
static int
hashlib_init_hmactype(PyObject *module)
{
    _hashlibstate *state = get_hashlib_state(module);
    state->HMAC_type = (PyTypeObject *)PyType_FromSpec(&HMACtype_spec);
    if (state->HMAC_type == NULL) {
        return -1;
    }
    if (PyModule_AddType(module, state->HMAC_type) < 0) {
        return -1;
    }
#ifdef Py_HAS_OPENSSL3_SUPPORT
    state->evp_hmac = EVP_MAC_fetch(NULL, "HMAC", NULL);
    if (state->evp_hmac == NULL) {
        /* Drop OpenSSL's error queue before raising a Python exception. */
        ERR_clear_error();
        PyErr_SetString(PyExc_ImportError, "cannot initialize EVP_MAC HMAC");
        return -1;
    }
#endif
    return 0;
}
/* Build the constructor-to-name mapping used by hashlib.
 * Scans the module's method table for functions named "openssl_*" and maps
 * each bound function object to its digest name, e.g.
 *     {_hashlib.openssl_sha256: "sha256", ...}
 * The mapping is stored in state->constructs and also exposed read-only as
 * the module attribute "_constructors" (a dict proxy). */
static int
hashlib_init_constructors(PyObject *module)
{
    /* Create dict from builtin openssl_hash functions to name
     * {_hashlib.openssl_sha256: "sha256", ...}
     */
    PyModuleDef *mdef;
    PyMethodDef *fdef;
    PyObject *func, *name_obj;
    _hashlibstate *state = get_hashlib_state(module);

    mdef = PyModule_GetDef(module);
    if (mdef == NULL) {
        return -1;
    }

    state->constructs = PyDict_New();
    if (state->constructs == NULL) {
        return -1;
    }

    for (fdef = mdef->m_methods; fdef->ml_name != NULL; fdef++) {
        /* Skip everything that does not start with "openssl_". */
        if (strncmp(fdef->ml_name, "openssl_", 8)) {
            continue;
        }
        /* Digest name is the part after the "openssl_" prefix. */
        name_obj = PyUnicode_FromString(fdef->ml_name + 8);
        if (name_obj == NULL) {
            return -1;
        }
        func = PyObject_GetAttrString(module, fdef->ml_name);
        if (func == NULL) {
            Py_DECREF(name_obj);
            return -1;
        }
        int rc = PyDict_SetItem(state->constructs, func, name_obj);
        Py_DECREF(func);
        Py_DECREF(name_obj);
        if (rc < 0) {
            return -1;
        }
    }

    /* PyModule_Add steals the reference to the new dict proxy. */
    return PyModule_Add(module, "_constructors",
                        PyDictProxy_New(state->constructs));
}
/* Create the UnsupportedDigestmodError exception (a ValueError subclass),
 * keep a reference in the module state and expose it on the module. */
static int
hashlib_exception(PyObject *module)
{
    _hashlibstate *state = get_hashlib_state(module);
    state->unsupported_digestmod_error = PyErr_NewException(
        "_hashlib.UnsupportedDigestmodError", PyExc_ValueError, NULL);
    if (state->unsupported_digestmod_error == NULL) {
        return -1;
    }
    if (PyModule_AddObjectRef(module, "UnsupportedDigestmodError",
                              state->unsupported_digestmod_error) < 0) {
        return -1;
    }
    return 0;
}
/* Export module-level integer constants. _GIL_MINSIZE is the data size
 * threshold above which hashing releases the GIL. */
static int
hashlib_constants(PyObject *module)
{
    if (PyModule_AddIntConstant(module, "_GIL_MINSIZE",
                                HASHLIB_GIL_MINSIZE) < 0)
    {
        return -1;
    }
    return 0;
}
/* Multi-phase init slots, executed in order. HASH_type must be created
 * before HASHXOF_type (subclass relationship). */
static PyModuleDef_Slot hashlib_slots[] = {
    {Py_mod_exec, hashlib_init_hashtable},
    {Py_mod_exec, hashlib_init_HASH_type},
    {Py_mod_exec, hashlib_init_HASHXOF_type},
    {Py_mod_exec, hashlib_init_hmactype},
    {Py_mod_exec, hashlib_md_meth_names},
    {Py_mod_exec, hashlib_init_constructors},
    {Py_mod_exec, hashlib_exception},
    {Py_mod_exec, hashlib_constants},
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL}
};
/* Module definition: per-interpreter state of sizeof(_hashlibstate),
 * multi-phase initialization via hashlib_slots. */
static struct PyModuleDef _hashlibmodule = {
    PyModuleDef_HEAD_INIT,
    .m_name = "_hashlib",
    .m_doc = "OpenSSL interface for hashlib module",
    .m_size = sizeof(_hashlibstate),
    .m_methods = EVP_functions,
    .m_slots = hashlib_slots,
    .m_traverse = hashlib_traverse,
    .m_clear = hashlib_clear,
    .m_free = hashlib_free
};
/* Module entry point (multi-phase init: returns the def, not a module). */
PyMODINIT_FUNC
PyInit__hashlib(void)
{
    return PyModuleDef_Init(&_hashlibmodule);
}
|
c
|
github
|
https://github.com/python/cpython
|
Modules/_hashopenssl.c
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the IRC window.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import re
import logging
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QByteArray, QTimer
from PyQt5.QtWidgets import QWidget, QToolButton, QLabel, QTabWidget
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
try:
from PyQt5.QtNetwork import QSslSocket, QSslConfiguration
from E5Network.E5SslErrorHandler import E5SslErrorHandler
SSL_AVAILABLE = True
except ImportError:
SSL_AVAILABLE = False
from E5Gui import E5MessageBox
from .Ui_IrcWidget import Ui_IrcWidget
import Preferences
import UI.PixmapCache
from Globals import isMacPlatform
from UI.Info import Version, Copyright
class IrcWidget(QWidget, Ui_IrcWidget):
    """
    Class implementing the IRC window.

    @signal autoConnected() emitted after an automatic connection was initiated
    """
    autoConnected = pyqtSignal()

    # Connection state values kept in self.__connectionState.
    ServerDisconnected = 1
    ServerConnected = 2
    ServerConnecting = 3
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(IrcWidget, self).__init__(parent)
self.setupUi(self)
from .IrcNetworkManager import IrcNetworkManager
self.__ircNetworkManager = IrcNetworkManager(self)
self.__leaveButton = QToolButton(self)
self.__leaveButton.setIcon(
UI.PixmapCache.getIcon("ircCloseChannel.png"))
self.__leaveButton.setToolTip(
self.tr("Press to leave the current channel"))
self.__leaveButton.clicked.connect(self.__leaveChannel)
self.__leaveButton.setEnabled(False)
self.channelsWidget.setCornerWidget(
self.__leaveButton, Qt.BottomRightCorner)
self.channelsWidget.setTabsClosable(False)
if not isMacPlatform():
self.channelsWidget.setTabPosition(QTabWidget.South)
height = self.height()
self.splitter.setSizes([height * 0.6, height * 0.4])
self.__channelList = []
self.__channelTypePrefixes = ""
self.__userName = ""
self.__identityName = ""
self.__quitMessage = ""
self.__nickIndex = -1
self.__nickName = ""
self.__server = None
self.__registering = False
self.__connectionState = IrcWidget.ServerDisconnected
self.__sslErrorLock = False
self.__buffer = ""
self.__userPrefix = {}
self.__socket = None
if SSL_AVAILABLE:
self.__sslErrorHandler = E5SslErrorHandler(self)
else:
self.__sslErrorHandler = None
self.__patterns = [
# :foo_!n=foo@foohost.bar.net PRIVMSG bar_ :some long message
(re.compile(r":([^!]+)!([^ ]+)\sPRIVMSG\s([^ ]+)\s:(.*)"),
self.__query),
# :foo.bar.net COMMAND some message
(re.compile(r""":([^ ]+)\s+([A-Z]+)\s+(.+)"""),
self.__handleNamedMessage),
# :foo.bar.net 123 * :info
(re.compile(r""":([^ ]+)\s+(\d{3})\s+(.+)"""),
self.__handleNumericMessage),
# PING :ping message
(re.compile(r"""PING\s+:(.*)"""), self.__ping),
]
self.__prefixRe = re.compile(r""".*\sPREFIX=\((.*)\)([^ ]+).*""")
self.__chanTypesRe = re.compile(r""".*\sCHANTYPES=([^ ]+).*""")
ircPic = UI.PixmapCache.getPixmap("irc128.png")
self.__emptyLabel = QLabel()
self.__emptyLabel.setPixmap(ircPic)
self.__emptyLabel.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.channelsWidget.addTab(self.__emptyLabel, "")
# all initialized, do connections now
self.__ircNetworkManager.dataChanged.connect(self.__networkDataChanged)
self.networkWidget.initialize(self.__ircNetworkManager)
self.networkWidget.connectNetwork.connect(self.__connectNetwork)
self.networkWidget.editNetwork.connect(self.__editNetwork)
self.networkWidget.joinChannel.connect(self.__joinChannel)
self.networkWidget.nickChanged.connect(self.__changeNick)
self.networkWidget.sendData.connect(self.__send)
self.networkWidget.away.connect(self.__away)
self.networkWidget.autoConnected.connect(self.autoConnected)
    def shutdown(self):
        """
        Public method to shut down the widget.

        @return flag indicating successful shutdown (boolean)
        """
        if self.__server:
            if Preferences.getIrc("AskOnShutdown"):
                ok = E5MessageBox.yesNo(
                    self,
                    self.tr("Disconnect from Server"),
                    self.tr(
                        """<p>Do you really want to disconnect from"""
                        """ <b>{0}</b>?</p><p>All channels will be closed."""
                        """</p>""").format(self.__server.getName()))
            else:
                ok = True
            if ok:
                # Block the socket's signals so __hostDisconnected() is not
                # invoked while the widget is being torn down.
                self.__socket.blockSignals(True)
                self.__send("QUIT :" + self.__quitMessage)
                self.__socket.flush()
                self.__socket.close()
                self.__socket.deleteLater()
                self.__socket = None
        else:
            ok = True
        if ok:
            self.__ircNetworkManager.close()
        return ok

    def autoConnect(self):
        """
        Public method to initiate the IRC auto connection.
        """
        self.networkWidget.autoConnect()

    def __connectNetwork(self, name, connect, silent=False):
        """
        Private slot to connect to or disconnect from the given network.

        @param name name of the network to connect to (string)
        @param connect flag indicating to connect (boolean)
        @keyparam silent flag indicating a silent connect/disconnect (boolean)
        """
        if connect:
            network = self.__ircNetworkManager.getNetwork(name)
            if network:
                self.__server = network.getServer()
                self.__identityName = network.getIdentityName()
                identity = self.__ircNetworkManager.getIdentity(
                    self.__identityName)
                self.__userName = identity.getIdent()
                self.__quitMessage = identity.getQuitMessage()
                if self.__server:
                    useSSL = self.__server.useSSL()
                    if useSSL and not SSL_AVAILABLE:
                        # SSL requested but PyQt's SSL support is missing.
                        E5MessageBox.critical(
                            self,
                            self.tr("SSL Connection"),
                            self.tr(
                                """An encrypted connection to the IRC"""
                                """ network was requested but SSL is not"""
                                """ available. Please change the server"""
                                """ configuration."""))
                        return

                    if useSSL:
                        # create SSL socket
                        self.__socket = QSslSocket(self)
                        self.__socket.encrypted.connect(self.__hostConnected)
                        self.__socket.sslErrors.connect(self.__sslErrors)
                    else:
                        # create TCP socket
                        self.__socket = QTcpSocket(self)
                        self.__socket.connected.connect(self.__hostConnected)
                    self.__socket.hostFound.connect(self.__hostFound)
                    self.__socket.disconnected.connect(self.__hostDisconnected)
                    self.__socket.readyRead.connect(self.__readyRead)
                    self.__socket.error.connect(self.__tcpError)

                    self.__connectionState = IrcWidget.ServerConnecting
                    if useSSL:
                        self.networkWidget.addServerMessage(
                            self.tr("Info"),
                            self.tr("Looking for server {0} (port {1})"
                                    " using an SSL encrypted connection"
                                    "...").format(self.__server.getName(),
                                                  self.__server.getPort()))
                        self.__socket.connectToHostEncrypted(
                            self.__server.getName(), self.__server.getPort())
                    else:
                        self.networkWidget.addServerMessage(
                            self.tr("Info"),
                            self.tr(
                                "Looking for server {0} (port {1})...").format(
                                self.__server.getName(),
                                self.__server.getPort()))
                        self.__socket.connectToHost(self.__server.getName(),
                                                    self.__server.getPort())
        else:
            if silent:
                ok = True
            else:
                ok = E5MessageBox.yesNo(
                    self,
                    self.tr("Disconnect from Server"),
                    self.tr("""<p>Do you really want to disconnect from"""
                            """ <b>{0}</b>?</p><p>All channels will be"""
                            """ closed.</p>""")
                    .format(self.__server.getName()))
            if ok:
                self.networkWidget.addServerMessage(
                    self.tr("Info"),
                    self.tr("Disconnecting from server {0}...").format(
                        self.__server.getName()))
                self.__closeAllChannels()
                self.__send("QUIT :" + self.__quitMessage)
                # Guard: the socket may already be gone after a prior
                # disconnect; the short-circuit avoids an AttributeError.
                self.__socket and self.__socket.flush()
                self.__socket and self.__socket.close()
                self.__userName = ""
                self.__identityName = ""
                self.__quitMessage = ""
def __editNetwork(self, name):
"""
Private slot to edit the network configuration.
@param name name of the network to edit (string)
"""
from .IrcNetworkListDialog import IrcNetworkListDialog
dlg = IrcNetworkListDialog(self.__ircNetworkManager, self)
dlg.exec_()
def __networkDataChanged(self):
"""
Private slot handling changes of the network and identity definitions.
"""
identity = self.__ircNetworkManager.getIdentity(self.__identityName)
if identity:
partMsg = identity.getPartMessage()
for channel in self.__channelList:
channel.setPartMessage(partMsg)
    def __joinChannel(self, name, key=""):
        """
        Private slot to join a channel.

        @param name name of the channel (string)
        @param key key of the channel (string)
        """
        # step 1: check, if this channel is already joined
        for channel in self.__channelList:
            if channel.name() == name:
                return

        from .IrcChannelWidget import IrcChannelWidget
        channel = IrcChannelWidget(self)
        channel.setName(name)
        channel.setUserName(self.__nickName)
        identity = self.__ircNetworkManager.getIdentity(self.__identityName)
        channel.setPartMessage(identity.getPartMessage())
        channel.setUserPrivilegePrefix(self.__userPrefix)
        channel.initAutoWho()

        channel.sendData.connect(self.__send)
        channel.sendCtcpReply.connect(self.__sendCtcpReply)
        channel.channelClosed.connect(self.__closeChannel)
        channel.openPrivateChat.connect(self.__openPrivate)

        self.channelsWidget.addTab(channel, name)
        self.__channelList.append(channel)
        self.channelsWidget.setCurrentWidget(channel)

        # Send JOIN (optionally with the channel key) and query the modes.
        joinCommand = ["JOIN", name]
        if key:
            joinCommand.append(key)
        self.__send(" ".join(joinCommand))
        self.__send("MODE " + name)

        # Replace the placeholder tab by the real channel tab.
        emptyIndex = self.channelsWidget.indexOf(self.__emptyLabel)
        if emptyIndex > -1:
            self.channelsWidget.removeTab(emptyIndex)
            self.__leaveButton.setEnabled(True)
        self.channelsWidget.setTabsClosable(True)
    def __query(self, match):
        """
        Private method to handle a new private connection.

        @param match reference to the match object
        @return flag indicating, if the message was handled (boolean)
        """
        # group(1)  sender user name
        # group(2)  sender user@host
        # group(3)  target nick
        # group(4)  message
        if match.group(4).startswith("\x01"):
            # CTCP requests are framed in \x01 characters.
            return self.__handleCtcp(match)

        self.__openPrivate(match.group(1))
        # the above call sets the new channel as the current widget
        channel = self.channelsWidget.currentWidget()
        channel.addMessage(match.group(1), match.group(4))
        channel.setPrivateInfo(
            "{0} - {1}".format(match.group(1), match.group(2)))

        return True

    @pyqtSlot(str)
    def __openPrivate(self, name):
        """
        Private slot to open a private chat with the given user.

        @param name name of the user (string)
        """
        from .IrcChannelWidget import IrcChannelWidget
        channel = IrcChannelWidget(self)
        # A private chat tab carries the own nick as channel name.
        channel.setName(self.__nickName)
        channel.setUserName(self.__nickName)
        identity = self.__ircNetworkManager.getIdentity(self.__identityName)
        channel.setPartMessage(identity.getPartMessage())
        channel.setUserPrivilegePrefix(self.__userPrefix)
        channel.setPrivate(True, name)
        channel.addUsers([name, self.__nickName])

        channel.sendData.connect(self.__send)
        channel.sendCtcpReply.connect(self.__sendCtcpReply)
        channel.channelClosed.connect(self.__closeChannel)

        self.channelsWidget.addTab(channel, name)
        self.__channelList.append(channel)
        self.channelsWidget.setCurrentWidget(channel)
@pyqtSlot()
def __leaveChannel(self):
"""
Private slot to leave a channel and close the associated tab.
"""
channel = self.channelsWidget.currentWidget()
channel.requestLeave()
def __closeAllChannels(self):
"""
Private method to close all channels.
"""
while self.__channelList:
channel = self.__channelList.pop()
self.channelsWidget.removeTab(self.channelsWidget.indexOf(channel))
channel.deleteLater()
channel = None
self.channelsWidget.addTab(self.__emptyLabel, "")
self.__emptyLabel.show()
self.__leaveButton.setEnabled(False)
self.channelsWidget.setTabsClosable(False)
def __closeChannel(self, name):
"""
Private slot handling the closing of a channel.
@param name name of the closed channel (string)
"""
for channel in self.__channelList:
if channel.name() == name:
self.channelsWidget.removeTab(
self.channelsWidget.indexOf(channel))
self.__channelList.remove(channel)
channel.deleteLater()
if self.channelsWidget.count() == 0:
self.channelsWidget.addTab(self.__emptyLabel, "")
self.__emptyLabel.show()
self.__leaveButton.setEnabled(False)
self.channelsWidget.setTabsClosable(False)
@pyqtSlot(int)
def on_channelsWidget_tabCloseRequested(self, index):
"""
Private slot to close a channel by pressing the close button of
the channels widget.
@param index index of the tab to be closed (integer)
"""
channel = self.channelsWidget.widget(index)
channel.requestLeave()
def __send(self, data):
"""
Private slot to send data to the IRC server.
@param data data to be sent (string)
"""
if self.__socket:
self.__socket.write(
QByteArray("{0}\r\n".format(data).encode("utf-8")))
def __sendCtcpReply(self, receiver, text):
"""
Private slot to send a CTCP reply.
@param receiver nick name of the receiver (string)
@param text text to be sent (string)
"""
self.__send("NOTICE {0} :\x01{1}\x01".format(receiver, text))
def __hostFound(self):
"""
Private slot to indicate the host was found.
"""
self.networkWidget.addServerMessage(
self.tr("Info"),
self.tr("Server found,connecting..."))
def __hostConnected(self):
"""
Private slot to log in to the server after the connection was
established.
"""
self.networkWidget.addServerMessage(
self.tr("Info"),
self.tr("Connected,logging in..."))
self.networkWidget.setConnected(True)
self.__registering = True
serverPassword = self.__server.getPassword()
if serverPassword:
self.__send("PASS " + serverPassword)
nick = self.networkWidget.getNickname()
if not nick:
self.__nickIndex = 0
try:
nick = self.__ircNetworkManager.getIdentity(
self.__identityName).getNickNames()[self.__nickIndex]
except IndexError:
nick = ""
if not nick:
nick = self.__userName
self.__nickName = nick
self.networkWidget.setNickName(nick)
realName = self.__ircNetworkManager.getIdentity(
self.__identityName).getRealName()
if not realName:
realName = "eric IDE chat"
self.__send("NICK " + nick)
self.__send("USER " + self.__userName + " 0 * :" + realName)
    def __hostDisconnected(self):
        """
        Private slot to indicate the host was disconnected.
        """
        if self.networkWidget.isConnected():
            self.__closeAllChannels()
            self.networkWidget.addServerMessage(
                self.tr("Info"),
                self.tr("Server disconnected."))
            self.networkWidget.setRegistered(False)
            self.networkWidget.setConnected(False)

            # Reset all per-connection state.
            self.__server = None
            self.__nickName = ""
            self.__nickIndex = -1
            self.__channelTypePrefixes = ""

            self.__socket.deleteLater()
            self.__socket = None

            self.__connectionState = IrcWidget.ServerDisconnected
            self.__sslErrorLock = False
    def __readyRead(self):
        """
        Private slot to read data from the socket.
        """
        if self.__socket:
            self.__buffer += str(
                self.__socket.readAll(),
                Preferences.getSystem("IOEncoding"),
                'replace')
            # Only process once the buffer holds complete lines; a partial
            # trailing line keeps the whole buffer queued for the next read.
            if self.__buffer.endswith("\r\n"):
                for line in self.__buffer.splitlines():
                    line = line.strip()
                    if line:
                        logging.debug("<IRC> " + line)
                        handled = False
                        # step 1: give channels a chance to handle the message
                        for channel in self.__channelList:
                            handled = channel.handleMessage(line)
                            if handled:
                                break
                        else:
                            # step 2: try to process the message ourselves
                            # (for-else: runs only when no channel handled it)
                            for patternRe, patternFunc in self.__patterns:
                                match = patternRe.match(line)
                                if match is not None:
                                    if patternFunc(match):
                                        break
                            else:
                                # Oops, the message wasn't handled
                                self.networkWidget.addErrorMessage(
                                    self.tr("Message Error"),
                                    self.tr(
                                        "Unknown message received from server:"
                                        "<br/>{0}").format(line))

                self.__updateUsersCount()
                self.__buffer = ""
    def __handleNamedMessage(self, match):
        """
        Private method to handle a server message containing a message name.

        @param match reference to the match object
        @return flag indicating, if the message was handled (boolean)
        """
        name = match.group(2)
        if name == "NOTICE":
            try:
                msg = match.group(3).split(":", 1)[1]
            except IndexError:
                msg = match.group(3)

            if "!" in match.group(1):
                # Sender is a user (nick!user@host), not a server.
                name = match.group(1).split("!", 1)[0]
                msg = "-{0}- {1}".format(name, msg)
            self.networkWidget.addServerMessage(self.tr("Notice"), msg)
            return True
        elif name == "MODE":
            self.__registering = False
            if ":" in match.group(3):
                # :detlev_ MODE detlev_ :+i
                name, modes = match.group(3).split(" :")
                sourceNick = match.group(1)
                # Only personal mode changes are reported here; channel
                # modes are handled by the channel widgets.
                if not self.isChannelName(name):
                    if name == self.__nickName:
                        if sourceNick == self.__nickName:
                            msg = self.tr(
                                "You have set your personal modes to"
                                " <b>[{0}]</b>.").format(modes)
                        else:
                            msg = self.tr(
                                "{0} has changed your personal modes to"
                                " <b>[{1}]</b>.").format(sourceNick, modes)
                        self.networkWidget.addServerMessage(
                            self.tr("Mode"), msg, filterMsg=False)
                        return True
        elif name == "PART":
            nick = match.group(1).split("!", 1)[0]
            if nick == self.__nickName:
                channel = match.group(3).split(None, 1)[0]
                self.networkWidget.addMessage(
                    self.tr("You have left channel {0}.").format(channel))
            return True
        elif name == "QUIT":
            # don't do anything with it here
            return True
        elif name == "NICK":
            # :foo_!n=foo@foohost.bar.net NICK :newnick
            oldNick = match.group(1).split("!", 1)[0]
            newNick = match.group(3).split(":", 1)[1]
            if oldNick == self.__nickName:
                self.networkWidget.addMessage(
                    self.tr("You are now known as {0}.").format(newNick))
                self.__nickName = newNick
                self.networkWidget.setNickName(newNick)
            else:
                self.networkWidget.addMessage(
                    self.tr("User {0} is now known as {1}.").format(
                        oldNick, newNick))
            return True
        elif name == "ERROR":
            self.networkWidget.addErrorMessage(
                self.tr("Server Error"), match.group(3).split(":", 1)[1])
            return True

        return False
def __handleNumericMessage(self, match):
"""
Private method to handle a server message containing a numeric code.
@param match reference to the match object
@return flag indicating, if the message was handled (boolean)
"""
code = int(match.group(2))
if code < 400:
return self.__handleServerReply(
code, match.group(1), match.group(3))
else:
return self.__handleServerError(
code, match.group(1), match.group(3))
def __handleServerError(self, code, server, message):
"""
Private slot to handle a server error reply.
@param code numerical code sent by the server (integer)
@param server name of the server (string)
@param message message sent by the server (string)
@return flag indicating, if the message was handled (boolean)
"""
if code == 433:
if self.__registering:
self.__handleNickInUseLogin()
else:
self.__handleNickInUse()
else:
self.networkWidget.addServerMessage(self.tr("Error"), message)
return True
    def __handleServerReply(self, code, server, message):
        """
        Private slot to handle a server reply.

        @param code numerical code sent by the server (integer)
        @param server name of the server (string)
        @param message message sent by the server (string)
        @return flag indicating, if the message was handled (boolean)
        """
        # determine message type
        if code in [1, 2, 3, 4]:
            msgType = self.tr("Welcome")
        elif code == 5:
            msgType = self.tr("Support")
        elif code in [250, 251, 252, 253, 254, 255, 265, 266]:
            msgType = self.tr("User")
        elif code in [372, 375, 376]:
            msgType = self.tr("MOTD")
        elif code in [305, 306]:
            msgType = self.tr("Away")
        else:
            msgType = self.tr("Info ({0})").format(code)

        # special treatment for some messages
        if code == 375:
            message = self.tr("Message of the day")
        elif code == 376:
            message = self.tr("End of message of the day")
        elif code == 4:
            parts = message.strip().split()
            message = self.tr(
                "Server {0} (Version {1}), User-Modes: {2},"
                " Channel-Modes: {3}")\
                .format(parts[1], parts[2], parts[3], parts[4])
        elif code == 265:
            parts = message.strip().split()
            message = self.tr(
                "Current users on {0}: {1}, max. {2}").format(
                server, parts[1], parts[2])
        elif code == 266:
            parts = message.strip().split()
            message = self.tr(
                "Current users on the network: {0}, max. {1}").format(
                parts[1], parts[2])
        elif code == 305:
            message = self.tr("You are no longer marked as being away.")
        elif code == 306:
            message = self.tr("You have been marked as being away.")
        else:
            # Generic case: drop the target parameter and the leading colon
            # of the trailing parameter.
            first, message = message.split(None, 1)
            if message.startswith(":"):
                message = message[1:]
            else:
                message = message.replace(":", "", 1)

        self.networkWidget.addServerMessage(msgType, message)

        if code == 1:
            # register with services after the welcome message
            self.__connectionState = IrcWidget.ServerConnected
            self.__registerWithServices()
            self.networkWidget.setRegistered(True)
            QTimer.singleShot(1000, self.__autoJoinChannels)
        elif code == 5:
            # extract the user privilege prefixes
            # ... PREFIX=(ov)@+ ...
            m = self.__prefixRe.match(message)
            if m:
                self.__setUserPrivilegePrefix(m.group(1), m.group(2))
            # extract the channel type prefixes
            # ... CHANTYPES=# ...
            m = self.__chanTypesRe.match(message)
            if m:
                self.__setChannelTypePrefixes(m.group(1))

        return True
def __registerWithServices(self):
"""
Private method to register to services.
"""
identity = self.__ircNetworkManager.getIdentity(self.__identityName)
service = identity.getServiceName()
password = identity.getPassword()
if service and password:
self.__send("PRIVMSG " + service + " :identify " + password)
def __autoJoinChannels(self):
"""
Private slot to join channels automatically once a server got
connected.
"""
for channel in self.networkWidget.getNetworkChannels():
if channel.autoJoin():
name = channel.getName()
key = channel.getKey()
self.__joinChannel(name, key)
    def __tcpError(self, error):
        """
        Private slot to handle errors reported by the TCP socket.

        @param error error code reported by the socket
            (QAbstractSocket.SocketError)
        """
        if error == QAbstractSocket.RemoteHostClosedError:
            # ignore this one, it's a disconnect
            if self.__sslErrorLock:
                # Connection dropped while an SSL error dialog was open.
                self.networkWidget.addErrorMessage(
                    self.tr("SSL Error"),
                    self.tr(
                        """Connection to server {0} (port {1}) lost while"""
                        """ waiting for user response to an SSL error.""")
                    .format(self.__server.getName(), self.__server.getPort()))
                self.__connectionState = IrcWidget.ServerDisconnected
        elif error == QAbstractSocket.HostNotFoundError:
            self.networkWidget.addErrorMessage(
                self.tr("Socket Error"),
                self.tr(
                    "The host was not found. Please check the host name"
                    " and port settings."))
        elif error == QAbstractSocket.ConnectionRefusedError:
            self.networkWidget.addErrorMessage(
                self.tr("Socket Error"),
                self.tr(
                    "The connection was refused by the peer. Please check the"
                    " host name and port settings."))
        elif error == QAbstractSocket.SslHandshakeFailedError:
            self.networkWidget.addErrorMessage(
                self.tr("Socket Error"),
                self.tr("The SSL handshake failed."))
        else:
            if self.__socket:
                self.networkWidget.addErrorMessage(
                    self.tr("Socket Error"),
                    self.tr(
                        "The following network error occurred:<br/>{0}")
                    .format(self.__socket.errorString()))
            else:
                self.networkWidget.addErrorMessage(
                    self.tr("Socket Error"),
                    self.tr("A network error occurred."))
    def __sslErrors(self, errors):
        """
        Private slot to handle SSL errors.

        @param errors list of SSL errors (list of QSslError)
        """
        ignored, defaultChanged = self.__sslErrorHandler.sslErrors(
            errors, self.__server.getName(), self.__server.getPort())
        if ignored == E5SslErrorHandler.NotIgnored:
            self.networkWidget.addErrorMessage(
                self.tr("SSL Error"),
                self.tr(
                    """Could not connect to {0} (port {1}) using an SSL"""
                    """ encrypted connection. Either the server does not"""
                    """ support SSL (did you use the correct port?) or"""
                    """ you rejected the certificate.""")
                .format(self.__server.getName(), self.__server.getPort()))
            self.__socket.close()
        else:
            if defaultChanged:
                # The handler changed the default SSL configuration
                # (e.g. added CA certificates); apply it to our socket.
                self.__socket.setSslConfiguration(
                    QSslConfiguration.defaultConfiguration())
            if ignored == E5SslErrorHandler.UserIgnored:
                self.networkWidget.addErrorMessage(
                    self.tr("SSL Error"),
                    self.tr(
                        """The SSL certificate for the server {0} (port {1})"""
                        """ failed the authenticity check. SSL errors"""
                        """ were accepted by you.""")
                    .format(self.__server.getName(), self.__server.getPort()))
            if self.__connectionState == IrcWidget.ServerConnecting:
                # Continue the handshake despite the (accepted) errors.
                self.__socket.ignoreSslErrors()
def __setUserPrivilegePrefix(self, prefix1, prefix2):
"""
Private method to set the user privilege prefix.
@param prefix1 first part of the prefix (string)
@param prefix2 indictors the first part gets mapped to (string)
"""
# PREFIX=(ov)@+
# o = @ -> @ircbot , channel operator
# v = + -> +userName , voice operator
for i in range(len(prefix1)):
self.__userPrefix["+" + prefix1[i]] = prefix2[i]
self.__userPrefix["-" + prefix1[i]] = ""
def __ping(self, match):
"""
Private method to handle a PING message.
@param match reference to the match object
@return flag indicating, if the message was handled (boolean)
"""
self.__send("PONG " + match.group(1))
return True
    def __handleCtcp(self, match):
        """
        Private method to handle a CTCP command.

        @param match reference to the match object
        @return flag indicating, if the message was handled (boolean)
        """
        # group(1)  sender user name
        # group(2)  sender user@host
        # group(3)  target nick
        # group(4)  message
        if match.group(4).startswith("\x01"):
            # Strip the \x01 framing and split request from argument.
            ctcpCommand = match.group(4)[1:].split("\x01", 1)[0]
            if " " in ctcpCommand:
                ctcpRequest, ctcpArg = ctcpCommand.split(" ", 1)
            else:
                ctcpRequest, ctcpArg = ctcpCommand, ""
            ctcpRequest = ctcpRequest.lower()
            if ctcpRequest == "version":
                # "@@" marks an unexpanded version placeholder in dev builds.
                if Version.startswith("@@"):
                    vers = ""
                else:
                    vers = " " + Version
                msg = "Eric IRC client{0}, {1}".format(vers, Copyright)
                self.networkWidget.addServerMessage(
                    self.tr("CTCP"),
                    self.tr("Received Version request from {0}.").format(
                        match.group(1)))
                self.__sendCtcpReply(match.group(1), "VERSION " + msg)
            elif ctcpRequest == "ping":
                self.networkWidget.addServerMessage(
                    self.tr("CTCP"),
                    self.tr(
                        "Received CTCP-PING request from {0},"
                        " sending answer.").format(match.group(1)))
                self.__sendCtcpReply(
                    match.group(1), "PING {0}".format(ctcpArg))
            elif ctcpRequest == "clientinfo":
                self.networkWidget.addServerMessage(
                    self.tr("CTCP"),
                    self.tr(
                        "Received CTCP-CLIENTINFO request from {0},"
                        " sending answer.").format(match.group(1)))
                self.__sendCtcpReply(
                    match.group(1),
                    "CLIENTINFO CLIENTINFO PING VERSION")
            else:
                self.networkWidget.addServerMessage(
                    self.tr("CTCP"),
                    self.tr(
                        "Received unknown CTCP-{0} request from {1}.")
                    .format(ctcpRequest, match.group(1)))
            return True

        return False
def __updateUsersCount(self):
"""
Private method to update the users count on the channel tabs.
"""
for channel in self.__channelList:
index = self.channelsWidget.indexOf(channel)
self.channelsWidget.setTabText(
index,
self.tr("{0} ({1})", "channel name, users count").format(
channel.name(), channel.getUsersCount()))
    def __handleNickInUseLogin(self):
        """
        Private method to handle a 433 (nickname already in use) server
        error received during registration.
        """
        # Try the next fallback nick from the identity's nick name list.
        self.__nickIndex += 1
        try:
            nick = self.__ircNetworkManager.getIdentity(self.__identityName)\
                .getNickNames()[self.__nickIndex]
            self.__nickName = nick
        except IndexError:
            # All configured nick names are taken: give up and disconnect.
            self.networkWidget.addServerMessage(
                self.tr("Critical"),
                self.tr(
                    "No nickname acceptable to the server configured"
                    " for <b>{0}</b>. Disconnecting...")
                .format(self.__userName),
                filterMsg=False)
            self.__connectNetwork("", False, silent=True)
            self.__nickName = ""
            self.__nickIndex = -1
            return

        self.networkWidget.setNickName(nick)
        self.__send("NICK " + nick)

    def __handleNickInUse(self):
        """
        Private method to handle a 433 (nickname already in use) server
        error after registration; the user has to pick another nick.
        """
        self.networkWidget.addServerMessage(
            self.tr("Critical"),
            self.tr("The given nickname is already in use."))
def __changeNick(self, nick):
"""
Private slot to use a new nick name.
@param nick nick name to use (str)
"""
if nick and nick != self.__nickName:
self.__send("NICK " + nick)
def __setChannelTypePrefixes(self, prefixes):
"""
Private method to set the channel type prefixes.
@param prefixes channel prefix characters (string)
"""
self.__channelTypePrefixes = prefixes
def isChannelName(self, name):
"""
Public method to check, if the given name is a channel name.
@param name name to check (string)
@return flag indicating a channel name (boolean)
"""
if not name:
return False
if self.__channelTypePrefixes:
return name[0] in self.__channelTypePrefixes
else:
return name[0] in "#&"
def __away(self, isAway):
"""
Private slot handling the change of the away state.
@param isAway flag indicating the current away state (boolean)
"""
if isAway and self.__identityName:
identity = self.__ircNetworkManager.getIdentity(
self.__identityName)
if identity.rememberAwayPosition():
for channel in self.__channelList:
channel.setMarkerLine()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# flake8: noqa
import operator
__author__ = 'ladynerd'
from ava.import_google.models import GoogleDirectoryUser
class GoogleStatistics():
    """Compute summary statistics over Google directory user accounts."""

    GOOGLE_CONFIG = None

    def _collect(self, users):
        """Build the statistics result dict for the given user queryset."""
        return {
            'admin_accounts': self.get_admin_accounts(users),
            'delegated_admin_accounts':
                self.get_delegated_admin_accounts(users),
            'never_logged_in': self.get_never_logged_in(users),
            'connection_size': self.get_connection_size(users),
        }

    def get_all_stats(self):
        """Return statistics for every known directory user."""
        return self._collect(GoogleDirectoryUser.objects.filter())

    def get_stats(self, google_config):
        """Return statistics for the users of one Google configuration."""
        self.GOOGLE_CONFIG = google_config
        users = GoogleDirectoryUser.objects.filter(
            google_configuration=self.GOOGLE_CONFIG)
        return self._collect(users)

    @staticmethod
    def get_admin_accounts(users):
        """Return the users flagged as administrators."""
        return [user for user in users if user.is_admin is True]

    @staticmethod
    def get_delegated_admin_accounts(users):
        """Return the users flagged as delegated administrators."""
        return [user for user in users if user.is_delegated_admin is True]

    @staticmethod
    def get_never_logged_in(users):
        """Return the users that never logged in."""
        return [user for user in users if user.last_login_time is None]

    def get_connection_size(self, users):
        """Return (user, group count) pairs sorted by ascending count."""
        counts = {user: user.groups.all().count() for user in users}
        return sorted(counts.items(), key=operator.itemgetter(1))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# python3 port
# -*- coding: utf-8 -*-
# Records 4 seconds of audio on a Raspberry Pi (an LED on GPIO 21 signals
# recording), downmixes the capture to mono, sends it to Google speech
# recognition and speaks the transcript back through a TTS shell script.
import speech_recognition as sr
from time import sleep as delay
from subprocess import call
import subprocess
import RPi.GPIO as GPIO
LED=21  # BCM pin number driving the recording indicator LED
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED,GPIO.OUT)
GPIO.output(LED,False)
r = sr.Recognizer()
temps=4  # intended recording length in seconds (unused; arecord uses --duration=4)
delay(3)
call('clear')
delay(2)
GPIO.output(LED,True)  # LED on: recording in progress
print("\33[38;2;0;0;255;48;2;100;100;100mGraban\x1B[0m")
delay(.3)
call(['arecord','-D','plughw:1','--duration=4','-f','cd','-vv','lect.wav'],stdout=subprocess.PIPE)
GPIO.output(LED,False)
print ("\33[38;2;0;0;255;48;2;100;100;100mGravacio complerta\x1B[0m")
# sox downmixes the CD-quality stereo capture to a mono file for recognition.
call(['sox','lect.wav','-c','1','lectura.wav'],stdout=subprocess.PIPE)
with sr.WavFile("lectura.wav") as source: # read the converted recording
    audio = r.record(source) # record until the file's duration is consumed
try:
    reconegut=r.recognize(audio)
    print("Has dit " + reconegut) # recognized via Google's speech service
    call(['sh','/home/pi/programes/bash/TTS.sh','you have said: %s'%reconegut])
except IndexError: # no internet connection
    print("No hi ha connexió a internet")
except LookupError: # audio could not be understood
    print("No s'ha entes l'audio")
GPIO.cleanup()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os #@UnusedImport
import sys #@UnusedImport
import sre_compile
from nsiqcppstyle_util import * #@UnusedWildImport
class RuleManager :
    """Discovers, loads and dispatches nsiqcppstyle checker rules.

    Rule modules live as <runtimePath>/rules/*.py.  Loaded modules register
    their callbacks through the Add*Rule() methods; the analyzer then calls
    the matching Run*Rule() method at each point of the scan.

    NOTE(review): Python 2 only (print statements, dynamic __import__ use).
    """
    def __init__(self, runtimePath) :
        # Scan the rules/ directory for candidate rule module names
        # (every *.py file except __init__).
        self.availRuleNames = []
        basePath = os.path.join(runtimePath, "rules")
        ruleFiles = os.listdir(basePath)
        rulePattern = sre_compile.compile("^(.*)\.py$")
        for eachRuleFile in ruleFiles :
            if os.path.isfile(os.path.join(basePath, eachRuleFile)) :
                ruleMatch = rulePattern.match(eachRuleFile)
                if ruleMatch != None and eachRuleFile.find("__init__") == -1 :
                    ruleName = ruleMatch.group(1)
                    self.availRuleNames.append(ruleName)
        self.availRuleCount = len(self.availRuleNames)
        self.availRuleModules = {}
        self.loadedRule = []
        # Registered rule callbacks, grouped by the analysis hook they run in.
        self.rules = []
        self.preprocessRules = []
        self.functionNameRules = []
        self.functionScopeRules = []
        self.typeNameRules = []
        self.typeScopeRules = []
        self.lineRules = []
        self.fileEndRules = []
        self.fileStartRules = []
        self.projectRules = []
        # Installed on demand in LoadRules() so rule modules can be unloaded.
        self.rollBackImporter = None
        # self.LoadAllRules()
    def LoadRules(self, checkingRuleNames, printRule = True):
        """
        Load Rules. It resets rule before loading rules
        """
        self.ResetRules()
        self.ResetRegisteredRules()
        # Unload previously imported rule modules so re-importing them below
        # re-runs their registration side effects.
        if self.rollBackImporter != None :
            self.rollBackImporter.uninstall()
        self.rollBackImporter = RollbackImporter()
        if printRule :
            print "======================================================================================"
        for ruleName in checkingRuleNames :
            count = self.availRuleNames.count(ruleName)
            if count == 0 :
                print "%s does not exist or incompatible." % ruleName
                continue
            else :
                if printRule :
                    print " - ", ruleName, "is applied."
            # Importing the module triggers its Add*Rule() registrations.
            ruleModule = __import__("rules."+ruleName)
            self.loadedRule.append(ruleModule)
        if len(self.loadedRule) == 0 :
            print "  No Rule is specified. Please configure rules in filefilter.txt."
        if printRule :
            print "======================================================================================"
    def ResetRules(self):
        self.loadedRule = []
    ############################################################################
    # Rule Runner
    #
    # Each runner saves the lexer position, invokes every registered callback
    # for its hook, and restores the position so rules cannot disturb the scan.
    ############################################################################
    def RunPreprocessRule(self, lexer, contextStack):
        """ Run rules which runs in the preprecessor blocks """
        for preprocessRule in self.preprocessRules :
            data = lexer.Backup()
            preprocessRule(lexer, contextStack)
            lexer.Restore(data)
    def RunFunctionNameRule(self, lexer, functionFullName, decl, contextStack, functionContext) :
        """ Run rules which runs on the function name """
        for eachFunctionNameRule in self.functionNameRules :
            data = lexer.Backup()
            eachFunctionNameRule(lexer, functionFullName, decl, contextStack, functionContext)
            lexer.Restore(data)
    def RunFunctionScopeRule(self, lexer, contextStack):
        """ Run rules which runs in the function blocks """
        for eachFunctionScopeRule in self.functionScopeRules :
            data = lexer.Backup()
            eachFunctionScopeRule(lexer, contextStack)
            lexer.Restore(data)
    def RunTypeNameRule(self, lexer, typeName, typeFullName, decl, contextStack, typeContext):
        """ Run rules which runs on the type names """
        for typeNameRule in self.typeNameRules :
            data = lexer.Backup()
            typeNameRule(lexer, typeName, typeFullName, decl, contextStack, typeContext)
            lexer.Restore(data)
    def RunTypeScopeRule(self, lexer, contextStack):
        """ Run rules which runs in the type blocks """
        for typeScopeRule in self.typeScopeRules :
            data = lexer.Backup()
            typeScopeRule(lexer, contextStack)
            lexer.Restore(data)
    def RunRule(self, lexer, contextStack):
        """ Run rules which runs in any tokens """
        for rule in self.rules :
            data = lexer.Backup()
            rule(lexer, contextStack)
            lexer.Restore(data)
    def RunLineRule(self, lexer, line, lineno):
        """ Run rules which runs in each lines. """
        for lineRule in self.lineRules :
            data = lexer.Backup()
            lineRule(lexer, line, lineno)
            lexer.Restore(data)
    def RunFileEndRule(self, lexer, filename, dirname):
        """ Run rules which runs at the end of files. """
        for fileEndRule in self.fileEndRules :
            data = lexer.Backup()
            fileEndRule(lexer, filename, dirname)
            lexer.Restore(data)
    def RunFileStartRule(self, lexer, filename, dirname):
        """ Run rules which runs at the start of files. """
        for fileStartRule in self.fileStartRules :
            data = lexer.Backup()
            fileStartRule(lexer, filename, dirname)
            lexer.Restore(data)
    def RunProjectRules(self, targetName):
        """ Run rules which runs once a project. """
        # No lexer involved here, so no Backup/Restore is needed.
        for projectRule in self.projectRules :
            projectRule(targetName)
    ############################################################################
    # Rule Resister Methods
    ############################################################################
    def ResetRegisteredRules(self):
        """ Reset all registered rules. """
        # Clear in place (del ...[:]) so any external references to these
        # lists keep observing the manager's current state.
        del self.functionNameRules[:]
        del self.functionScopeRules[:]
        del self.lineRules[:]
        del self.rules[:]
        del self.typeNameRules[:]
        del self.typeScopeRules[:]
        del self.fileStartRules[:]
        del self.fileEndRules[:]
        del self.projectRules[:]
        del self.preprocessRules[:]
    def AddPreprocessRule(self, preprocessRule):
        """ Add rule which runs in preprocess statements """
        self.preprocessRules.append(preprocessRule)
    def AddFunctionScopeRule(self, functionScopeRule):
        """ Add rule which runs in function scope """
        self.functionScopeRules.append(functionScopeRule)
    def AddFunctionNameRule(self, functionRule):
        """ Add rule on the function name place"""
        self.functionNameRules.append(functionRule)
    def AddLineRule(self, lineRule):
        """ Add rule on the each line """
        self.lineRules.append(lineRule)
    def AddRule(self, rule):
        """ Add rule on any token """
        self.rules.append(rule)
    def AddTypeNameRule(self, typeNameRule):
        """ Add rule on any type (class / struct / union / namesapce / enum) """
        self.typeNameRules.append(typeNameRule)
    def AddTypeScopeRule(self, typeScopeRule):
        """ Add rule on the any type definition scope """
        self.typeScopeRules.append(typeScopeRule)
    def AddFileEndRule(self, fileEndRule):
        """
        Add rule on the file end
        Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
        lexer is the lexer used to analyze the source. it points the end token of source.
        filename is the filename analyzed.
        dirname is the file directory.
        """
        self.fileEndRules.append(fileEndRule)
    def AddFileStartRule(self, fileStartRule):
        """
        Add rule on the file start
        Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
        lexer is the lexer used to analyze the source. it points the start token of source.
        filename is the filename analyzed.
        dirname is the file directory.
        """
        self.fileStartRules.append(fileStartRule)
    def AddProjectRules(self, projectRule):
        """
        Add rule on the project
        Added Rule should be function with following prototype "def RunRule(targetName)"
        targetName is the analysis target directory.
        """
        self.projectRules.append(projectRule)
class RollbackImporter:
    """Hooks the global __import__ so that rule modules can be unloaded again.

    NOTE(review): Python 2 only -- relies on apply(), dict.has_key() and
    __builtins__ behaving as a dict.
    """
    def __init__(self):
        "Creates an instance and installs as the global importer"
        self.previousModules = sys.modules.copy()
        self.realImport = __builtins__["__import__"]
        __builtins__["__import__"] = self._import
        self.newModules = {}
    def _import(self, name, globals=None, locals=None, fromlist=[]):
        # Delegate to the real importer, remembering every "rules" module
        # imported while this hook is installed.
        result = apply(self.realImport, (name, globals, locals, fromlist))
        if name.find("rules") != -1 :
            self.newModules[name] = 1
        return result
    def uninstall(self):
        # Remove rule modules that were not already loaded before install,
        # so the next import reloads (and re-registers) them, then restore
        # the original __import__.
        for modname in self.newModules.keys():
            if modname.find("rules") != -1 :
                if not self.previousModules.has_key(modname):
                    # Force reload when modname next imported
                    del(sys.modules[modname])
        __builtins__["__import__"] = self.realImport
# Module-level singleton used by the rest of nsiqcppstyle.
ruleManager = RuleManager(GetRuntimePath())
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Minimal CommonJS module: its entire export is the string "module".
module.exports = "module";
|
javascript
|
github
|
https://github.com/webpack/webpack
|
examples/node_modules/module.js
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_NAMESPACEALIASER_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_NAMESPACEALIASER_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include <map>
#include <optional>
namespace clang::tidy::utils {
// This class creates function-level namespace aliases.
// This class creates function-level namespace aliases.
class NamespaceAliaser {
public:
  explicit NamespaceAliaser(const SourceManager &SourceMgr);

  // Adds a namespace alias for \p Namespace valid near \p
  // Statement. Picks the first available name from \p Abbreviations.
  // Returns ``std::nullopt`` if an alias already exists or there is an error.
  std::optional<FixItHint>
  createAlias(ASTContext &Context, const Stmt &Statement,
              llvm::StringRef Namespace,
              const std::vector<std::string> &Abbreviations);

  // Get an alias name for \p Namespace valid at \p Statement. Returns \p
  // Namespace if there is no alias.
  std::string getNamespaceName(ASTContext &Context, const Stmt &Statement,
                               llvm::StringRef Namespace) const;

private:
  const SourceManager &SourceMgr;
  // Aliases created so far, keyed by enclosing function and then by
  // namespace name. NOTE(review): the mapped strings appear to be the
  // chosen alias names -- confirm against the implementation file.
  llvm::DenseMap<const FunctionDecl *, llvm::StringMap<std::string>>
      AddedAliases;
};
} // namespace clang::tidy::utils
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_NAMESPACEALIASER_H
|
c
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clang-tidy/utils/NamespaceAliaser.h
|
#!/usr/bin/env python
# This example demonstrates the use of streamlines generated from seeds,
# combined with a tube filter to create several streamtubes.
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.colors import *

VTK_DATA_ROOT = vtkGetDataRoot()

# We read a data file that is a CFD analysis of airflow in an office
# (with ventilation and a burning cigarette). We force an update so
# that we can query the output for its length, i.e., the length of the
# diagonal of the bounding box. This is useful for normalizing the data.
reader = vtk.vtkStructuredGridReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/office.binary.vtk")
reader.Update()
length = reader.GetOutput().GetLength()

maxVelocity = reader.GetOutput().GetPointData().GetVectors().GetMaxNorm()
maxTime = 35.0 * length / maxVelocity  # kept from the original example (unused below)

# Now we will generate multiple streamlines in the data. We create a
# random cloud of points and then use those as integration seeds. We
# select the integration order to use (RungeKutta order 4) and
# associate it with the streamer. The seed positions are in world
# space; we integrate in both directions. The IntegrationStepLength
# specifies the integration step length as a fraction of the cell size
# that the streamline is in.
seeds = vtk.vtkPointSource()
seeds.SetRadius(0.15)
seeds.SetCenter(0.1, 2.1, 0.5)
seeds.SetNumberOfPoints(6)

integ = vtk.vtkRungeKutta4()
streamer = vtk.vtkStreamTracer()
streamer.SetInputConnection(reader.GetOutputPort())
streamer.SetSourceConnection(seeds.GetOutputPort())
streamer.SetMaximumPropagation(500)
streamer.SetInitialIntegrationStep(0.05)
streamer.SetIntegrationDirectionToBoth()
streamer.SetIntegrator(integ)

# The tube is wrapped around the generated streamline. By varying the
# radius by the inverse of vector magnitude, we are creating a tube
# whose radius is proportional to mass flux (in incompressible flow).
streamTube = vtk.vtkTubeFilter()
streamTube.SetInputConnection(streamer.GetOutputPort())
streamTube.SetInputArrayToProcess(1, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "vectors")
streamTube.SetRadius(0.02)
streamTube.SetNumberOfSides(12)
streamTube.SetVaryRadiusToVaryRadiusByVector()

mapStreamTube = vtk.vtkPolyDataMapper()
mapStreamTube.SetInputConnection(streamTube.GetOutputPort())
mapStreamTube.SetScalarRange(reader.GetOutput().GetPointData().GetScalars().GetRange())
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(mapStreamTube)
streamTubeActor.GetProperty().BackfaceCullingOn()


def _extent_actor(extent, color):
    """Return a flat-colored actor for one sub-extent of the structured grid.

    extent: 6-tuple (imin, imax, jmin, jmax, kmin, kmax) passed to
        vtkStructuredGridGeometryFilter.SetExtent.
    color:  RGB triple in [0, 1] for the actor's surface color.
    """
    geometry = vtk.vtkStructuredGridGeometryFilter()
    geometry.SetInputConnection(reader.GetOutputPort())
    geometry.SetExtent(*extent)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(geometry.GetOutputPort())
    mapper.ScalarVisibilityOff()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(*color)
    return actor


# From here on we generate a whole bunch of planes which correspond to
# the geometry in the analysis: tables, filing cabinets, bookshelves,
# window, ventilation inlet and outlet.
wood = (.59, .427, .392)
shelf = (.8, .8, .6)
furnitureActors = [
    _extent_actor((11, 15, 7, 9, 8, 8), wood),       # table 1
    _extent_actor((11, 15, 10, 12, 8, 8), wood),     # table 2
    _extent_actor((15, 15, 7, 9, 0, 8), shelf),      # filing cabinet 1
    _extent_actor((15, 15, 10, 12, 0, 8), shelf),    # filing cabinet 2
    # bookshelf 1: top, bottom, front, back, LHS, RHS
    _extent_actor((13, 13, 0, 4, 0, 11), shelf),
    _extent_actor((20, 20, 0, 4, 0, 11), shelf),
    _extent_actor((13, 20, 0, 0, 0, 11), shelf),
    _extent_actor((13, 20, 4, 4, 0, 11), shelf),
    _extent_actor((13, 20, 0, 4, 0, 0), shelf),
    _extent_actor((13, 20, 0, 4, 11, 11), shelf),
    # bookshelf 2: top, bottom, front, back, LHS, RHS
    _extent_actor((13, 13, 15, 19, 0, 11), shelf),
    _extent_actor((20, 20, 15, 19, 0, 11), shelf),
    _extent_actor((13, 20, 15, 15, 0, 11), shelf),
    _extent_actor((13, 20, 19, 19, 0, 11), shelf),
    _extent_actor((13, 20, 15, 19, 0, 0), shelf),
    _extent_actor((13, 20, 15, 19, 11, 11), shelf),
    _extent_actor((20, 20, 6, 13, 10, 13), (.3, .3, .5)),  # window
    _extent_actor((0, 0, 9, 10, 14, 16), (0, 0, 0)),       # ventilation outlet
    _extent_actor((0, 0, 9, 10, 0, 6), (0, 0, 0)),         # ventilation inlet
]

# Outline of the full dataset.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(mapOutline)
outlineActor.GetProperty().SetColor(0, 0, 0)

# Now create the usual graphics stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

for actor in furnitureActors:
    ren.AddActor(actor)
ren.AddActor(outlineActor)
ren.AddActor(streamTubeActor)
ren.SetBackground(slate_grey)

# Here we specify a particular view.
aCamera = vtk.vtkCamera()
aCamera.SetClippingRange(0.726079, 36.3039)
aCamera.SetFocalPoint(2.43584, 2.15046, 1.11104)
aCamera.SetPosition(-4.76183, -10.4426, 3.17203)
aCamera.SetViewUp(0.0511273, 0.132773, 0.989827)
aCamera.SetViewAngle(18.604)
aCamera.Zoom(1.2)

ren.SetActiveCamera(aCamera)
renWin.SetSize(500, 300)
iren.Initialize()
renWin.Render()
iren.Start()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Run managers."""
from __future__ import annotations
import asyncio
import atexit
import functools
import logging
from abc import ABC, abstractmethod
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import copy_context
from typing import TYPE_CHECKING, Any, TypeVar, cast
from langsmith.run_helpers import get_tracing_context
from typing_extensions import Self, override
from langchain_core.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.globals import get_debug
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.tracers.context import (
_configure_hooks,
_get_trace_callbacks,
_get_tracer_project,
_tracing_v2_is_enabled,
tracing_v2_callback_var,
)
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.stdout import ConsoleCallbackHandler
from langchain_core.utils.env import env_var_is_set
from langchain_core.utils.uuid import uuid7
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence
from uuid import UUID
from tenacity import RetryCallState
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.documents import Document
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
logger = logging.getLogger(__name__)
def _get_debug() -> bool:
    """Return the global LangChain debug flag (thin wrapper over get_debug)."""
    return get_debug()
@contextmanager
def trace_as_chain_group(
    group_name: str,
    callback_manager: CallbackManager | None = None,
    *,
    inputs: dict[str, Any] | None = None,
    project_name: str | None = None,
    example_id: str | UUID | None = None,
    run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
    """Get a callback manager for a chain group in a context manager.

    Useful for grouping different calls together as a single run even if they
    aren't composed in a single chain.

    Args:
        group_name: The name of the chain group.
        callback_manager: The callback manager to use.
        inputs: The inputs to the chain group.
        project_name: The name of the project.
        example_id: The ID of the example.
        run_id: The ID of the run.
        tags: The inheritable tags to apply to all runs.
        metadata: The metadata to apply to all runs.

    !!! note
        Must have `LANGCHAIN_TRACING_V2` env var set to true to see the trace in
        LangSmith.

    Yields:
        The callback manager for the chain group.

    Example:
        ```python
        llm_input = "Foo"
        with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
            # Use the callback manager for the chain group
            res = llm.invoke(llm_input, {"callbacks": manager})
            manager.on_chain_end({"output": res})
        ```
    """
    # Resolve tracing callbacks and build a configured manager for the group.
    trace_callbacks = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    )
    manager = CallbackManager.configure(
        inheritable_callbacks=trace_callbacks,
        inheritable_tags=tags,
        inheritable_metadata=metadata,
    )
    # Open the group as a chain run; the yielded manager is its child.
    parent_run = manager.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
    child = parent_run.get_child()
    group_manager = CallbackManagerForChainGroup(
        child.handlers,
        child.inheritable_handlers,
        child.parent_run_id,
        parent_run_manager=parent_run,
        tags=child.tags,
        inheritable_tags=child.inheritable_tags,
        metadata=child.metadata,
        inheritable_metadata=child.inheritable_metadata,
    )
    try:
        yield group_manager
    except Exception as exc:
        # Close the run as errored unless the caller already ended the group,
        # then let the exception propagate.
        if not group_manager.ended:
            parent_run.on_chain_error(exc)
        raise
    else:
        # Close the run cleanly unless the caller already ended the group.
        if not group_manager.ended:
            parent_run.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
    group_name: str,
    callback_manager: AsyncCallbackManager | None = None,
    *,
    inputs: dict[str, Any] | None = None,
    project_name: str | None = None,
    example_id: str | UUID | None = None,
    run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
    """Get an async callback manager for a chain group in a context manager.

    Useful for grouping different async calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name: The name of the chain group.
        callback_manager: The async callback manager to use, which manages
            tracing and other callback behavior.
        inputs: The inputs to the chain group.
        project_name: The name of the project.
        example_id: The ID of the example.
        run_id: The ID of the run.
        tags: The inheritable tags to apply to all runs.
        metadata: The metadata to apply to all runs.

    Yields:
        The async callback manager for the chain group.

    !!! note
        Must have `LANGCHAIN_TRACING_V2` env var set to true to see the trace in
        LangSmith.

    Example:
        ```python
        llm_input = "Foo"
        async with atrace_as_chain_group(
            "group_name", inputs={"input": llm_input}
        ) as manager:
            # Use the async callback manager for the chain group
            res = await llm.ainvoke(llm_input, {"callbacks": manager})
            await manager.on_chain_end({"output": res})
        ```
    """
    # Resolve tracing callbacks and build a configured async manager.
    trace_callbacks = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    )
    manager = AsyncCallbackManager.configure(
        inheritable_callbacks=trace_callbacks,
        inheritable_tags=tags,
        inheritable_metadata=metadata,
    )
    # Open the group as a chain run; the yielded manager is its child.
    parent_run = await manager.on_chain_start(
        {"name": group_name}, inputs or {}, run_id=run_id
    )
    child = parent_run.get_child()
    group_manager = AsyncCallbackManagerForChainGroup(
        child.handlers,
        child.inheritable_handlers,
        child.parent_run_id,
        parent_run_manager=parent_run,
        tags=child.tags,
        inheritable_tags=child.inheritable_tags,
        metadata=child.metadata,
        inheritable_metadata=child.inheritable_metadata,
    )
    try:
        yield group_manager
    except Exception as exc:
        # Close the run as errored unless the caller already ended the group,
        # then let the exception propagate.
        if not group_manager.ended:
            await parent_run.on_chain_error(exc)
        raise
    else:
        # Close the run cleanly unless the caller already ended the group.
        if not group_manager.ended:
            await parent_run.on_chain_end({})
Func = TypeVar("Func", bound=Callable)


def shielded(func: Func) -> Func:
    """Makes so an awaitable method is always shielded from cancellation.

    The wrapped coroutine is run inside `asyncio.shield`, so cancelling the
    awaiting caller does not cancel the underlying work.

    Args:
        func: The function to shield.

    Returns:
        The shielded function
    """

    @functools.wraps(func)
    async def wrapped(*args: Any, **kwargs: Any) -> Any:
        # Capture the current context to preserve context variables
        ctx = copy_context()
        # Create the coroutine
        coro = func(*args, **kwargs)
        try:
            # Python 3.11+: create the task with the captured context so
            # context variables propagate into it.
            task = asyncio.create_task(coro, context=ctx)  # type: ignore[call-arg, unused-ignore]
            # `call-arg` used to not fail 3.9 or 3.10 tests
        except TypeError:
            # Python < 3.11 fallback - create task normally.
            # This won't preserve context perfectly but is better than nothing.
            task = asyncio.create_task(coro)
        # Await OUTSIDE the try block: if the wrapped coroutine itself raises
        # TypeError, we must not fall into the <3.11 fallback and re-submit an
        # already-awaited coroutine (that would raise RuntimeError and mask
        # the real error).
        return await asyncio.shield(task)

    return cast("Func", wrapped)
def handle_event(
    handlers: list[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: str | None,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for `CallbackManager`.
    This function is synchronous; coroutines returned by async handlers are
    collected and executed afterwards in the ``finally`` block, so sync and
    async handlers can be mixed.
    Args:
        handlers: The list of handlers that will handle the event.
        event_name: The name of the event (e.g., `'on_llm_start'`).
        ignore_condition_name: Name of the attribute defined on handler that if `True`
            will cause the handler to be skipped for the given event.
        *args: The arguments to pass to the event handler.
        **kwargs: The keyword arguments to pass to the event handler
    """
    coros: list[Coroutine[Any, Any, Any]] = []
    try:
        # Lazily-built cache of the messages rendered as prompt strings,
        # shared by the on_chat_model_start -> on_llm_start fallback below.
        message_strings: list[str] | None = None
        for handler in handlers:
            try:
                if ignore_condition_name is None or not getattr(
                    handler, ignore_condition_name
                ):
                    event = getattr(handler, event_name)(*args, **kwargs)
                    if asyncio.iscoroutine(event):
                        # Async handler: defer execution to the finally block.
                        coros.append(event)
            except NotImplementedError as e:
                # Handlers that don't implement on_chat_model_start get the
                # equivalent on_llm_start event instead.
                if event_name == "on_chat_model_start":
                    if message_strings is None:
                        message_strings = [get_buffer_string(m) for m in args[1]]
                    handle_event(
                        [handler],
                        "on_llm_start",
                        "ignore_llm",
                        args[0],
                        message_strings,
                        *args[2:],
                        **kwargs,
                    )
                else:
                    handler_name = handler.__class__.__name__
                    logger.warning(
                        "NotImplementedError in %s.%s callback: %s",
                        handler_name,
                        event_name,
                        repr(e),
                    )
            except Exception as e:
                # Handler errors are logged; they only propagate when the
                # handler opted in via `raise_error`.
                logger.warning(
                    "Error in %s.%s callback: %s",
                    handler.__class__.__name__,
                    event_name,
                    repr(e),
                )
                if handler.raise_error:
                    raise
    finally:
        if coros:
            try:
                # Raises RuntimeError if there is no current event loop.
                asyncio.get_running_loop()
                loop_running = True
            except RuntimeError:
                loop_running = False
            if loop_running:
                # If we try to submit this coroutine to the running loop
                # we end up in a deadlock, as we'd have gotten here from a
                # running coroutine, which we cannot interrupt to run this one.
                # The solution is to run the synchronous function on the globally shared
                # thread pool executor to avoid blocking the main event loop.
                _executor().submit(
                    cast("Callable", copy_context().run), _run_coros, coros
                ).result()
            else:
                # If there's no running loop, we can run the coroutines directly.
                _run_coros(coros)
def _run_coros(coros: list[Coroutine[Any, Any, Any]]) -> None:
if hasattr(asyncio, "Runner"):
# Python 3.11+
# Run the coroutines in a new event loop, taking care to
# - install signal handlers
# - run pending tasks scheduled by `coros`
# - close asyncgens and executors
# - close the loop
with asyncio.Runner() as runner:
# Run the coroutine, get the result
for coro in coros:
try:
runner.run(coro)
except Exception as e:
logger.warning("Error in callback coroutine: %s", repr(e))
# Run pending tasks scheduled by coros until they are all done
while pending := asyncio.all_tasks(runner.get_loop()):
runner.run(asyncio.wait(pending))
else:
# Before Python 3.11 we need to run each coroutine in a new event loop
# as the Runner api is not available.
for coro in coros:
try:
asyncio.run(coro)
except Exception as e:
logger.warning("Error in callback coroutine: %s", repr(e))
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: str | None,
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
elif handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None,
cast(
"Callable",
functools.partial(copy_context().run, event, *args, **kwargs),
),
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
"NotImplementedError in %s.%s callback: %s",
handler.__class__.__name__,
event_name,
repr(e),
)
except Exception as e:
logger.warning(
"Error in %s.%s callback: %s",
handler.__class__.__name__,
event_name,
repr(e),
)
if handler.raise_error:
raise
async def ahandle_event(
    handlers: list[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: str | None,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Async generic event handler for `AsyncCallbackManager`.

    Inline handlers are awaited one at a time, in order; the remaining
    handlers are then dispatched concurrently.

    Args:
        handlers: The list of handlers that will handle the event.
        event_name: The name of the event (e.g., `'on_llm_start'`).
        ignore_condition_name: Name of the attribute defined on handler that if `True`
            will cause the handler to be skipped for the given event.
        *args: The arguments to pass to the event handler.
        **kwargs: The keyword arguments to pass to the event handler.
    """
    inline = [h for h in handlers if h.run_inline]
    concurrent = [h for h in handlers if not h.run_inline]
    for h in inline:
        await _ahandle_event_for_handler(
            h, event_name, ignore_condition_name, *args, **kwargs
        )
    await asyncio.gather(
        *(
            _ahandle_event_for_handler(
                h,
                event_name,
                ignore_condition_name,
                *args,
                **kwargs,
            )
            for h in concurrent
        )
    )
class BaseRunManager(RunManagerMixin):
    """Base class for run manager (a bound callback manager)."""

    def __init__(
        self,
        *,
        run_id: UUID,
        handlers: list[BaseCallbackHandler],
        inheritable_handlers: list[BaseCallbackHandler],
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        inheritable_tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        inheritable_metadata: dict[str, Any] | None = None,
    ) -> None:
        """Initialize the run manager.

        Args:
            run_id: The ID of the run.
            handlers: The list of handlers.
            inheritable_handlers: The list of inheritable handlers.
            parent_run_id: The ID of the parent run, if any.
            tags: The list of tags.
            inheritable_tags: The list of inheritable tags.
            metadata: The metadata.
            inheritable_metadata: The inheritable metadata.
        """
        self.run_id = run_id
        self.parent_run_id = parent_run_id
        self.handlers = handlers
        self.inheritable_handlers = inheritable_handlers
        # Optional collections default to fresh empty containers.
        self.tags = tags or []
        self.inheritable_tags = inheritable_tags or []
        self.metadata = metadata or {}
        self.inheritable_metadata = inheritable_metadata or {}

    @classmethod
    def get_noop_manager(cls) -> Self:
        """Return a manager that doesn't perform any operations.

        Returns:
            The noop manager.
        """
        return cls(
            run_id=uuid7(),
            handlers=[],
            inheritable_handlers=[],
            tags=[],
            inheritable_tags=[],
            metadata={},
            inheritable_metadata={},
        )
class RunManager(BaseRunManager):
    """Synchronous run manager."""

    def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> None:
        """Run when a text event is received.

        Args:
            text: The received text.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_text",
                None,
                text,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_retry(
        self,
        retry_state: RetryCallState,
        **kwargs: Any,
    ) -> None:
        """Run when a retry event is received.

        Args:
            retry_state: The retry state.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_retry",
                "ignore_retry",
                retry_state,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class ParentRunManager(RunManager):
    """Synchronous parent run manager."""

    def get_child(self, tag: str | None = None) -> CallbackManager:
        """Get a child callback manager.

        Args:
            tag: Optional tag added (non-inheritable) to the child manager.

        Returns:
            The child callback manager.
        """
        child = CallbackManager(handlers=[], parent_run_id=self.run_id)
        child.set_handlers(self.inheritable_handlers)
        child.add_tags(self.inheritable_tags)
        child.add_metadata(self.inheritable_metadata)
        if tag is not None:
            child.add_tags([tag], inherit=False)
        return child
class AsyncRunManager(BaseRunManager, ABC):
    """Async run manager."""

    @abstractmethod
    def get_sync(self) -> RunManager:
        """Get the equivalent sync `RunManager`.

        Returns:
            The sync `RunManager`.
        """

    async def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> None:
        """Run when a text event is received.

        Args:
            text: The received text.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_text",
                None,
                text,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    async def on_retry(
        self,
        retry_state: RetryCallState,
        **kwargs: Any,
    ) -> None:
        """Async run when a retry event is received.

        Args:
            retry_state: The retry state.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_retry",
                "ignore_retry",
                retry_state,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class AsyncParentRunManager(AsyncRunManager):
    """Async parent run manager."""

    def get_child(self, tag: str | None = None) -> AsyncCallbackManager:
        """Get a child callback manager.

        Args:
            tag: Optional tag added (non-inheritable) to the child manager.

        Returns:
            The child callback manager.
        """
        child = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
        child.set_handlers(self.inheritable_handlers)
        child.add_tags(self.inheritable_tags)
        child.add_metadata(self.inheritable_metadata)
        if tag is not None:
            child.add_tags([tag], inherit=False)
        return child
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
    """Callback manager bound to a single synchronous LLM run."""

    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: GenerationChunk | ChatGenerationChunk | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the LLM produces a new token.

        Args:
            token: The newly generated token.
            chunk: The generation chunk the token belongs to, if available.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_llm_new_token",
                "ignore_llm",
                token=token,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                chunk=chunk,
                **kwargs,
            )

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when the LLM finishes running.

        Args:
            response: The final LLM result.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_llm_end",
                "ignore_llm",
                response,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_llm_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the LLM errors.

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
                - response (LLMResult): The response which was generated before
                    the error occurred.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_llm_error",
                "ignore_llm",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
    """Callback manager bound to a single asynchronous LLM run."""

    def get_sync(self) -> CallbackManagerForLLMRun:
        """Return the sync manager equivalent to this async one.

        Returns:
            The sync `RunManager`.
        """
        return CallbackManagerForLLMRun(
            run_id=self.run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    async def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: GenerationChunk | ChatGenerationChunk | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the LLM produces a new token.

        Args:
            token: The newly generated token.
            chunk: The generation chunk the token belongs to, if available.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_llm_new_token",
                "ignore_llm",
                token,
                chunk=chunk,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    @shielded
    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when the LLM finishes running (shielded from cancellation).

        Args:
            response: The final LLM result.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_llm_end",
                "ignore_llm",
                response,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    @shielded
    async def on_llm_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the LLM errors (shielded from cancellation).

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
                - response (LLMResult): The response which was generated before
                    the error occurred.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_llm_error",
                "ignore_llm",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
    """Callback manager bound to a single synchronous chain run."""

    def on_chain_end(self, outputs: dict[str, Any] | Any, **kwargs: Any) -> None:
        """Run when the chain finishes running.

        Args:
            outputs: The outputs of the chain.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_chain_end",
                "ignore_chain",
                outputs,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the chain errors.

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_chain_error",
                "ignore_chain",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Run when an agent action is received.

        Args:
            action: The agent action.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_agent_action",
                "ignore_agent",
                action,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when an agent finish is received.

        Args:
            finish: The agent finish.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_agent_finish",
                "ignore_agent",
                finish,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
    """Callback manager bound to a single asynchronous chain run."""

    def get_sync(self) -> CallbackManagerForChainRun:
        """Return the sync manager equivalent to this async one.

        Returns:
            The sync `RunManager`.
        """
        return CallbackManagerForChainRun(
            run_id=self.run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    @shielded
    async def on_chain_end(self, outputs: dict[str, Any] | Any, **kwargs: Any) -> None:
        """Run when the chain finishes running (shielded from cancellation).

        Args:
            outputs: The outputs of the chain.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_chain_end",
                "ignore_chain",
                outputs,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    @shielded
    async def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the chain errors (shielded from cancellation).

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_chain_error",
                "ignore_chain",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Run when an agent action is received.

        Args:
            action: The agent action.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_agent_action",
                "ignore_agent",
                action,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when an agent finish is received.

        Args:
            finish: The agent finish.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_agent_finish",
                "ignore_agent",
                finish,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
    """Callback manager bound to a single synchronous tool run."""

    def on_tool_end(
        self,
        output: Any,
        **kwargs: Any,
    ) -> None:
        """Run when the tool finishes running.

        Args:
            output: The output of the tool.
            **kwargs: The keyword arguments to pass to the event handler
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_tool_end",
                "ignore_agent",
                output,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_tool_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the tool errors.

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_tool_error",
                "ignore_agent",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
    """Callback manager bound to a single asynchronous tool run."""

    def get_sync(self) -> CallbackManagerForToolRun:
        """Return the sync manager equivalent to this async one.

        Returns:
            The sync `RunManager`.
        """
        return CallbackManagerForToolRun(
            run_id=self.run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    # NOTE(review): unlike the LLM/chain/retriever async managers, these
    # end/error callbacks are not decorated with @shielded — confirm whether
    # that asymmetry is intentional.
    async def on_tool_end(self, output: Any, **kwargs: Any) -> None:
        """Async run when the tool finishes running.

        Args:
            output: The output of the tool.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_tool_end",
                "ignore_agent",
                output,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    async def on_tool_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the tool errors.

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_tool_error",
                "ignore_agent",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
    """Callback manager bound to a single synchronous retriever run."""

    def on_retriever_end(
        self,
        documents: Sequence[Document],
        **kwargs: Any,
    ) -> None:
        """Run when the retriever finishes running.

        Args:
            documents: The retrieved documents.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_retriever_end",
                "ignore_retriever",
                documents,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    def on_retriever_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the retriever errors.

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            handle_event(
                self.handlers,
                "on_retriever_error",
                "ignore_retriever",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class AsyncCallbackManagerForRetrieverRun(
    AsyncParentRunManager,
    RetrieverManagerMixin,
):
    """Callback manager bound to a single asynchronous retriever run."""

    def get_sync(self) -> CallbackManagerForRetrieverRun:
        """Return the sync manager equivalent to this async one.

        Returns:
            The sync `RunManager`.
        """
        return CallbackManagerForRetrieverRun(
            run_id=self.run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    @shielded
    async def on_retriever_end(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> None:
        """Run when the retriever finishes running (shielded from cancellation).

        Args:
            documents: The retrieved documents.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_retriever_end",
                "ignore_retriever",
                documents,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )

    @shielded
    async def on_retriever_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when the retriever errors (shielded from cancellation).

        Args:
            error: The raised exception.
            **kwargs: Additional keyword arguments.
        """
        if self.handlers:
            await ahandle_event(
                self.handlers,
                "on_retriever_error",
                "ignore_retriever",
                error,
                run_id=self.run_id,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
class CallbackManager(BaseCallbackManager):
    """Callback manager for LangChain."""
    def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> list[CallbackManagerForLLMRun]:
        """Run when LLM starts running.
        Args:
            serialized: The serialized LLM.
            prompts: The list of prompts.
            run_id: The ID of the run (used for the first prompt only).
            **kwargs: Additional keyword arguments.
        Returns:
            A callback manager for each prompt as an LLM run.
        """
        # One run (and one bound run manager) is created per prompt.
        managers = []
        for i, prompt in enumerate(prompts):
            # Can't have duplicate runs with the same run ID (if provided)
            run_id_ = run_id if i == 0 and run_id is not None else uuid7()
            handle_event(
                self.handlers,
                "on_llm_start",
                "ignore_llm",
                serialized,
                [prompt],
                run_id=run_id_,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                metadata=self.metadata,
                **kwargs,
            )
            managers.append(
                CallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )
        return managers
    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> list[CallbackManagerForLLMRun]:
        """Run when chat model starts running.
        Args:
            serialized: The serialized LLM.
            messages: The list of messages.
            run_id: The ID of the run (used for the first message list only).
            **kwargs: Additional keyword arguments.
        Returns:
            A callback manager for each list of messages as an LLM run.
        """
        managers = []
        for message_list in messages:
            # The caller-supplied run_id is consumed by the first run only;
            # subsequent runs get freshly generated IDs.
            if run_id is not None:
                run_id_ = run_id
                run_id = None
            else:
                run_id_ = uuid7()
            handle_event(
                self.handlers,
                "on_chat_model_start",
                "ignore_chat_model",
                serialized,
                [message_list],
                run_id=run_id_,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                metadata=self.metadata,
                **kwargs,
            )
            managers.append(
                CallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )
        return managers
    def on_chain_start(
        self,
        serialized: dict[str, Any] | None,
        inputs: dict[str, Any] | Any,
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> CallbackManagerForChainRun:
        """Run when chain starts running.
        Args:
            serialized: The serialized chain.
            inputs: The inputs to the chain.
            run_id: The ID of the run; generated when not provided.
            **kwargs: Additional keyword arguments.
        Returns:
            The callback manager for the chain run.
        """
        if run_id is None:
            run_id = uuid7()
        handle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return CallbackManagerForChainRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
    @override
    def on_tool_start(
        self,
        serialized: dict[str, Any] | None,
        input_str: str,
        run_id: UUID | None = None,
        parent_run_id: UUID | None = None,
        inputs: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> CallbackManagerForToolRun:
        """Run when tool starts running.
        Args:
            serialized: Serialized representation of the tool.
            input_str: The input to the tool as a string.
                Non-string inputs are cast to strings.
            run_id: ID for the run.
            parent_run_id: The ID of the parent run.
            inputs: The original input to the tool if provided.
                Recommended for usage instead of input_str when the original input is
                needed.
                If provided, the inputs are expected to be formatted as a dict. The keys
                will correspond to the named-arguments in the tool.
            **kwargs: The keyword arguments to pass to the event handler
        Returns:
            The callback manager for the tool run.
        """
        # NOTE(review): the `parent_run_id` parameter is accepted but
        # `self.parent_run_id` is what gets propagated below — confirm this is
        # intentional.
        if run_id is None:
            run_id = uuid7()
        handle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            inputs=inputs,
            **kwargs,
        )
        return CallbackManagerForToolRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
    @override
    def on_retriever_start(
        self,
        serialized: dict[str, Any] | None,
        query: str,
        run_id: UUID | None = None,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> CallbackManagerForRetrieverRun:
        """Run when the retriever starts running.
        Args:
            serialized: The serialized retriever.
            query: The query.
            run_id: The ID of the run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        Returns:
            The callback manager for the retriever run.
        """
        # NOTE(review): as in on_tool_start, the `parent_run_id` parameter is
        # accepted but `self.parent_run_id` is what gets propagated below.
        if run_id is None:
            run_id = uuid7()
        handle_event(
            self.handlers,
            "on_retriever_start",
            "ignore_retriever",
            serialized,
            query,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return CallbackManagerForRetrieverRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
    def on_custom_event(
        self,
        name: str,
        data: Any,
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> None:
        """Dispatch an adhoc event to the handlers.
        This event should NOT be used in any internal LangChain code. The event is meant
        specifically for users of the library to dispatch custom events that are
        tailored to their application.
        Args:
            name: The name of the adhoc event.
            data: The data for the adhoc event.
            run_id: The ID of the run.
        Raises:
            ValueError: If additional keyword arguments are passed.
        """
        if not self.handlers:
            return
        # Extra kwargs are rejected so that all payload travels in `data`.
        if kwargs:
            msg = (
                "The dispatcher API does not accept additional keyword arguments."
                "Please do not pass any additional keyword arguments, instead "
                "include them in the data field."
            )
            raise ValueError(msg)
        if run_id is None:
            run_id = uuid7()
        handle_event(
            self.handlers,
            "on_custom_event",
            "ignore_custom_event",
            name,
            data,
            run_id=run_id,
            tags=self.tags,
            metadata=self.metadata,
        )
    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Callbacks = None,
        local_callbacks: Callbacks = None,
        verbose: bool = False,  # noqa: FBT001,FBT002
        inheritable_tags: list[str] | None = None,
        local_tags: list[str] | None = None,
        inheritable_metadata: dict[str, Any] | None = None,
        local_metadata: dict[str, Any] | None = None,
    ) -> CallbackManager:
        """Configure the callback manager.
        Args:
            inheritable_callbacks: The inheritable callbacks.
            local_callbacks: The local callbacks.
            verbose: Whether to enable verbose mode.
            inheritable_tags: The inheritable tags.
            local_tags: The local tags.
            inheritable_metadata: The inheritable metadata.
            local_metadata: The local metadata.
        Returns:
            The configured callback manager.
        """
        # Delegates all merging/resolution logic to the module-level _configure.
        return _configure(
            cls,
            inheritable_callbacks,
            local_callbacks,
            inheritable_tags,
            local_tags,
            inheritable_metadata,
            local_metadata,
            verbose=verbose,
        )
class CallbackManagerForChainGroup(CallbackManager):
    """Callback manager for the chain group."""
    def __init__(
        self,
        handlers: list[BaseCallbackHandler],
        inheritable_handlers: list[BaseCallbackHandler] | None = None,
        parent_run_id: UUID | None = None,
        *,
        parent_run_manager: CallbackManagerForChainRun,
        **kwargs: Any,
    ) -> None:
        """Initialize the callback manager.
        Args:
            handlers: The list of handlers.
            inheritable_handlers: The list of inheritable handlers.
            parent_run_id: The ID of the parent run.
            parent_run_manager: The parent run manager.
            **kwargs: Additional keyword arguments.
        """
        super().__init__(
            handlers,
            inheritable_handlers,
            parent_run_id,
            **kwargs,
        )
        # Kept so on_chain_end/on_chain_error below can close the group run.
        self.parent_run_manager = parent_run_manager
        # Set to True once on_chain_end or on_chain_error has fired; allows the
        # surrounding context manager to avoid double-ending the group run.
        self.ended = False
    @override
    def copy(self) -> CallbackManagerForChainGroup:
        # Shallow-copies the handler/tag/metadata collections; the
        # parent_run_manager reference is shared with the original.
        return self.__class__(
            handlers=self.handlers.copy(),
            inheritable_handlers=self.inheritable_handlers.copy(),
            parent_run_id=self.parent_run_id,
            tags=self.tags.copy(),
            inheritable_tags=self.inheritable_tags.copy(),
            metadata=self.metadata.copy(),
            inheritable_metadata=self.inheritable_metadata.copy(),
            parent_run_manager=self.parent_run_manager,
        )
    def merge(
        self: CallbackManagerForChainGroup, other: BaseCallbackManager
    ) -> CallbackManagerForChainGroup:
        """Merge the group callback manager with another callback manager.
        Overwrites the merge method in the base class to ensure that the parent run
        manager is preserved. Keeps the `parent_run_manager` from the current object.
        Returns:
            A copy of the current object with the handlers, tags, and other attributes
            merged from the other object.
        Example:
            ```python
            # Merging two callback managers
            from langchain_core.callbacks.manager import (
                CallbackManager,
                trace_as_chain_group,
            )
            from langchain_core.callbacks.stdout import StdOutCallbackHandler
            manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
            with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
                merged_manager = group_manager.merge(manager)
                print(type(merged_manager))
                # <class 'langchain_core.callbacks.manager.CallbackManagerForChainGroup'>
                print(merged_manager.handlers)
                # [
                # <langchain_core.callbacks.stdout.LangChainTracer object at ...>,
                # <langchain_core.callbacks.streaming_stdout.StdOutCallbackHandler object at ...>,
                # ]
                print(merged_manager.tags)
                # ['tag2', 'tag1']
            ```
        """  # noqa: E501
        manager = self.__class__(
            parent_run_id=self.parent_run_id or other.parent_run_id,
            handlers=[],
            inheritable_handlers=[],
            tags=list(set(self.tags + other.tags)),
            inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)),
            metadata={
                **self.metadata,
                **other.metadata,
            },
            parent_run_manager=self.parent_run_manager,
        )
        # Handlers are re-added through add_handler so de-duplication and
        # inheritance bookkeeping in the base class apply.
        handlers = self.handlers + other.handlers
        inheritable_handlers = self.inheritable_handlers + other.inheritable_handlers
        for handler in handlers:
            manager.add_handler(handler)
        for handler in inheritable_handlers:
            manager.add_handler(handler, inherit=True)
        return manager
    def on_chain_end(self, outputs: dict[str, Any] | Any, **kwargs: Any) -> None:
        """Run when traced chain group ends.
        Args:
            outputs: The outputs of the chain.
            **kwargs: Additional keyword arguments.
        """
        # Delegates to the parent run manager and marks the group as ended.
        self.ended = True
        return self.parent_run_manager.on_chain_end(outputs, **kwargs)
    def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.
        Args:
            error: The error.
            **kwargs: Additional keyword arguments.
        """
        # Delegates to the parent run manager and marks the group as ended.
        self.ended = True
        return self.parent_run_manager.on_chain_error(error, **kwargs)
class AsyncCallbackManager(BaseCallbackManager):
    """Async callback manager that handles callbacks from LangChain."""

    @property
    def is_async(self) -> bool:
        """Return whether the handler is async."""
        return True

    async def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> list[AsyncCallbackManagerForLLMRun]:
        """Run when LLM starts running.

        Args:
            serialized: The serialized LLM.
            prompts: The list of prompts.
            run_id: The ID of the run.
            **kwargs: Additional keyword arguments.

        Returns:
            The list of async callback managers, one for each LLM run corresponding to
            each prompt.
        """
        inline_tasks = []
        non_inline_tasks = []
        # Handlers flagged run_inline are awaited one at a time, in order;
        # the remainder are gathered concurrently at the end.
        inline_handlers = [handler for handler in self.handlers if handler.run_inline]
        non_inline_handlers = [
            handler for handler in self.handlers if not handler.run_inline
        ]
        managers = []

        for prompt in prompts:
            # A caller-supplied run_id is consumed by the first prompt only;
            # each subsequent prompt gets a freshly generated id.
            if run_id is not None:
                run_id_ = run_id
                run_id = None
            else:
                run_id_ = uuid7()

            # NOTE(review): when both inline and non-inline handlers are
            # registered, only the inline ones are dispatched for this event
            # (the else-branch is skipped). Contrast with on_chat_model_start
            # below, which schedules a task for *every* handler — confirm
            # whether dropping the non-inline handlers here is intentional.
            if inline_handlers:
                inline_tasks.append(
                    ahandle_event(
                        inline_handlers,
                        "on_llm_start",
                        "ignore_llm",
                        serialized,
                        [prompt],
                        run_id=run_id_,
                        parent_run_id=self.parent_run_id,
                        tags=self.tags,
                        metadata=self.metadata,
                        **kwargs,
                    )
                )
            else:
                non_inline_tasks.append(
                    ahandle_event(
                        non_inline_handlers,
                        "on_llm_start",
                        "ignore_llm",
                        serialized,
                        [prompt],
                        run_id=run_id_,
                        parent_run_id=self.parent_run_id,
                        tags=self.tags,
                        metadata=self.metadata,
                        **kwargs,
                    )
                )

            managers.append(
                AsyncCallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )

        # Run inline tasks sequentially
        for inline_task in inline_tasks:
            await inline_task

        # Run non-inline tasks concurrently
        if non_inline_tasks:
            await asyncio.gather(*non_inline_tasks)

        return managers

    async def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> list[AsyncCallbackManagerForLLMRun]:
        """Async run when LLM starts running.

        Args:
            serialized: The serialized LLM.
            messages: The list of messages.
            run_id: The ID of the run.
            **kwargs: Additional keyword arguments.

        Returns:
            The list of async callback managers, one for each LLM run corresponding to
            each inner message list.
        """
        inline_tasks = []
        non_inline_tasks = []
        managers = []

        for message_list in messages:
            # A caller-supplied run_id is consumed by the first message list
            # only; each subsequent list gets a freshly generated id.
            if run_id is not None:
                run_id_ = run_id
                run_id = None
            else:
                run_id_ = uuid7()

            # One event task per handler: inline handlers are awaited in
            # order, the rest are gathered concurrently below.
            for handler in self.handlers:
                task = ahandle_event(
                    [handler],
                    "on_chat_model_start",
                    "ignore_chat_model",
                    serialized,
                    [message_list],
                    run_id=run_id_,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    metadata=self.metadata,
                    **kwargs,
                )
                if handler.run_inline:
                    inline_tasks.append(task)
                else:
                    non_inline_tasks.append(task)

            managers.append(
                AsyncCallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )

        # Run inline tasks sequentially
        for task in inline_tasks:
            await task

        # Run non-inline tasks concurrently
        if non_inline_tasks:
            await asyncio.gather(*non_inline_tasks)

        return managers

    async def on_chain_start(
        self,
        serialized: dict[str, Any] | None,
        inputs: dict[str, Any] | Any,
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForChainRun:
        """Async run when chain starts running.

        Args:
            serialized: The serialized chain.
            inputs: The inputs to the chain.
            run_id: The ID of the run.
            **kwargs: Additional keyword arguments.

        Returns:
            The async callback manager for the chain run.
        """
        if run_id is None:
            run_id = uuid7()

        await ahandle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )

        return AsyncCallbackManagerForChainRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    @override
    async def on_tool_start(
        self,
        serialized: dict[str, Any] | None,
        input_str: str,
        run_id: UUID | None = None,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForToolRun:
        """Run when the tool starts running.

        Args:
            serialized: The serialized tool.
            input_str: The input to the tool.
            run_id: The ID of the run.
            parent_run_id: The ID of the parent run.
                Note: the event and the returned manager use this manager's
                own parent_run_id, not this parameter.
            **kwargs: Additional keyword arguments.

        Returns:
            The async callback manager for the tool run.
        """
        if run_id is None:
            run_id = uuid7()

        await ahandle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )

        return AsyncCallbackManagerForToolRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    async def on_custom_event(
        self,
        name: str,
        data: Any,
        run_id: UUID | None = None,
        **kwargs: Any,
    ) -> None:
        """Dispatch an adhoc event to the handlers (async version).

        This event should NOT be used in any internal LangChain code. The event is meant
        specifically for users of the library to dispatch custom events that are
        tailored to their application.

        Args:
            name: The name of the adhoc event.
            data: The data for the adhoc event.
            run_id: The ID of the run.
            **kwargs: Not supported; present only so a ValueError can be raised.

        Raises:
            ValueError: If additional keyword arguments are passed.
        """
        # No handlers registered: nothing to dispatch. Note this early return
        # also skips the kwargs validation below.
        if not self.handlers:
            return
        if run_id is None:
            run_id = uuid7()

        if kwargs:
            msg = (
                "The dispatcher API does not accept additional keyword arguments."
                "Please do not pass any additional keyword arguments, instead "
                "include them in the data field."
            )
            raise ValueError(msg)
        await ahandle_event(
            self.handlers,
            "on_custom_event",
            "ignore_custom_event",
            name,
            data,
            run_id=run_id,
            tags=self.tags,
            metadata=self.metadata,
        )

    @override
    async def on_retriever_start(
        self,
        serialized: dict[str, Any] | None,
        query: str,
        run_id: UUID | None = None,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForRetrieverRun:
        """Run when the retriever starts running.

        Args:
            serialized: The serialized retriever.
            query: The query.
            run_id: The ID of the run.
            parent_run_id: The ID of the parent run.
                Note: the event and the returned manager use this manager's
                own parent_run_id, not this parameter.
            **kwargs: Additional keyword arguments.

        Returns:
            The async callback manager for the retriever run.
        """
        if run_id is None:
            run_id = uuid7()

        await ahandle_event(
            self.handlers,
            "on_retriever_start",
            "ignore_retriever",
            serialized,
            query,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )

        return AsyncCallbackManagerForRetrieverRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Callbacks = None,
        local_callbacks: Callbacks = None,
        verbose: bool = False,  # noqa: FBT001,FBT002
        inheritable_tags: list[str] | None = None,
        local_tags: list[str] | None = None,
        inheritable_metadata: dict[str, Any] | None = None,
        local_metadata: dict[str, Any] | None = None,
    ) -> AsyncCallbackManager:
        """Configure the async callback manager.

        Args:
            inheritable_callbacks: The inheritable callbacks.
            local_callbacks: The local callbacks.
            verbose: Whether to enable verbose mode.
            inheritable_tags: The inheritable tags.
            local_tags: The local tags.
            inheritable_metadata: The inheritable metadata.
            local_metadata: The local metadata.

        Returns:
            The configured async callback manager.
        """
        # Delegates to the shared module-level _configure(), parameterized by
        # this class so the sync and async managers share one code path.
        return _configure(
            cls,
            inheritable_callbacks,
            local_callbacks,
            inheritable_tags,
            local_tags,
            inheritable_metadata,
            local_metadata,
            verbose=verbose,
        )
class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
    """Async callback manager for the chain group."""

    def __init__(
        self,
        handlers: list[BaseCallbackHandler],
        inheritable_handlers: list[BaseCallbackHandler] | None = None,
        parent_run_id: UUID | None = None,
        *,
        parent_run_manager: AsyncCallbackManagerForChainRun,
        **kwargs: Any,
    ) -> None:
        """Initialize the async callback manager.

        Args:
            handlers: The list of handlers.
            inheritable_handlers: The list of inheritable handlers.
            parent_run_id: The ID of the parent run.
            parent_run_manager: The parent run manager.
            **kwargs: Additional keyword arguments.
        """
        super().__init__(
            handlers,
            inheritable_handlers,
            parent_run_id,
            **kwargs,
        )
        # Run manager of the enclosing chain; chain-end/error events are
        # forwarded to it so the group closes out the parent run.
        self.parent_run_manager = parent_run_manager
        # Flipped to True once on_chain_end/on_chain_error has fired.
        self.ended = False

    def copy(self) -> AsyncCallbackManagerForChainGroup:
        """Return a copy of the async callback manager.

        The mutable containers are shallow-copied; handler objects and the
        parent run manager are shared with the original.
        """
        return self.__class__(
            handlers=self.handlers.copy(),
            inheritable_handlers=self.inheritable_handlers.copy(),
            parent_run_id=self.parent_run_id,
            tags=self.tags.copy(),
            inheritable_tags=self.inheritable_tags.copy(),
            metadata=self.metadata.copy(),
            inheritable_metadata=self.inheritable_metadata.copy(),
            parent_run_manager=self.parent_run_manager,
        )

    def merge(
        self: AsyncCallbackManagerForChainGroup, other: BaseCallbackManager
    ) -> AsyncCallbackManagerForChainGroup:
        """Merge the group callback manager with another callback manager.

        Overwrites the merge method in the base class to ensure that the parent run
        manager is preserved. Keeps the `parent_run_manager` from the current object.

        Returns:
            A copy of the current `AsyncCallbackManagerForChainGroup` with the handlers,
            tags, etc. of the other callback manager merged in.

        Example:
            ```python
            # Merging two callback managers
            from langchain_core.callbacks.manager import (
                CallbackManager,
                atrace_as_chain_group,
            )
            from langchain_core.callbacks.stdout import StdOutCallbackHandler

            manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
            async with atrace_as_chain_group(
                "My Group Name", tags=["tag1"]
            ) as group_manager:
                merged_manager = group_manager.merge(manager)
                print(type(merged_manager))
                # <class 'langchain_core.callbacks.manager.AsyncCallbackManagerForChainGroup'>

                print(merged_manager.handlers)
                # [
                #    <langchain_core.callbacks.stdout.LangChainTracer object at ...>,
                #    <langchain_core.callbacks.streaming_stdout.StdOutCallbackHandler object at ...>,
                # ]

                print(merged_manager.tags)
                #    ['tag2', 'tag1']
            ```
        """  # noqa: E501
        # Build the merged manager with empty handler lists first so that
        # add_handler() below can apply its usual bookkeeping when the
        # handlers from both managers are re-registered.
        manager = self.__class__(
            parent_run_id=self.parent_run_id or other.parent_run_id,
            handlers=[],
            inheritable_handlers=[],
            # set() union deduplicates; the resulting tag order is unspecified.
            tags=list(set(self.tags + other.tags)),
            inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)),
            # On key collisions, `other`'s metadata wins (later unpack).
            metadata={
                **self.metadata,
                **other.metadata,
            },
            parent_run_manager=self.parent_run_manager,
        )

        handlers = self.handlers + other.handlers
        inheritable_handlers = self.inheritable_handlers + other.inheritable_handlers

        for handler in handlers:
            manager.add_handler(handler)

        for handler in inheritable_handlers:
            manager.add_handler(handler, inherit=True)

        return manager

    async def on_chain_end(self, outputs: dict[str, Any] | Any, **kwargs: Any) -> None:
        """Run when traced chain group ends.

        Args:
            outputs: The outputs of the chain.
            **kwargs: Additional keyword arguments.
        """
        # Mark the group as finished, then close out the parent chain run.
        self.ended = True
        await self.parent_run_manager.on_chain_end(outputs, **kwargs)

    async def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error: The error.
            **kwargs: Additional keyword arguments.
        """
        # Mark the group as finished, then fail the parent chain run.
        self.ended = True
        await self.parent_run_manager.on_chain_error(error, **kwargs)
# Constrained to the two concrete manager classes so _configure() returns
# the same type it was handed (sync in, sync out; async in, async out).
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
    callback_manager_cls: type[T],
    inheritable_callbacks: Callbacks = None,
    local_callbacks: Callbacks = None,
    inheritable_tags: list[str] | None = None,
    local_tags: list[str] | None = None,
    inheritable_metadata: dict[str, Any] | None = None,
    local_metadata: dict[str, Any] | None = None,
    *,
    verbose: bool = False,
) -> T:
    """Configure the callback manager.

    Args:
        callback_manager_cls: The callback manager class.
        inheritable_callbacks: The inheritable callbacks.
        local_callbacks: The local callbacks.
        inheritable_tags: The inheritable tags.
        local_tags: The local tags.
        inheritable_metadata: The inheritable metadata.
        local_metadata: The local metadata.
        verbose: Whether to enable verbose mode.

    Raises:
        RuntimeError: If `LANGCHAIN_TRACING` is set but `LANGCHAIN_TRACING_V2` is not.

    Returns:
        The configured callback manager.
    """
    # Seed parentage from any externally-supplied tracing context.
    tracing_context = get_tracing_context()
    tracing_metadata = tracing_context["metadata"]
    tracing_tags = tracing_context["tags"]
    run_tree: Run | None = tracing_context["parent"]
    parent_run_id = None if run_tree is None else run_tree.id
    callback_manager = callback_manager_cls(
        handlers=[],
        parent_run_id=parent_run_id,
    )
    if inheritable_callbacks or local_callbacks:
        if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
            # Plain list (or nothing): the same handlers are both the active
            # and the inheritable set.
            inheritable_callbacks_ = inheritable_callbacks or []
            callback_manager = callback_manager_cls(
                handlers=inheritable_callbacks_.copy(),
                inheritable_handlers=inheritable_callbacks_.copy(),
                parent_run_id=parent_run_id,
            )
        else:
            # inheritable_callbacks is itself a manager: clone its state.
            parent_run_id_ = inheritable_callbacks.parent_run_id
            # Break ties between the external tracing context and inherited context
            if parent_run_id is not None and (
                parent_run_id_ is None
                # If the LC parent has already been reflected
                # in the run tree, we know the run_tree is either the
                # same parent or a child of the parent.
                or (run_tree and str(parent_run_id_) in run_tree.dotted_order)
            ):
                parent_run_id_ = parent_run_id
            # Otherwise, we assume the LC context has progressed
            # beyond the run tree and we should not inherit the parent.
            callback_manager = callback_manager_cls(
                handlers=inheritable_callbacks.handlers.copy(),
                inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
                parent_run_id=parent_run_id_,
                tags=inheritable_callbacks.tags.copy(),
                inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
                metadata=inheritable_callbacks.metadata.copy(),
                inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
            )
        # Local callbacks apply only to this manager (inherit=False).
        local_handlers_ = (
            local_callbacks
            if isinstance(local_callbacks, list)
            else (local_callbacks.handlers if local_callbacks else [])
        )
        for handler in local_handlers_:
            callback_manager.add_handler(handler, inherit=False)
    if inheritable_tags or local_tags:
        callback_manager.add_tags(inheritable_tags or [])
        callback_manager.add_tags(local_tags or [], inherit=False)
    if inheritable_metadata or local_metadata:
        callback_manager.add_metadata(inheritable_metadata or {})
        callback_manager.add_metadata(local_metadata or {}, inherit=False)
    if tracing_metadata:
        callback_manager.add_metadata(tracing_metadata.copy())
    if tracing_tags:
        callback_manager.add_tags(tracing_tags.copy())

    # Legacy v1 tracing is unsupported; fail loudly rather than silently
    # dropping traces — unless v2 is also enabled, in which case v1 is ignored.
    v1_tracing_enabled_ = env_var_is_set("LANGCHAIN_TRACING") or env_var_is_set(
        "LANGCHAIN_HANDLER"
    )

    tracer_v2 = tracing_v2_callback_var.get()
    tracing_v2_enabled_ = _tracing_v2_is_enabled()

    if v1_tracing_enabled_ and not tracing_v2_enabled_:
        # if both are enabled, can silently ignore the v1 tracer
        msg = (
            "Tracing using LangChainTracerV1 is no longer supported. "
            "Please set the LANGCHAIN_TRACING_V2 environment variable to enable "
            "tracing instead."
        )
        raise RuntimeError(msg)

    tracer_project = _get_tracer_project()
    debug = _get_debug()
    if verbose or debug or tracing_v2_enabled_:
        if verbose and not any(
            isinstance(handler, StdOutCallbackHandler)
            for handler in callback_manager.handlers
        ):
            if debug:
                # Debug mode adds a ConsoleCallbackHandler below, so the
                # plain stdout handler is deliberately skipped here.
                pass
            else:
                callback_manager.add_handler(StdOutCallbackHandler(), inherit=False)
        if debug and not any(
            isinstance(handler, ConsoleCallbackHandler)
            for handler in callback_manager.handlers
        ):
            callback_manager.add_handler(ConsoleCallbackHandler())
        if tracing_v2_enabled_ and not any(
            isinstance(handler, LangChainTracer)
            for handler in callback_manager.handlers
        ):
            if tracer_v2:
                callback_manager.add_handler(tracer_v2)
            else:
                try:
                    handler = LangChainTracer(
                        project_name=tracer_project,
                        client=(
                            run_tree.client
                            if run_tree is not None
                            else tracing_context["client"]
                        ),
                        tags=tracing_tags,
                    )
                    callback_manager.add_handler(handler)
                except Exception as e:
                    # Best effort: a tracer failure must not break the run.
                    logger.warning(
                        "Unable to load requested LangChainTracer."
                        " To disable this warning,"
                        " unset the LANGCHAIN_TRACING_V2 environment variables.\n"
                        "%s",
                        repr(e),
                    )
    if run_tree is not None:
        # Pre-seed every tracer with the external run tree so child runs are
        # stitched into the correct trace / dotted order.
        for handler in callback_manager.handlers:
            if isinstance(handler, LangChainTracer):
                handler.order_map[run_tree.id] = (
                    run_tree.trace_id,
                    run_tree.dotted_order,
                )
                handler.run_map[str(run_tree.id)] = run_tree
    # Hook-registered handlers: each entry is a context var, whether its
    # handler is inheritable, an optional handler class, and an optional
    # env var that forces creation of the handler.
    for var, inheritable, handler_class, env_var in _configure_hooks:
        create_one = (
            env_var is not None
            and env_var_is_set(env_var)
            and handler_class is not None
        )
        if var.get() is not None or create_one:
            var_handler = (
                var.get() or cast("type[BaseCallbackHandler]", handler_class)()
            )
            if handler_class is None:
                if not any(
                    handler is var_handler  # direct pointer comparison
                    for handler in callback_manager.handlers
                ):
                    callback_manager.add_handler(var_handler, inheritable)
            elif not any(
                isinstance(handler, handler_class)
                for handler in callback_manager.handlers
            ):
                callback_manager.add_handler(var_handler, inheritable)
    return callback_manager
async def adispatch_custom_event(
    name: str, data: Any, *, config: RunnableConfig | None = None
) -> None:
    """Dispatch an adhoc event to the handlers.

    Args:
        name: The name of the adhoc event.
        data: The data for the adhoc event.
            Free form data. Ideally should be JSON serializable to avoid serialization
            issues downstream, but this is not enforced.
        config: Optional config object.
            Mirrors the async API but not strictly needed.

    Raises:
        RuntimeError: If there is no parent run ID available to associate the event
            with.

    Example:
        ```python
        from langchain_core.callbacks import (
            AsyncCallbackHandler,
            adispatch_custom_event
        )
        from langchain_core.runnables import RunnableLambda

        class CustomCallbackManager(AsyncCallbackHandler):
            async def on_custom_event(
                self,
                name: str,
                data: Any,
                *,
                run_id: UUID,
                tags: list[str] | None = None,
                metadata: dict[str, Any] | None = None,
                **kwargs: Any,
            ) -> None:
                print(f"Received custom event: {name} with data: {data}")

        callback = CustomCallbackManager()

        async def foo(inputs):
            await adispatch_custom_event("my_event", {"bar": "buzz"})
            return inputs

        foo_ = RunnableLambda(foo)
        await foo_.ainvoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})
        ```

    Example: Use with astream events
        ```python
        from langchain_core.callbacks import (
            AsyncCallbackHandler,
            adispatch_custom_event
        )
        from langchain_core.runnables import RunnableLambda

        class CustomCallbackManager(AsyncCallbackHandler):
            async def on_custom_event(
                self,
                name: str,
                data: Any,
                *,
                run_id: UUID,
                tags: list[str] | None = None,
                metadata: dict[str, Any] | None = None,
                **kwargs: Any,
            ) -> None:
                print(f"Received custom event: {name} with data: {data}")

        callback = CustomCallbackManager()

        async def foo(inputs):
            await adispatch_custom_event("event_type_1", {"bar": "buzz"})
            await adispatch_custom_event("event_type_2", 5)
            return inputs

        foo_ = RunnableLambda(foo)
        async for event in foo_.astream_events(
            {"a": "1"},
            version="v2",
            config={"callbacks": [CustomCallbackManager()]}
        ):
            print(event)
        ```

    !!! warning
        If using python 3.10 and async, you MUST specify the `config` parameter or the
        function will raise an error. This is due to a limitation in asyncio for python
        3.10 that prevents LangChain from automatically propagating the config object on
        the user's behalf.
    """
    # Import locally to prevent circular imports.
    from langchain_core.runnables.config import (  # noqa: PLC0415
        ensure_config,
        get_async_callback_manager_for_config,
    )

    config = ensure_config(config)
    callback_manager = get_async_callback_manager_for_config(config)
    # We want to get the callback manager for the parent run.
    # This is a work-around for now to be able to dispatch adhoc events from
    # within a tool or a lambda and have the metadata events associated
    # with the parent run rather than have a new run id generated for each.
    if callback_manager.parent_run_id is None:
        msg = (
            "Unable to dispatch an adhoc event without a parent run id."
            "This function can only be called from within an existing run (e.g.,"
            "inside a tool or a RunnableLambda or a RunnableGenerator.)"
            "If you are doing that and still seeing this error, try explicitly"
            "passing the config parameter to this function."
        )
        raise RuntimeError(msg)
    await callback_manager.on_custom_event(
        name,
        data,
        run_id=callback_manager.parent_run_id,
    )
def dispatch_custom_event(
    name: str, data: Any, *, config: RunnableConfig | None = None
) -> None:
    """Dispatch an adhoc event.

    Args:
        name: The name of the adhoc event.
        data: The data for the adhoc event.
            Free form data. Ideally should be JSON serializable to avoid serialization
            issues downstream, but this is not enforced.
        config: Optional config object.
            Mirrors the async API but not strictly needed.

    Raises:
        RuntimeError: If there is no parent run ID available to associate the event
            with.

    Example:
        ```python
        from langchain_core.callbacks import BaseCallbackHandler
        from langchain_core.callbacks import dispatch_custom_event
        from langchain_core.runnables import RunnableLambda

        class CustomCallbackManager(BaseCallbackHandler):
            def on_custom_event(
                self,
                name: str,
                data: Any,
                *,
                run_id: UUID,
                tags: list[str] | None = None,
                metadata: dict[str, Any] | None = None,
                **kwargs: Any,
            ) -> None:
                print(f"Received custom event: {name} with data: {data}")

        def foo(inputs):
            dispatch_custom_event("my_event", {"bar": "buzz"})
            return inputs

        foo_ = RunnableLambda(foo)
        foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})
        ```
    """
    # Import locally to prevent circular imports.
    from langchain_core.runnables.config import (  # noqa: PLC0415
        ensure_config,
        get_callback_manager_for_config,
    )

    config = ensure_config(config)
    callback_manager = get_callback_manager_for_config(config)
    # We want to get the callback manager for the parent run.
    # This is a work-around for now to be able to dispatch adhoc events from
    # within a tool or a lambda and have the metadata events associated
    # with the parent run rather than have a new run id generated for each.
    if callback_manager.parent_run_id is None:
        msg = (
            "Unable to dispatch an adhoc event without a parent run id."
            "This function can only be called from within an existing run (e.g.,"
            "inside a tool or a RunnableLambda or a RunnableGenerator.)"
            "If you are doing that and still seeing this error, try explicitly"
            "passing the config parameter to this function."
        )
        raise RuntimeError(msg)
    callback_manager.on_custom_event(
        name,
        data,
        run_id=callback_manager.parent_run_id,
    )
@functools.lru_cache(maxsize=1)
def _executor() -> ThreadPoolExecutor:
    """Return the shared background executor, creating it on first use.

    When ASYNC callback handlers must be driven from a SYNC context while an
    event loop is already running, the coroutine cannot be submitted to that
    loop (doing so would deadlock), so it is scheduled on a background thread
    instead. A single lazily-created executor is shared to avoid creating and
    shutting down a new one on every call. Regular langchain parallelism
    (batch, etc.) only ever needs one worker, but more are permitted for now
    to reduce the chance of slowdown when mixing with a user-supplied
    executor.
    """
    pool = ThreadPoolExecutor(max_workers=10)
    # Drain outstanding work before interpreter shutdown.
    atexit.register(pool.shutdown, wait=True)
    return pool
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/core/langchain_core/callbacks/manager.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from future import Future
from object_store import ObjectStore
class CacheChainObjectStore(ObjectStore):
  '''Maintains an in-memory cache along with a chain of other object stores to
  try for the same keys. This is useful for implementing a multi-layered cache.
  The in-memory cache is inbuilt since it's synchronous, but the object store
  interface is asynchronous.
  The rules for the object store chain are:
  - When setting (or deleting) items, all object stores in the hierarchy will
    have that item set.
  - When getting items, the behaviour depends on |start_empty|.
    - If false, each object store is tried in order. The first object
      store to find the item will trickle back up, setting it on all object
      stores higher in the hierarchy.
    - If true, only the first in-memory cache is checked, as though the store
      had been initialized with no content as opposed to the union of its
      delegate stores.
  '''
  def __init__(self, object_stores, start_empty=False):
    self._object_stores = object_stores
    self._start_empty = start_empty
    # Synchronous in-memory cache, consulted before any delegate store.
    self._cache = {}

  def SetMulti(self, mapping):
    # Writes go to the in-memory cache and to every delegate store.
    self._cache.update(mapping)
    for object_store in self._object_stores:
      object_store.SetMulti(mapping)

  def GetMulti(self, keys):
    missing_keys = list(keys)
    cached_items = {}
    for key in keys:
      if key in self._cache:
        cached_items[key] = self._cache.get(key)
        missing_keys.remove(key)
    if len(missing_keys) == 0 or self._start_empty:
      return Future(value=cached_items)
    # Kick off an (async) fetch on every delegate store up front; which of
    # the returned Futures actually get realised is decided lazily in
    # resolve() below.
    object_store_futures = [(object_store, object_store.GetMulti(missing_keys))
                            for object_store in self._object_stores]
    def resolve():
      # Approach:
      #
      # Try each object store in order, until there are no more missing keys.
      # Don't realise the Future value of an object store that we don't need to;
      # this is important e.g. to avoid querying data store constantly.
      #
      # When a value is found, cache it in all object stores further up the
      # chain, including the object-based cache on CacheChainObjectStore.
      object_store_updates = []
      for object_store, object_store_future in object_store_futures:
        if len(missing_keys) == 0:
          break
        result = object_store_future.Get()
        # Iterate over a snapshot of the items: |result| is mutated (del)
        # inside the loop, and mutating a dict while iterating its live
        # view raises a RuntimeError on Python 3 (items() returned a copy
        # only on Python 2).
        for k, v in list(result.items()):
          if v is None or k not in missing_keys:
            del result[k]
            continue
          self._cache[k] = v
          cached_items[k] = v
          missing_keys.remove(k)
        # Queue this store's findings to be written back into every store
        # *earlier* in the chain than the one that produced them.
        for _, updates in object_store_updates:
          updates.update(result)
        object_store_updates.append((object_store, {}))
      # Update the caches of all object stores that need it.
      for object_store, updates in object_store_updates:
        if updates:
          object_store.SetMulti(updates)
      return cached_items
    return Future(callback=resolve)

  def DelMulti(self, keys):
    for k in keys:
      # pop() rather than del: tolerate keys absent from the in-memory cache.
      self._cache.pop(k, None)
    for object_store in self._object_stores:
      object_store.DelMulti(keys)
|
unknown
|
codeparrot/codeparrot-clean
| ||
package client
import (
"fmt"
"net/http"
"testing"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/moby/api/types/swarm"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
// TestConfigListError verifies that a server-side failure (HTTP 500) from
// the daemon is surfaced by ConfigList as an "internal" error.
func TestConfigListError(t *testing.T) {
	client, err := New(
		WithMockClient(errorMock(http.StatusInternalServerError, "Server error")),
	)
	assert.NilError(t, err)

	_, err = client.ConfigList(t.Context(), ConfigListOptions{})
	assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
}
// TestConfigList verifies that ConfigList issues GET /configs with the
// "filters" query parameter serialized as expected for each set of options,
// and that the JSON response is decoded into the returned config list.
func TestConfigList(t *testing.T) {
	const expectedURL = "/configs"

	// Each case pairs list options with the query parameters the client is
	// expected to produce from them.
	listCases := []struct {
		options             ConfigListOptions
		expectedQueryParams map[string]string
	}{
		{
			// No filters: the "filters" parameter is empty.
			options: ConfigListOptions{},
			expectedQueryParams: map[string]string{
				"filters": "",
			},
		},
		{
			// Multiple values for the same filter key are merged into a
			// single JSON-encoded map entry.
			options: ConfigListOptions{
				Filters: make(Filters).
					Add("label", "label1").
					Add("label", "label2"),
			},
			expectedQueryParams: map[string]string{
				"filters": `{"label":{"label1":true,"label2":true}}`,
			},
		},
	}
	for _, listCase := range listCases {
		client, err := New(
			WithMockClient(func(req *http.Request) (*http.Response, error) {
				if err := assertRequest(req, http.MethodGet, expectedURL); err != nil {
					return nil, err
				}
				// Check that each expected query parameter made it onto the URL.
				query := req.URL.Query()
				for key, expected := range listCase.expectedQueryParams {
					actual := query.Get(key)
					if actual != expected {
						return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual)
					}
				}
				// Respond with two configs so the decode path can be asserted.
				return mockJSONResponse(http.StatusOK, nil, []swarm.Config{
					{ID: "config_id1"},
					{ID: "config_id2"},
				})(req)
			}),
		)
		assert.NilError(t, err)

		result, err := client.ConfigList(t.Context(), listCase.options)
		assert.NilError(t, err)
		assert.Check(t, is.Len(result.Items, 2))
	}
}
|
go
|
github
|
https://github.com/moby/moby
|
client/config_list_test.go
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import jinja2
import regex
def parse(lines, highlight_words, filters, objref_dict):
    """
    Given filters returns indices of wanted lines from log

    Args:
        lines: array of log lines
        highlight_words: array of words that need to be bolded
        filters: dictionary of which filters to apply
        objref_dict: a dictionary where the keys are possible filters
            and the values are the words to be highlighted

    Returns:
        matched_lines: ordered array of indices of lines to display
        highlight_words: updated highlight_words
    """
    matched_lines = []

    # When the pod filter is off but an ObjectReference was found, discard
    # the caller-supplied highlight words; they are rebuilt purely from the
    # active filters below.
    if not filters["pod"] and objref_dict:
        highlight_words = []

    # If the filter is on, look for it in the objref_dict
    for k in filters:
        if k != "pod" and filters[k] and k in objref_dict:
            highlight_words.append(objref_dict[k])

    # combine_wordsRE presumably compiles a single pattern matching any of
    # the highlight words — TODO confirm against the local regex module.
    words_re = regex.combine_wordsRE(highlight_words)

    # Keep the index of every line matching at least one highlight word;
    # indices stay in ascending (log) order.
    for n, line in enumerate(lines):
        if words_re.search(line):
            matched_lines.append(n)

    return matched_lines, highlight_words
def make_dict(data, pod_re, objref_dict):
    """
    Given the log file and the failed pod name, returns a dictionary
    containing the namespace, UID, and other information associated with the pod
    and a bool indicating if the pod name string is in the log file.

    This dictionary is lifted from the line with the ObjectReference
    """
    pod_in_file = False

    # Python 2 code: `unicode` + jinja2.escape HTML-escape the raw log data
    # before splitting into lines.
    lines = unicode(jinja2.escape(data)).split('\n')
    for line in lines:
        if pod_re.search(line):
            pod_in_file = True
            objref = regex.objref(line)
            containerID = regex.containerID(line)
            # Record the first container ID seen; never overwrite one that a
            # caller (or an earlier line) already supplied.
            if containerID and not objref_dict.get("ContainerID"):
                objref_dict["ContainerID"] = containerID.group(1)
            if objref:
                objref_dict_re = objref.group(1)
                # Quote bare keys (`key:` -> `"key": `) so the struct dump
                # can be parsed as JSON below.
                objref_dict_re = re.sub(r'(\w+):', r'"\1": ', objref_dict_re)
                # NOTE(review): this replace maps '"' to '"' and is a no-op
                # as written — presumably it was meant to undo an HTML
                # entity (e.g. '&#34;') introduced by jinja2.escape above;
                # confirm against the upstream source.
                objref_dict_re = objref_dict_re.replace('"', '"')
                objref_dict_re = json.loads(objref_dict_re)
                # Caller-supplied entries take precedence over parsed ones.
                objref_dict_re.update(objref_dict)
                return objref_dict_re, pod_in_file
    return objref_dict, pod_in_file
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
A ModuleStore that knows about a special version DRAFT. Modules
marked as DRAFT are read in preference to modules without the DRAFT
version by this ModuleStore (so, access to i4x://org/course/cat/name
returns the i4x://org/course/cat/name@draft object if that exists,
and otherwise returns i4x://org/course/cat/name).
"""
import pymongo
import logging
from opaque_keys.edx.locations import Location
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import PublishState, ModuleStoreEnum
from xmodule.modulestore.exceptions import (
ItemNotFoundError, DuplicateItemError, InvalidBranchSetting, DuplicateCourseError
)
from xmodule.modulestore.mongo.base import (
MongoModuleStore, MongoRevisionKey, as_draft, as_published,
SORT_REVISION_FAVOR_DRAFT
)
from xmodule.modulestore.store_utilities import rewrite_nonportable_content_links
from xmodule.modulestore.draft_and_published import UnsupportedRevisionError, DIRECT_ONLY_CATEGORIES
log = logging.getLogger(__name__)
def wrap_draft(item):
    """
    Tag *item* with an ``is_draft`` flag and normalize its location.

    ``item.is_draft`` becomes True exactly when the item's location carries the
    DRAFT revision; the location itself is always rewritten to the published
    (non-draft) revision before the item is returned.
    """
    item.is_draft = (item.location.revision == MongoRevisionKey.draft)
    item.location = item.location.replace(revision=MongoRevisionKey.published)
    return item
class DraftModuleStore(MongoModuleStore):
    """
    This mixin modifies a modulestore to give it draft semantics.
    Edits made to units are stored to locations that have the revision DRAFT.
    Reads are first read with revision DRAFT, and then fall back
    to the baseline revision only if DRAFT doesn't exist.

    This module also includes functionality to promote DRAFT modules (and their children)
    to published modules.
    """
    def get_item(self, usage_key, depth=0, revision=None):
        """
        Returns an XModuleDescriptor instance for the item at usage_key.

        Args:
            usage_key: A :class:`.UsageKey` instance

            depth (int): An argument that some module stores may use to prefetch
                descendants of the queried modules for more efficient results later
                in the request. The depth is counted in the number of calls to
                get_children() to cache. None indicates to cache all descendants.

            revision:
                ModuleStoreEnum.RevisionOption.published_only - returns only the published item.
                ModuleStoreEnum.RevisionOption.draft_only - returns only the draft item.
                None - uses the branch setting as follows:
                    if branch setting is ModuleStoreEnum.Branch.published_only, returns only the published item.
                    if branch setting is ModuleStoreEnum.Branch.draft_preferred, returns either draft or published item,
                        preferring draft.

                Note: If the item is in DIRECT_ONLY_CATEGORIES, then returns only the PUBLISHED
                version regardless of the revision.

        Raises:
            xmodule.modulestore.exceptions.InsufficientSpecificationError
                if any segment of the usage_key is None except revision
            xmodule.modulestore.exceptions.ItemNotFoundError if no object
                is found at that usage_key
        """
        def get_published():
            # wrap_draft normalizes the location and sets is_draft=False here
            return wrap_draft(super(DraftModuleStore, self).get_item(usage_key, depth=depth))

        def get_draft():
            return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(usage_key), depth=depth))

        # NOTE: branch order matters — direct-only must win over draft_only below.
        # return the published version if ModuleStoreEnum.RevisionOption.published_only is requested
        if revision == ModuleStoreEnum.RevisionOption.published_only:
            return get_published()
        # if the item is direct-only, there can only be a published version
        elif usage_key.category in DIRECT_ONLY_CATEGORIES:
            return get_published()
        # return the draft version (without any fallback to PUBLISHED) if DRAFT-ONLY is requested
        elif revision == ModuleStoreEnum.RevisionOption.draft_only:
            return get_draft()
        elif self.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
            return get_published()
        elif revision is None:
            # could use a single query wildcarding revision and sorting by revision. would need to
            # use prefix form of to_deprecated_son
            try:
                # first check for a draft version
                return get_draft()
            except ItemNotFoundError:
                # otherwise, fall back to the published version
                return get_published()
        else:
            raise UnsupportedRevisionError()
    def has_item(self, usage_key, revision=None):
        """
        Returns True if location exists in this ModuleStore.

        Args:
            revision:
                ModuleStoreEnum.RevisionOption.published_only - checks for the published item only
                ModuleStoreEnum.RevisionOption.draft_only - checks for the draft item only
                None - uses the branch setting, as follows:
                    if branch setting is ModuleStoreEnum.Branch.published_only, checks for the published item only
                    if branch setting is ModuleStoreEnum.Branch.draft_preferred, checks whether draft or published item exists
        """
        def has_published():
            return super(DraftModuleStore, self).has_item(usage_key)

        def has_draft():
            return super(DraftModuleStore, self).has_item(as_draft(usage_key))

        if revision == ModuleStoreEnum.RevisionOption.draft_only:
            return has_draft()
        elif (
            revision == ModuleStoreEnum.RevisionOption.published_only or
            self.get_branch_setting() == ModuleStoreEnum.Branch.published_only
        ):
            return has_published()
        elif revision is None:
            # revision-agnostic existence check: query by id with the revision
            # field removed so either the draft or the published doc matches
            key = usage_key.to_deprecated_son(prefix='_id.')
            del key['_id.revision']
            return self.collection.find(key).count() > 0
        else:
            raise UnsupportedRevisionError()
def delete_course(self, course_key, user_id):
"""
:param course_key: which course to delete
:param user_id: id of the user deleting the course
"""
# delete the assets
super(DraftModuleStore, self).delete_course(course_key, user_id)
# delete all of the db records for the course
course_query = self._course_key_to_son(course_key)
self.collection.remove(course_query, multi=True)
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None):
"""
Only called if cloning within this store or if env doesn't set up mixed.
* copy the courseware
"""
# check to see if the source course is actually there
if not self.has_course(source_course_id):
raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))
# verify that the dest_location really is an empty course
# b/c we don't want the payload, I'm copying the guts of get_items here
query = self._course_key_to_son(dest_course_id)
query['_id.category'] = {'$nin': ['course', 'about']}
if self.collection.find(query).limit(1).count() > 0:
raise DuplicateCourseError(
dest_course_id,
"Course at destination {0} is not an empty course. You can only clone into an empty course. Aborting...".format(
dest_course_id
)
)
# clone the assets
super(DraftModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields)
# get the whole old course
new_course = self.get_course(dest_course_id)
if new_course is None:
# create_course creates the about overview
new_course = self.create_course(
dest_course_id.org, dest_course_id.course, dest_course_id.run, user_id, fields=fields
)
else:
# update fields on existing course
for key, value in fields.iteritems():
setattr(new_course, key, value)
self.update_item(new_course, user_id)
# Get all modules under this namespace which is (tag, org, course) tuple
modules = self.get_items(source_course_id, revision=ModuleStoreEnum.RevisionOption.published_only)
self._clone_modules(modules, dest_course_id, user_id)
course_location = dest_course_id.make_usage_key('course', dest_course_id.run)
self.publish(course_location, user_id)
modules = self.get_items(source_course_id, revision=ModuleStoreEnum.RevisionOption.draft_only)
self._clone_modules(modules, dest_course_id, user_id)
return True
    def _clone_modules(self, modules, dest_course_id, user_id):
        """Clones each module into the given course, remapping locations and children."""
        for module in modules:
            original_loc = module.location
            module.location = module.location.map_into_course(dest_course_id)
            # the course block's name must match the destination run
            if module.location.category == 'course':
                module.location = module.location.replace(name=module.location.run)

            log.info("Cloning module %s to %s....", original_loc, module.location)

            # rewrite intra-course links embedded in string 'data' fields
            if 'data' in module.fields and module.fields['data'].is_set_on(module) and isinstance(module.data, basestring):
                module.data = rewrite_nonportable_content_links(
                    original_loc.course_key, dest_course_id, module.data
                )

            # repoint children
            if module.has_children:
                new_children = []
                for child_loc in module.children:
                    child_loc = child_loc.map_into_course(dest_course_id)
                    new_children.append(child_loc)
                module.children = new_children

            # allow_not_found: the module may not exist yet in the destination
            self.update_item(module, user_id, allow_not_found=True)
    def _get_raw_parent_locations(self, location, key_revision):
        """
        Get the parents but don't unset the revision in their locations.

        Intended for internal use but not restricted.

        Args:
            location (UsageKey): assumes the location's revision is None; so, uses revision keyword solely
            key_revision:
                MongoRevisionKey.draft - return only the draft parent
                MongoRevisionKey.published - return only the published parent
                ModuleStoreEnum.RevisionOption.all - return both draft and published parents
        """
        _verify_revision_is_published(location)

        # create a query to find all items in the course that have the given location listed as a child
        query = self._course_key_to_son(location.course_key)
        query['definition.children'] = location.to_deprecated_string()

        # find all the items that satisfy the query
        parents = self.collection.find(query, {'_id': True}, sort=[SORT_REVISION_FAVOR_DRAFT])

        # return only the parent(s) that satisfy the request
        return [
            Location._from_deprecated_son(parent['_id'], location.course_key.run)
            for parent in parents
            if (
                # return all versions of the parent if revision is ModuleStoreEnum.RevisionOption.all
                key_revision == ModuleStoreEnum.RevisionOption.all or
                # return this parent if it's direct-only, regardless of which revision is requested
                parent['_id']['category'] in DIRECT_ONLY_CATEGORIES or
                # return this parent only if its revision matches the requested one
                parent['_id']['revision'] == key_revision
            )
        ]
def get_parent_location(self, location, revision=None, **kwargs):
'''
Returns the given location's parent location in this course.
Returns: version agnostic locations (revision always None) as per the rest of mongo.
Args:
revision:
None - uses the branch setting for the revision
ModuleStoreEnum.RevisionOption.published_only
- return only the PUBLISHED parent if it exists, else returns None
ModuleStoreEnum.RevisionOption.draft_preferred
- return either the DRAFT or PUBLISHED parent, preferring DRAFT, if parent(s) exists,
else returns None
If the draft has a different parent than the published, it returns only
the draft's parent. Because parents don't record their children's revisions, this
is actually a potentially fragile deduction based on parent type. If the parent type
is not DIRECT_ONLY, then the parent revision must be DRAFT.
Only xml_exporter currently uses this argument. Others should avoid it.
'''
if revision is None:
revision = ModuleStoreEnum.RevisionOption.published_only \
if self.get_branch_setting() == ModuleStoreEnum.Branch.published_only \
else ModuleStoreEnum.RevisionOption.draft_preferred
return super(DraftModuleStore, self).get_parent_location(location, revision, **kwargs)
def create_xmodule(self, location, definition_data=None, metadata=None, runtime=None, fields={}, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module with a draft locator if
the category allows drafts. If the category does not allow drafts, just creates a published module.
:param location: a Location--must have a category
:param definition_data: can be empty. The initial definition_data for the kvs
:param metadata: can be empty, the initial metadata for the kvs
:param runtime: if you already have an xmodule from the course, the xmodule.runtime value
:param fields: a dictionary of field names and values for the new xmodule
"""
self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
if location.category not in DIRECT_ONLY_CATEGORIES:
location = as_draft(location)
return wrap_draft(
super(DraftModuleStore, self).create_xmodule(location, definition_data, metadata, runtime, fields)
)
    def get_items(self, course_key, revision=None, **kwargs):
        """
        Performance Note: This is generally a costly operation, but useful for wildcard searches.

        Returns:
            list of XModuleDescriptor instances for the matching items within the course with
            the given course_key

        NOTE: don't use this to look for courses as the course_key is required. Use get_courses instead.

        Args:
            course_key (CourseKey): the course identifier
            revision:
                ModuleStoreEnum.RevisionOption.published_only - returns only Published items
                ModuleStoreEnum.RevisionOption.draft_only - returns only Draft items
                None - uses the branch setting, as follows:
                    if the branch setting is ModuleStoreEnum.Branch.published_only,
                        returns only Published items
                    if the branch setting is ModuleStoreEnum.Branch.draft_preferred,
                        returns either Draft or Published, preferring Draft items.
            kwargs (key=value): what to look for within the course.
                Common qualifiers are ``category`` or any field name. if the target field is a list,
                then it searches for the given value in the list not list equivalence.
                Substring matching pass a regex object.
                ``name`` is another commonly provided key (Location based stores)
        """
        def base_get_items(key_revision):
            return super(DraftModuleStore, self).get_items(course_key, key_revision=key_revision, **kwargs)

        def draft_items():
            return [wrap_draft(item) for item in base_get_items(MongoRevisionKey.draft)]

        def published_items(draft_items):
            # keep only the published items that do NOT already have a draft copy
            # in draft_items (the draft copy shadows the published one)
            draft_items_locations = {item.location for item in draft_items}
            return [
                item for item in
                base_get_items(MongoRevisionKey.published)
                if item.location not in draft_items_locations
            ]

        if revision == ModuleStoreEnum.RevisionOption.draft_only:
            return draft_items()
        elif revision == ModuleStoreEnum.RevisionOption.published_only \
                or self.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
            # empty draft list => nothing is filtered out of the published results
            return published_items([])
        elif revision is None:
            # NOTE: rebinds the local name over the helper; fine because the
            # helper is not called again afterwards
            draft_items = draft_items()
            return draft_items + published_items(draft_items)
        else:
            raise UnsupportedRevisionError()
def convert_to_draft(self, location, user_id):
"""
Copy the subtree rooted at source_location and mark the copies as draft.
Args:
location: the location of the source (its revision must be None)
user_id: the ID of the user doing the operation
Raises:
InvalidVersionError: if the source can not be made into a draft
ItemNotFoundError: if the source does not exist
"""
# TODO (dhm) I don't think this needs to recurse anymore but can convert each unit on demand.
# See if that's true.
# delegating to internal b/c we don't want any public user to use the kwargs on the internal
self._convert_to_draft(location, user_id, ignore_if_draft=True)
# return the new draft item (does another fetch)
# get_item will wrap_draft so don't call it here (otherwise, it would override the is_draft attribute)
return self.get_item(location)
    def _convert_to_draft(self, location, user_id, delete_published=False, ignore_if_draft=False):
        """
        Internal method with additional internal parameters to convert a subtree to draft.

        Args:
            location: the location of the source (its revision must be MongoRevisionKey.published)
            user_id: the ID of the user doing the operation
            delete_published (Boolean): intended for use by unpublish
            ignore_if_draft(Boolean): for internal use only as part of depth first change

        Raises:
            InvalidVersionError: if the source can not be made into a draft
            ItemNotFoundError: if the source does not exist
            DuplicateItemError: if the source or any of its descendants already has a draft copy. Only
                useful for unpublish b/c we don't want unpublish to overwrite any existing drafts.
        """
        # verify input conditions
        self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
        _verify_revision_is_published(location)

        # ensure we are not creating a DRAFT of an item that is direct-only
        if location.category in DIRECT_ONLY_CATEGORIES:
            raise InvalidVersionError(location)

        def convert_item(item, to_be_deleted):
            """
            Convert one raw document to DRAFT; returns its children for the next tier.
            """
            # collect the children's ids for future processing
            next_tier = []
            for child in item.get('definition', {}).get('children', []):
                child_loc = Location.from_deprecated_string(child)
                next_tier.append(child_loc.to_deprecated_son())

            # insert a new DRAFT version of the item
            item['_id']['revision'] = MongoRevisionKey.draft
            # ensure keys are in fixed and right order before inserting
            item['_id'] = self._id_dict_to_son(item['_id'])
            try:
                self.collection.insert(item)
            except pymongo.errors.DuplicateKeyError:
                # prevent re-creation of DRAFT versions, unless explicitly requested to ignore
                if not ignore_if_draft:
                    raise DuplicateItemError(item['_id'], self, 'collection')

            # delete the old PUBLISHED version if requested
            if delete_published:
                item['_id']['revision'] = MongoRevisionKey.published
                to_be_deleted.append(item['_id'])

            return next_tier

        # convert the subtree using the original item as the root
        self._breadth_first(convert_item, [location])
    def update_item(self, xblock, user_id, allow_not_found=False, force=False, isPublish=False):
        """
        See superclass doc.
        In addition to the superclass's behavior, this method converts the unit to draft if it's not
        direct-only and not already draft.
        """
        self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)

        # if the xblock is direct-only, update the PUBLISHED version
        if xblock.location.category in DIRECT_ONLY_CATEGORIES:
            return super(DraftModuleStore, self).update_item(xblock, user_id, allow_not_found)

        draft_loc = as_draft(xblock.location)
        # auto-convert to draft before updating, if no draft version exists yet
        if not super(DraftModuleStore, self).has_item(draft_loc):
            try:
                # ignore any descendants which are already draft
                self._convert_to_draft(xblock.location, user_id, ignore_if_draft=True)
            except ItemNotFoundError as exception:
                # ignore the exception only if allow_not_found is True and
                # the item that wasn't found is the one that was passed in
                # we make this extra location check so we do not hide errors when converting any children to draft
                if not (allow_not_found and exception.args[0] == xblock.location):
                    raise

        xblock.location = draft_loc
        super(DraftModuleStore, self).update_item(xblock, user_id, allow_not_found, isPublish=isPublish)
        return wrap_draft(xblock)
    def delete_item(self, location, user_id, revision=None, **kwargs):
        """
        Delete an item from this modulestore.

        The method determines which revisions to delete. It disconnects and deletes the subtree.
        In general, it assumes deletes only occur on drafts except for direct_only. The only exceptions
        are internal calls like deleting orphans (during publishing as well as from delete_orphan view).
        To indicate that all versions should be deleted, pass the keyword revision=ModuleStoreEnum.RevisionOption.all.

        * Deleting a DIRECT_ONLY_CATEGORIES block, deletes both draft and published children and removes from parent.
        * Deleting a specific version of block whose parent is of DIRECT_ONLY_CATEGORIES, only removes it from parent if
          the other version of the block does not exist. Deletes only children of same version.
        * Other deletions remove from parent of same version and subtree of same version

        Args:
            location: UsageKey of the item to be deleted
            user_id: id of the user deleting the item
            revision:
                None - deletes the item and its subtree, and updates the parents per description above
                ModuleStoreEnum.RevisionOption.published_only - removes only Published versions
                ModuleStoreEnum.RevisionOption.all - removes both Draft and Published parents
                    currently only provided by contentstore.views.item.orphan_handler
                Otherwise, raises a ValueError.
        """
        self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
        _verify_revision_is_published(location)

        # decide which revision of the parent must drop this child
        is_item_direct_only = location.category in DIRECT_ONLY_CATEGORIES
        if is_item_direct_only or revision == ModuleStoreEnum.RevisionOption.published_only:
            parent_revision = MongoRevisionKey.published
        elif revision == ModuleStoreEnum.RevisionOption.all:
            parent_revision = ModuleStoreEnum.RevisionOption.all
        else:
            parent_revision = MongoRevisionKey.draft

        # remove subtree from its parent
        parent_locations = self._get_raw_parent_locations(location, key_revision=parent_revision)
        # if no parents, then we're trying to delete something which we should convert to draft
        if not parent_locations:
            # find the published parent, convert it to draft, then manipulate the draft
            parent_locations = self._get_raw_parent_locations(location, key_revision=MongoRevisionKey.published)
            # parent_locations will still be empty if the object was an orphan
            if parent_locations:
                draft_parent = self.convert_to_draft(parent_locations[0], user_id)
                parent_locations = [draft_parent.location]
        # there could be 2 parents if
        #   Case 1: the draft item moved from one parent to another
        #   Case 2: revision==ModuleStoreEnum.RevisionOption.all and the single parent has 2 versions: draft and published
        for parent_location in parent_locations:
            # don't remove from direct_only parent if other versions of this still exists (this code
            # assumes that there's only one parent_location in this case)
            if not is_item_direct_only and parent_location.category in DIRECT_ONLY_CATEGORIES:
                # see if other version of to-be-deleted root exists
                query = location.to_deprecated_son(prefix='_id.')
                del query['_id.revision']
                if self.collection.find(query).count() > 1:
                    continue

            parent_block = super(DraftModuleStore, self).get_item(parent_location)
            parent_block.children.remove(location)
            parent_block.location = parent_location  # ensure the location is with the correct revision
            self.update_item(parent_block, user_id)

        # select which revision(s) of the subtree itself to delete
        if is_item_direct_only or revision == ModuleStoreEnum.RevisionOption.all:
            as_functions = [as_draft, as_published]
        elif revision == ModuleStoreEnum.RevisionOption.published_only:
            as_functions = [as_published]
        elif revision is None:
            as_functions = [as_draft]
        else:
            raise UnsupportedRevisionError(
                [
                    None,
                    ModuleStoreEnum.RevisionOption.published_only,
                    ModuleStoreEnum.RevisionOption.all
                ]
            )
        self._delete_subtree(location, as_functions)
    def _delete_subtree(self, location, as_functions):
        """
        Internal method for deleting all of the subtree whose revisions match the as_functions
        (each as_function maps a usage key to a specific revision, e.g. as_draft/as_published).
        """
        course_key = location.course_key

        def _delete_item(current_entry, to_be_deleted):
            """
            Queue this node for deletion and return its children (in every
            requested revision) as the next tier to process.
            """
            to_be_deleted.append(self._id_dict_to_son(current_entry['_id']))
            next_tier = []
            for child_loc in current_entry.get('definition', {}).get('children', []):
                child_loc = course_key.make_usage_key_from_deprecated_string(child_loc)
                for rev_func in as_functions:
                    current_loc = rev_func(child_loc)
                    current_son = current_loc.to_deprecated_son()
                    next_tier.append(current_son)
            return next_tier

        first_tier = [as_func(location) for as_func in as_functions]
        self._breadth_first(_delete_item, first_tier)
        # recompute (and update) the metadata inheritance tree which is cached
        self.refresh_cached_metadata_inheritance_tree(location.course_key)
    def _breadth_first(self, function, root_usages):
        """
        Get the root_usages from the db and do a breadth-first scan (tier by tier).
        Call the function on each. The function should return a list of SON for any
        next tier items to process and should add the SON for any items to delete
        to the to_be_deleted array.

        At the end, it mass deletes the to_be_deleted items. (Any cache refresh,
        such as the metadata inheritance tree, is the caller's responsibility —
        see _delete_subtree.)

        :param function: a function taking (item, to_be_deleted) and returning [SON] for next_tier invocation
        :param root_usages: the usage keys for the root items (ensure they have the right revision set)
        """
        if len(root_usages) == 0:
            return
        to_be_deleted = []

        def _internal(tier):
            # fetch the whole tier in one query, then recurse into the next tier
            next_tier = []
            tier_items = self.collection.find({'_id': {'$in': tier}})
            for current_entry in tier_items:
                next_tier.extend(function(current_entry, to_be_deleted))

            if len(next_tier) > 0:
                _internal(next_tier)

        _internal([root_usage.to_deprecated_son() for root_usage in root_usages])
        self.collection.remove({'_id': {'$in': to_be_deleted}}, safe=self.collection.safe)
def has_changes(self, xblock):
"""
Check if the xblock or its children have been changed since the last publish.
:param xblock: xblock to check
:return: True if the draft and published versions differ
"""
# don't check children if this block has changes (is not public)
if self.compute_publish_state(xblock) != PublishState.public:
return True
# if this block doesn't have changes, then check its children
elif xblock.has_children:
return any([self.has_changes(child) for child in xblock.get_children()])
# otherwise there are no changes
else:
return False
    def publish(self, location, user_id):
        """
        Publish the subtree rooted at location to the live course and remove the drafts.
        Such publishing may cause the deletion of previously published but subsequently deleted
        child trees. Overwrites any existing published xblocks from the subtree.

        Treats the publishing of non-draftable items as merely a subtree selection from
        which to descend.

        Raises:
            ItemNotFoundError: if any of the draft subtree nodes aren't found
        """
        # NOTE: cannot easily use self._breadth_first b/c need to get pub'd and draft as pairs
        # (could do it by having 2 breadth first scans, the first to just get all published children
        # and the second to do the publishing on the drafts looking for the published in the cached
        # list of published ones.)
        to_be_deleted = []

        def _internal_depth_first(item_location, is_root):
            """
            Depth first publishing from the given location
            """
            try:
                # handle child does not exist w/o killing publish
                item = self.get_item(item_location)
            except ItemNotFoundError:
                log.warning('Cannot find: %s', item_location)
                return

            # publish the children first
            if item.has_children:
                for child_loc in item.children:
                    _internal_depth_first(child_loc, False)

            if item_location.category in DIRECT_ONLY_CATEGORIES or not getattr(item, 'is_draft', False):
                # ignore noop attempt to publish something that can't be or isn't currently draft
                return

            # try to find the originally PUBLISHED version, if it exists
            try:
                original_published = super(DraftModuleStore, self).get_item(item_location)
            except ItemNotFoundError:
                original_published = None

            # if the category of this item allows having children
            if item.has_children:
                if original_published is not None:
                    # see if previously published children were deleted. 2 reasons for children lists to differ:
                    #   Case 1: child deleted
                    #   Case 2: child moved
                    for orig_child in original_published.children:
                        if orig_child not in item.children:
                            published_parent = self.get_parent_location(orig_child)
                            if published_parent == item_location:
                                # Case 1: child was deleted in draft parent item
                                # So, delete published version of the child now that we're publishing the draft parent
                                self._delete_subtree(orig_child, [as_published])
                            else:
                                # Case 2: child was moved to a new draft parent item
                                # So, do not delete the child. It will be published when the new parent is published.
                                pass

            # overwrite the live version with the draft content, then queue the draft copy for removal
            super(DraftModuleStore, self).update_item(item, user_id, isPublish=True, is_publish_root=is_root)
            to_be_deleted.append(as_draft(item_location).to_deprecated_son())

        # verify input conditions
        self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
        _verify_revision_is_published(location)

        _internal_depth_first(location, True)

        # mass-delete all the draft copies that were just published
        if len(to_be_deleted) > 0:
            self.collection.remove({'_id': {'$in': to_be_deleted}})

        return self.get_item(as_published(location))
def unpublish(self, location, user_id):
"""
Turn the published version into a draft, removing the published version.
NOTE: unlike publish, this gives an error if called above the draftable level as it's intended
to remove things from the published version
"""
self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
self._convert_to_draft(location, user_id, delete_published=True)
    def revert_to_published(self, location, user_id=None):
        """
        Reverts an item to its last published version (recursively traversing all of its descendants).
        If no published version exists, a VersionConflictError is thrown.

        If a published version exists but there is no draft version of this item or any of its descendants, this
        method is a no-op. It is also a no-op if the root item is in DIRECT_ONLY_CATEGORIES.

        :raises InvalidVersionError: if no published version exists for the location specified
        """
        self._verify_branch_setting(ModuleStoreEnum.Branch.draft_preferred)
        _verify_revision_is_published(location)

        if location.category in DIRECT_ONLY_CATEGORIES:
            return

        if not self.has_item(location, revision=ModuleStoreEnum.RevisionOption.published_only):
            raise InvalidVersionError(location)

        def delete_draft_only(root_location):
            """
            Helper function that calls delete on the specified location if a draft version of the item exists.
            If no draft exists, this function recursively calls itself on the children of the item.
            """
            query = root_location.to_deprecated_son(prefix='_id.')
            del query['_id.revision']
            versions_found = self.collection.find(
                query, {'_id': True, 'definition.children': True}, sort=[SORT_REVISION_FAVOR_DRAFT]
            )
            # If 2 versions exist, we can assume one is a published version. Go ahead and do the delete
            # of the draft version.
            if versions_found.count() > 1:
                self._delete_subtree(root_location, [as_draft])
            elif versions_found.count() == 1:
                # Since this method cannot be called on something in DIRECT_ONLY_CATEGORIES and we call
                # delete_subtree as soon as we find an item with a draft version, if there is only 1 version
                # it must be published (since adding a child to a published item creates a draft of the parent).
                item = versions_found[0]
                assert item.get('_id').get('revision') != MongoRevisionKey.draft
                for child in item.get('definition', {}).get('children', []):
                    child_loc = Location.from_deprecated_string(child)
                    delete_draft_only(child_loc)

        delete_draft_only(location)
    def _query_children_for_cache_children(self, course_key, items):
        """
        Fetch the requested children for caching, substituting the draft version
        of an item for its published one when the branch setting is draft_preferred.

        Args:
            course_key (CourseKey): course the items belong to
            items: deprecated-string usage keys of the children to fetch

        Returns:
            the raw documents found, one per requested item (draft preferred)
        """
        # first get non-draft in a round-trip
        to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(course_key, items)

        # index the published docs by their (published) location for quick replacement
        to_process_dict = {}
        for non_draft in to_process_non_drafts:
            to_process_dict[Location._from_deprecated_son(non_draft["_id"], course_key.run)] = non_draft

        if self.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred:
            # now query all draft content in another round-trip
            query = []
            for item in items:
                item_usage_key = course_key.make_usage_key_from_deprecated_string(item)
                # direct-only categories never have drafts, so skip them
                if item_usage_key.category not in DIRECT_ONLY_CATEGORIES:
                    query.append(as_draft(item_usage_key).to_deprecated_son())
            if query:
                query = {'_id': {'$in': query}}
                to_process_drafts = list(self.collection.find(query))

                # now we have to go through all drafts and replace the non-draft
                # with the draft. This is because the semantics of the DraftStore is to
                # always return the draft - if available
                for draft in to_process_drafts:
                    draft_loc = Location._from_deprecated_son(draft["_id"], course_key.run)
                    draft_as_non_draft_loc = as_published(draft_loc)

                    # does non-draft exist in the collection
                    # if so, replace it
                    if draft_as_non_draft_loc in to_process_dict:
                        to_process_dict[draft_as_non_draft_loc] = draft

        # convert the dict - which is used for look ups - back into a list
        queried_children = to_process_dict.values()

        return queried_children
def compute_publish_state(self, xblock):
"""
Returns whether this xblock is draft, public, or private.
Returns:
PublishState.draft - content is in the process of being edited, but still has a previous
version deployed to LMS
PublishState.public - content is locked and deployed to LMS
PublishState.private - content is editable and not deployed to LMS
"""
if getattr(xblock, 'is_draft', False):
published_xblock_location = as_published(xblock.location)
try:
xblock.runtime.lookup_item(published_xblock_location)
except ItemNotFoundError:
return PublishState.private
return PublishState.draft
else:
return PublishState.public
def _verify_branch_setting(self, expected_branch_setting):
"""
Raises an exception if the current branch setting does not match the expected branch setting.
"""
actual_branch_setting = self.get_branch_setting()
if actual_branch_setting != expected_branch_setting:
raise InvalidBranchSetting(
expected_setting=expected_branch_setting,
actual_setting=actual_branch_setting
)
def _verify_revision_is_published(location):
    """
    Asserts that the revision set on the given location is MongoRevisionKey.published
    (i.e. callers must pass revision-less/published locations into these APIs).
    """
    # NOTE: assert is stripped under `python -O`; this is an internal sanity check,
    # not input validation.
    assert location.revision == MongoRevisionKey.published
|
unknown
|
codeparrot/codeparrot-clean
| ||
// RUN: mkdir -p %t.dir/move-function
// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
// RUN: cd %t.dir/move-function
// RUN: clang-move -names="g" -new_header=%t.dir/move-function/new_function_test.h -old_header=../move-function/function_test.h %t.dir/move-function/function_test.cpp --
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
//
// CHECK-NEW-TEST-H-CASE1: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE1: #define {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE1: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE1: inline int g() { return 0; }
// CHECK-NEW-TEST-H-CASE1: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE1: #endif // {{.*}}NEW_FUNCTION_TEST_H
//
// RUN: cp %S/Inputs/function_test* %t.dir/move-function
// RUN: clang-move -names="h" -new_header=%t.dir/move-function/new_function_test.h -old_header=../move-function/function_test.h %t.dir/move-function/function_test.cpp --
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
//
// CHECK-NEW-TEST-H-CASE2: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE2: #define {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE2: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE2: template <typename T> void h(T t) {}
// CHECK-NEW-TEST-H-CASE2: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE2: template <> void h(int t) {}
// CHECK-NEW-TEST-H-CASE2: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE2: #endif // {{.*}}NEW_FUNCTION_TEST_H
//
// RUN: cp %S/Inputs/function_test* %t.dir/move-function
// RUN: clang-move -names="f" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp --
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE3 %s
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE3 %s
//
// CHECK-NEW-TEST-H-CASE3: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE3: #define {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE3: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE3: void f();
// CHECK-NEW-TEST-H-CASE3: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE3: #endif // {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-CPP-CASE3: #include "{{.*}}new_function_test.h"
// CHECK-NEW-TEST-CPP-CASE3: {{[[:space:]]+}}
// CHECK-NEW-TEST-CPP-CASE3: void f() {}
//
// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
// RUN: clang-move -names="A::f" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp -dump_result -- | FileCheck %s -check-prefix=CHECK-EMPTY
//
// CHECK-EMPTY: [{{[[:space:]]*}}]
//
// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
// RUN: clang-move -names="f,A" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp --
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE4 %s
// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE4 %s
// CHECK-NEW-TEST-H-CASE4: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE4: #define {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE4: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE4: void f();
// CHECK-NEW-TEST-H-CASE4: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE4: class A {
// CHECK-NEW-TEST-H-CASE4: public:
// CHECK-NEW-TEST-H-CASE4: void f();
// CHECK-NEW-TEST-H-CASE4: };
// CHECK-NEW-TEST-H-CASE4: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE4: #endif // {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-CPP-CASE4: #include "{{.*}}new_function_test.h"
// CHECK-NEW-TEST-CPP-CASE4: {{[[:space:]]+}}
// CHECK-NEW-TEST-CPP-CASE4: void f() {}
// CHECK-NEW-TEST-CPP-CASE4: {{[[:space:]]+}}
// CHECK-NEW-TEST-CPP-CASE4: void A::f() {}
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/test/clang-move/move-function.cpp
|
/*-------------------------------------------------------------------------
*
* lwlock.c
* Lightweight lock manager
*
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
* access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
* In addition to exclusive and shared modes, lightweight locks can be used to
* wait until a variable changes value. The variable is initially not set
* when the lock is acquired with LWLockAcquire, i.e. it remains set to the
* value it was set to when the lock was released last, and can be updated
* without releasing the lock by calling LWLockUpdateVar. LWLockWaitForVar
* waits for the variable to be updated, or until the lock is free. When
* releasing the lock with LWLockReleaseClearVar() the value can be set to an
* appropriate value for a free lock. The meaning of the variable is up to
* the caller, the lightweight lock code just assigns and compares it.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/storage/lmgr/lwlock.c
*
* NOTES:
*
* This used to be a pretty straight forward reader-writer lock
* implementation, in which the internal state was protected by a
* spinlock. Unfortunately the overhead of taking the spinlock proved to be
* too high for workloads/locks that were taken in shared mode very
* frequently. Often we were spinning in the (obviously exclusive) spinlock,
* while trying to acquire a shared lock that was actually free.
*
* Thus a new implementation was devised that provides wait-free shared lock
* acquisition for locks that aren't exclusively locked.
*
* The basic idea is to have a single atomic variable 'lockcount' instead of
* the formerly separate shared and exclusive counters and to use atomic
* operations to acquire the lock. That's fairly easy to do for plain
* rw-spinlocks, but a lot harder for something like LWLocks that want to wait
* in the OS.
*
* For lock acquisition we use an atomic compare-and-exchange on the lockcount
* variable. For exclusive lock we swap in a sentinel value
* (LW_VAL_EXCLUSIVE), for shared locks we count the number of holders.
*
* To release the lock we use an atomic decrement to release the lock. If the
* new value is zero (we get that atomically), we know we can/have to release
* waiters.
*
* Obviously it is important that the sentinel value for exclusive locks
* doesn't conflict with the maximum number of possible share lockers -
* luckily MAX_BACKENDS makes that easily possible.
*
*
* The attentive reader might have noticed that naively doing the above has a
* glaring race condition: We try to lock using the atomic operations and
* notice that we have to wait. Unfortunately by the time we have finished
* queuing, the former locker very well might have already finished its
* work. That's problematic because we're now stuck waiting inside the OS.
* To mitigate those races we use a two phased attempt at locking:
* Phase 1: Try to do it atomically, if we succeed, nice
* Phase 2: Add ourselves to the waitqueue of the lock
* Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
* the queue
* Phase 4: Sleep till wake-up, goto Phase 1
*
* This protects us against the problem from above as nobody can release too
* quick, before we're queued, since after Phase 2 we're already queued.
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/proc.h"
#include "storage/proclist.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "utils/memutils.h"
#ifdef LWLOCK_STATS
#include "utils/hsearch.h"
#endif
/*
 * Flag bits occupying the top of the lock's atomic state word; the low bits
 * hold the shared-holder count / exclusive sentinel.
 */
#define LW_FLAG_HAS_WAITERS ((uint32) 1 << 31)	/* somebody is on the wait list */
#define LW_FLAG_WAKE_IN_PROGRESS ((uint32) 1 << 30)	/* wakeup already underway */
#define LW_FLAG_LOCKED ((uint32) 1 << 29)	/* wait list is locked */
#define LW_FLAG_BITS 3
#define LW_FLAG_MASK (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
/* assumes MAX_BACKENDS is a (power of 2) - 1, checked below */
#define LW_VAL_EXCLUSIVE (MAX_BACKENDS + 1)
#define LW_VAL_SHARED 1
/* already (power of 2)-1, i.e. suitable for a mask */
#define LW_SHARED_MASK MAX_BACKENDS
#define LW_LOCK_MASK (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
/* compile-time checks that the flag bits and lock-count bits never overlap */
StaticAssertDecl(((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
				 "MAX_BACKENDS + 1 needs to be a power of 2");
StaticAssertDecl((MAX_BACKENDS & LW_FLAG_MASK) == 0,
				 "MAX_BACKENDS and LW_FLAG_MASK overlap");
StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
				 "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap");
/*
* There are three sorts of LWLock "tranches":
*
* 1. The individually-named locks defined in lwlocklist.h each have their
* own tranche. We absorb the names of these tranches from there into
* BuiltinTrancheNames here.
*
* 2. There are some predefined tranches for built-in groups of locks defined
* in lwlocklist.h. We absorb the names of these tranches, too.
*
* 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
* or LWLockNewTrancheId. These names are stored in shared memory and can be
* accessed via LWLockTrancheNames.
*
* All these names are user-visible as wait event names, so choose with care
* ... and do not forget to update the documentation's list of wait events.
*/
static const char *const BuiltinTrancheNames[] = {
#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
#include "storage/lwlocklist.h"
#undef PG_LWLOCK
#undef PG_LWLOCKTRANCHE
};
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
LWTRANCHE_FIRST_USER_DEFINED,
"missing entries in BuiltinTrancheNames[]");
/*
* This is indexed by tranche ID minus LWTRANCHE_FIRST_USER_DEFINED, and
* points to the shared memory locations of the names of all
* dynamically-created tranches. Backends inherit the pointer by fork from the
* postmaster (except in the EXEC_BACKEND case, where we have special measures
* to pass it down).
*/
char **LWLockTrancheNames = NULL;
/*
* This points to the main array of LWLocks in shared memory. Backends inherit
* the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
* where we have special measures to pass it down).
*/
LWLockPadded *MainLWLockArray = NULL;
/*
* We use this structure to keep track of locked LWLocks for release
* during error recovery. Normally, only a few will be held at once, but
* occasionally the number can be much higher.
*/
#define MAX_SIMUL_LWLOCKS	200

/* struct representing the LWLocks we're holding */
typedef struct LWLockHandle
{
	LWLock	   *lock;
	LWLockMode	mode;
} LWLockHandle;

static int	num_held_lwlocks = 0;
static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS];

/* struct representing the LWLock tranche request for named tranche */
typedef struct NamedLWLockTrancheRequest
{
	char		tranche_name[NAMEDATALEN];
	int			num_lwlocks;
} NamedLWLockTrancheRequest;

/*
 * NamedLWLockTrancheRequests is the valid length of the request array. These
 * variables are non-static so that launch_backend.c can copy them to child
 * processes in EXEC_BACKEND builds.
 */
int			NamedLWLockTrancheRequests = 0;
NamedLWLockTrancheRequest *NamedLWLockTrancheRequestArray = NULL;

/* postmaster's local copy of the request array */
static NamedLWLockTrancheRequest *LocalNamedLWLockTrancheRequestArray = NULL;

/* shared memory counter of registered tranches */
int		   *LWLockCounter = NULL;

/* backend-local counter of registered tranches */
static int	LocalLWLockCounter;

/* upper bound on the number of dynamically-registered (named) tranches */
#define MAX_NAMED_TRANCHES	256
static void InitializeLWLocks(void);
static inline void LWLockReportWaitStart(LWLock *lock);
static inline void LWLockReportWaitEnd(void);
static const char *GetLWTrancheName(uint16 trancheId);
#define T_NAME(lock) \
GetLWTrancheName((lock)->tranche)
#ifdef LWLOCK_STATS
typedef struct lwlock_stats_key
{
int tranche;
void *instance;
} lwlock_stats_key;
typedef struct lwlock_stats
{
lwlock_stats_key key;
int sh_acquire_count;
int ex_acquire_count;
int block_count;
int dequeue_self_count;
int spin_delay_count;
} lwlock_stats;
static HTAB *lwlock_stats_htab;
static lwlock_stats lwlock_stats_dummy;
#endif
#ifdef LOCK_DEBUG

bool		Trace_lwlocks = false;

/* Emit a LOG line describing the lock's current state (LOCK_DEBUG builds only). */
inline static void
PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
{
	/* hide statement & context here, otherwise the log is just too verbose */
	if (Trace_lwlocks)
	{
		uint32		state = pg_atomic_read_u32(&lock->state);

		ereport(LOG,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u waking %d",
								 MyProcPid,
								 where, T_NAME(lock), lock,
								 (state & LW_VAL_EXCLUSIVE) != 0,
								 state & LW_SHARED_MASK,
								 (state & LW_FLAG_HAS_WAITERS) != 0,
								 pg_atomic_read_u32(&lock->nwaiters),
								 (state & LW_FLAG_WAKE_IN_PROGRESS) != 0)));
	}
}
/* Emit a short free-form LOG message about a lock (LOCK_DEBUG builds only). */
inline static void
LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
{
	/* hide statement & context here, otherwise the log is just too verbose */
	if (Trace_lwlocks)
	{
		ereport(LOG,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("%s(%s %p): %s", where,
								 T_NAME(lock), lock, msg)));
	}
}
#else							/* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c) ((void)0)
#define LOG_LWDEBUG(a,b,c) ((void)0)
#endif							/* LOCK_DEBUG */
#ifdef LWLOCK_STATS

static void init_lwlock_stats(void);
static void print_lwlock_stats(int code, Datum arg);
static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);

/*
 * (Re)create the backend-local hash table of per-lock statistics, and
 * register the shmem-exit callback that dumps it (once per backend).
 */
static void
init_lwlock_stats(void)
{
	HASHCTL		ctl;
	static MemoryContext lwlock_stats_cxt = NULL;
	static bool exit_registered = false;

	/* discard any stats accumulated before this (re)initialization */
	if (lwlock_stats_cxt != NULL)
		MemoryContextDelete(lwlock_stats_cxt);

	/*
	 * The LWLock stats will be updated within a critical section, which
	 * requires allocating new hash entries. Allocations within a critical
	 * section are normally not allowed because running out of memory would
	 * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
	 * turned on in production, so that's an acceptable risk. The hash entries
	 * are small, so the risk of running out of memory is minimal in practice.
	 */
	lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
											 "LWLock stats",
											 ALLOCSET_DEFAULT_SIZES);
	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);

	ctl.keysize = sizeof(lwlock_stats_key);
	ctl.entrysize = sizeof(lwlock_stats);
	ctl.hcxt = lwlock_stats_cxt;
	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
	if (!exit_registered)
	{
		on_shmem_exit(print_lwlock_stats, 0);
		exit_registered = true;
	}
}
/*
 * shmem-exit callback: dump this backend's accumulated LWLock statistics
 * to stderr, one line per (tranche, lock instance).
 */
static void
print_lwlock_stats(int code, Datum arg)
{
	HASH_SEQ_STATUS scan;
	lwlock_stats *lwstats;

	hash_seq_init(&scan, lwlock_stats_htab);

	/* Grab an LWLock to keep different backends from mixing reports */
	LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);

	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
	{
		fprintf(stderr,
				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
				MyProcPid, GetLWTrancheName(lwstats->key.tranche),
				lwstats->key.instance, lwstats->sh_acquire_count,
				lwstats->ex_acquire_count, lwstats->block_count,
				lwstats->spin_delay_count, lwstats->dequeue_self_count);
	}

	LWLockRelease(&MainLWLockArray[0].lock);
}
/*
 * Find or create this backend's stats entry for the given lock.  New
 * entries are zero-initialized.
 */
static lwlock_stats *
get_lwlock_stats_entry(LWLock *lock)
{
	lwlock_stats_key key;
	lwlock_stats *lwstats;
	bool		found;

	/*
	 * During shared memory initialization, the hash table doesn't exist yet.
	 * Stats of that phase aren't very interesting, so just collect operations
	 * on all locks in a single dummy entry.
	 */
	if (lwlock_stats_htab == NULL)
		return &lwlock_stats_dummy;

	/* Fetch or create the entry. */
	MemSet(&key, 0, sizeof(key));
	key.tranche = lock->tranche;
	key.instance = lock;
	lwstats = hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
	if (!found)
	{
		lwstats->sh_acquire_count = 0;
		lwstats->ex_acquire_count = 0;
		lwstats->block_count = 0;
		lwstats->dequeue_self_count = 0;
		lwstats->spin_delay_count = 0;
	}
	return lwstats;
}
#endif							/* LWLOCK_STATS */
/*
 * Compute number of LWLocks required by named tranches. These will be
 * allocated in the main array.
 */
static int
NumLWLocksForNamedTranches(void)
{
	int			total = 0;

	/* Sum the lock counts of every registered named-tranche request. */
	for (int idx = 0; idx < NamedLWLockTrancheRequests; idx++)
		total += NamedLWLockTrancheRequestArray[idx].num_lwlocks;

	return total;
}
/*
 * Compute shmem space needed for LWLocks and named tranches.
 *
 * NB: the layout computed here (counter, tranche-name slots, request array,
 * aligned LWLock array) must stay in sync with CreateLWLocks().
 */
Size
LWLockShmemSize(void)
{
	Size		size;
	int			numLocks = NUM_FIXED_LWLOCKS;

	/*
	 * If re-initializing shared memory, the request array will no longer be
	 * accessible, so switch to the copy in postmaster's local memory. We'll
	 * copy it back into shared memory later when CreateLWLocks() is called
	 * again.
	 */
	if (LocalNamedLWLockTrancheRequestArray)
		NamedLWLockTrancheRequestArray = LocalNamedLWLockTrancheRequestArray;

	/* Calculate total number of locks needed in the main array. */
	numLocks += NumLWLocksForNamedTranches();

	/* Space for dynamic allocation counter. */
	size = MAXALIGN(sizeof(int));

	/* Space for named tranches. */
	size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
	size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));

	/*
	 * Make space for named tranche requests. This is done for the benefit of
	 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
	 * GetNamedLWLockTranche() outside postmaster.
	 */
	size = add_size(size, mul_size(NamedLWLockTrancheRequests,
								   sizeof(NamedLWLockTrancheRequest)));

	/* Space for the LWLock array, plus room for cache line alignment. */
	size = add_size(size, LWLOCK_PADDED_SIZE);
	size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));

	return size;
}
/*
 * Allocate shmem space for the main LWLock array and all tranches and
 * initialize it.
 *
 * Only the postmaster (or a standalone backend) carves up the shared memory;
 * child processes inherit the resulting pointers.  The carving order must
 * match the size computation in LWLockShmemSize().
 */
void
CreateLWLocks(void)
{
	if (!IsUnderPostmaster)
	{
		Size		spaceLocks = LWLockShmemSize();
		char	   *ptr;

		/* Allocate space */
		ptr = (char *) ShmemAlloc(spaceLocks);

		/* Initialize the dynamic-allocation counter for tranches */
		LWLockCounter = (int *) ptr;
		*LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
		ptr += MAXALIGN(sizeof(int));

		/* Initialize tranche names */
		LWLockTrancheNames = (char **) ptr;
		ptr += MAX_NAMED_TRANCHES * sizeof(char *);
		for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
		{
			LWLockTrancheNames[i] = ptr;
			ptr += NAMEDATALEN;
		}

		/*
		 * Move named tranche requests to shared memory. This is done for the
		 * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
		 * call GetNamedLWLockTranche() outside postmaster.
		 */
		if (NamedLWLockTrancheRequests > 0)
		{
			/*
			 * Save the pointer to the request array in postmaster's local
			 * memory. We'll need it if we ever need to re-initialize shared
			 * memory after a crash.
			 */
			LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;

			memcpy(ptr, NamedLWLockTrancheRequestArray,
				   NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
			NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
			ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
		}

		/* Ensure desired alignment of LWLock array */
		ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
		MainLWLockArray = (LWLockPadded *) ptr;

		/* Initialize all LWLocks */
		InitializeLWLocks();
	}
}
/*
 * Initialize LWLocks that are fixed and those belonging to named tranches.
 */
static void
InitializeLWLocks(void)
{
	int			id;
	int			i;
	int			j;
	LWLockPadded *lock;

	/* Initialize all individual LWLocks in main array */
	for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
		LWLockInitialize(&lock->lock, id);

	/* Initialize buffer mapping LWLocks in main array */
	lock = MainLWLockArray + BUFFER_MAPPING_LWLOCK_OFFSET;
	for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);

	/* Initialize lmgrs' LWLocks in main array */
	lock = MainLWLockArray + LOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);

	/* Initialize predicate lmgrs' LWLocks in main array */
	lock = MainLWLockArray + PREDICATELOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);

	/*
	 * Copy the info about any named tranches into shared memory (so that
	 * other processes can see it), and initialize the requested LWLocks.
	 */
	if (NamedLWLockTrancheRequests > 0)
	{
		/* named-tranche locks live immediately after the fixed ones */
		lock = &MainLWLockArray[NUM_FIXED_LWLOCKS];

		for (i = 0; i < NamedLWLockTrancheRequests; i++)
		{
			NamedLWLockTrancheRequest *request;
			int			tranche;

			request = &NamedLWLockTrancheRequestArray[i];
			tranche = LWLockNewTrancheId(request->tranche_name);

			for (j = 0; j < request->num_lwlocks; j++, lock++)
				LWLockInitialize(&lock->lock, tranche);
		}
	}
}
/*
 * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
 *
 * Currently this only matters for LWLOCK_STATS builds, where it sets up the
 * per-backend statistics hash table.
 */
void
InitLWLockAccess(void)
{
#ifdef LWLOCK_STATS
	init_lwlock_stats();
#endif
}
/*
* GetNamedLWLockTranche - returns the base address of LWLock from the
* specified tranche.
*
* Caller needs to retrieve the requested number of LWLocks starting from
* the base lock address returned by this API. This can be used for
* tranches that are requested by using RequestNamedLWLockTranche() API.
*/
LWLockPadded *
GetNamedLWLockTranche(const char *tranche_name)
{
int lock_pos;
int i;
/*
* Obtain the position of base address of LWLock belonging to requested
* tranche_name in MainLWLockArray. LWLocks for named tranches are placed
* in MainLWLockArray after fixed locks.
*/
lock_pos = NUM_FIXED_LWLOCKS;
for (i = 0; i < NamedLWLockTrancheRequests; i++)
{
if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
tranche_name) == 0)
return &MainLWLockArray[lock_pos];
lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
}
elog(ERROR, "requested tranche is not registered");
/* just to keep compiler quiet */
return NULL;
}
/*
 * Allocate a new tranche ID with the provided name.
 *
 * Errors out if name is NULL, too long, or if all MAX_NAMED_TRANCHES slots
 * are already taken.
 */
int
LWLockNewTrancheId(const char *name)
{
	int			result;

	if (!name)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_NAME),
				 errmsg("tranche name cannot be NULL")));

	if (strlen(name) >= NAMEDATALEN)
		ereport(ERROR,
				(errcode(ERRCODE_NAME_TOO_LONG),
				 errmsg("tranche name too long"),
				 errdetail("LWLock tranche names must be no longer than %d bytes.",
						   NAMEDATALEN - 1)));

	/*
	 * We use the ShmemLock spinlock to protect LWLockCounter and
	 * LWLockTrancheNames.
	 */
	SpinLockAcquire(ShmemLock);

	if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
	{
		/* release the spinlock before ereport() can longjmp away */
		SpinLockRelease(ShmemLock);
		ereport(ERROR,
				(errmsg("maximum number of tranches already registered"),
				 errdetail("No more than %d tranches may be registered.",
						   MAX_NAMED_TRANCHES)));
	}

	result = (*LWLockCounter)++;
	/* keep the backend-local cache current while we still hold the lock */
	LocalLWLockCounter = *LWLockCounter;
	strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN);

	SpinLockRelease(ShmemLock);

	return result;
}
/*
 * RequestNamedLWLockTranche
 *		Request that extra LWLocks be allocated during postmaster
 *		startup.
 *
 * This may only be called via the shmem_request_hook of a library that is
 * loaded into the postmaster via shared_preload_libraries. Calls from
 * elsewhere will fail.
 *
 * The tranche name will be user-visible as a wait event name, so try to
 * use a name that fits the style for those.
 */
void
RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
{
	NamedLWLockTrancheRequest *request;
	/* capacity of the request array; grows by powers of two below */
	static int	NamedLWLockTrancheRequestsAllocated;

	if (!process_shmem_requests_in_progress)
		elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");

	if (!tranche_name)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_NAME),
				 errmsg("tranche name cannot be NULL")));

	if (strlen(tranche_name) >= NAMEDATALEN)
		ereport(ERROR,
				(errcode(ERRCODE_NAME_TOO_LONG),
				 errmsg("tranche name too long"),
				 errdetail("LWLock tranche names must be no longer than %d bytes.",
						   NAMEDATALEN - 1)));

	/* first call: allocate the initial request array */
	if (NamedLWLockTrancheRequestArray == NULL)
	{
		NamedLWLockTrancheRequestsAllocated = 16;
		NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
			MemoryContextAlloc(TopMemoryContext,
							   NamedLWLockTrancheRequestsAllocated
							   * sizeof(NamedLWLockTrancheRequest));
	}

	/* grow the array (doubling) when full */
	if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
	{
		int			i = pg_nextpower2_32(NamedLWLockTrancheRequests + 1);

		NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
			repalloc(NamedLWLockTrancheRequestArray,
					 i * sizeof(NamedLWLockTrancheRequest));
		NamedLWLockTrancheRequestsAllocated = i;
	}

	request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
	strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
	request->num_lwlocks = num_lwlocks;
	NamedLWLockTrancheRequests++;
}
/*
 * LWLockInitialize - initialize a new lwlock; it's initially unlocked
 */
void
LWLockInitialize(LWLock *lock, int tranche_id)
{
	/* verify the tranche_id is valid (errors out if not registered) */
	(void) GetLWTrancheName(tranche_id);

	pg_atomic_init_u32(&lock->state, 0);
#ifdef LOCK_DEBUG
	pg_atomic_init_u32(&lock->nwaiters, 0);
#endif
	lock->tranche = tranche_id;
	proclist_init(&lock->waiters);
}
/*
 * Report start of wait event for light-weight locks.
 *
 * This function will be used by all the light-weight lock calls which
 * needs to wait to acquire the lock. This function distinguishes wait
 * event based on tranche and lock id.
 */
static inline void
LWLockReportWaitStart(LWLock *lock)
{
	pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
}
/*
 * Report end of wait event for light-weight locks.
 */
static inline void
LWLockReportWaitEnd(void)
{
	pgstat_report_wait_end();
}
/*
 * Return the name of an LWLock tranche.
 *
 * Errors out if trancheId refers to an extension tranche that has not been
 * registered.
 */
static const char *
GetLWTrancheName(uint16 trancheId)
{
	/* Built-in tranche or individual LWLock? */
	if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
		return BuiltinTrancheNames[trancheId];

	/*
	 * We only ever add new entries to LWLockTrancheNames, so most lookups can
	 * avoid taking the spinlock as long as the backend-local counter
	 * (LocalLWLockCounter) is greater than the requested tranche ID. Else,
	 * we need to first update the backend-local counter with ShmemLock held
	 * before attempting the lookup again. In practice, the latter case is
	 * probably rare.
	 */
	if (trancheId >= LocalLWLockCounter)
	{
		SpinLockAcquire(ShmemLock);
		LocalLWLockCounter = *LWLockCounter;
		SpinLockRelease(ShmemLock);

		if (trancheId >= LocalLWLockCounter)
			elog(ERROR, "tranche %d is not registered", trancheId);
	}

	/*
	 * It's an extension tranche, so look in LWLockTrancheNames.
	 */
	trancheId -= LWTRANCHE_FIRST_USER_DEFINED;

	return LWLockTrancheNames[trancheId];
}
/*
 * Return an identifier for an LWLock based on the wait class and event.
 */
const char *
GetLWLockIdentifier(uint32 classId, uint16 eventId)
{
	Assert(classId == PG_WAIT_LWLOCK);
	/* The event IDs are just tranche numbers. */
	return GetLWTrancheName(eventId);
}
/*
 * Internal function that tries to atomically acquire the lwlock in the passed
 * in mode.
 *
 * This function will not block waiting for a lock to become free - that's the
 * caller's job.
 *
 * Returns true if the lock isn't free and we need to wait.
 */
static bool
LWLockAttemptLock(LWLock *lock, LWLockMode mode)
{
	uint32		old_state;

	Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);

	/*
	 * Read once outside the loop, later iterations will get the newer value
	 * via compare & exchange.
	 */
	old_state = pg_atomic_read_u32(&lock->state);

	/* loop until we've determined whether we could acquire the lock or not */
	while (true)
	{
		uint32		desired_state;
		bool		lock_free;

		desired_state = old_state;

		if (mode == LW_EXCLUSIVE)
		{
			/* exclusive: no shared holders and no exclusive holder allowed */
			lock_free = (old_state & LW_LOCK_MASK) == 0;
			if (lock_free)
				desired_state += LW_VAL_EXCLUSIVE;
		}
		else
		{
			/* shared: only an exclusive holder blocks us */
			lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
			if (lock_free)
				desired_state += LW_VAL_SHARED;
		}

		/*
		 * Attempt to swap in the state we are expecting. If we didn't see
		 * lock to be free, that's just the old value. If we saw it as free,
		 * we'll attempt to mark it acquired. The reason that we always swap
		 * in the value is that this doubles as a memory barrier. We could try
		 * to be smarter and only swap in values if we saw the lock as free,
		 * but benchmark haven't shown it as beneficial so far.
		 *
		 * Retry if the value changed since we last looked at it.
		 */
		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
		{
			if (lock_free)
			{
				/* Great! Got the lock. */
#ifdef LOCK_DEBUG
				if (mode == LW_EXCLUSIVE)
					lock->owner = MyProc;
#endif
				return false;
			}
			else
				return true;	/* somebody else has the lock */
		}
	}
	pg_unreachable();
}
/*
 * Lock the LWLock's wait list against concurrent activity.
 *
 * NB: even though the wait list is locked, non-conflicting lock operations
 * may still happen concurrently.
 *
 * Time spent holding mutex should be short!
 */
static void
LWLockWaitListLock(LWLock *lock)
{
	uint32		old_state;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;
	uint32		delays = 0;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	while (true)
	{
		/*
		 * Always try once to acquire the lock directly, without setting up
		 * the spin-delay infrastructure. The work necessary for that shows up
		 * in profiles and is rarely necessary.
		 */
		old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
		if (likely(!(old_state & LW_FLAG_LOCKED)))
			break;				/* got lock */

		/* and then spin without atomic operations until lock is released */
		{
			SpinDelayStatus delayStatus;

			init_local_spin_delay(&delayStatus);

			while (old_state & LW_FLAG_LOCKED)
			{
				perform_spin_delay(&delayStatus);
				old_state = pg_atomic_read_u32(&lock->state);
			}
#ifdef LWLOCK_STATS
			delays += delayStatus.delays;
#endif
			finish_spin_delay(&delayStatus);
		}

		/*
		 * Retry. The lock might obviously already be re-acquired by the time
		 * we're attempting to get it again.
		 */
	}

#ifdef LWLOCK_STATS
	lwstats->spin_delay_count += delays;
#endif
}
/*
 * Unlock the LWLock's wait list.
 *
 * Note that it can be more efficient to manipulate flags and release the
 * locks in a single atomic operation.
 */
static void
LWLockWaitListUnlock(LWLock *lock)
{
	uint32		old_state PG_USED_FOR_ASSERTS_ONLY;

	old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);

	/* we must have been the one holding the wait-list lock */
	Assert(old_state & LW_FLAG_LOCKED);
}
/*
 * Wakeup all the lockers that currently have a chance to acquire the lock.
 */
static void
LWLockWakeup(LWLock *lock)
{
	bool		new_wake_in_progress = false;
	bool		wokeup_somebody = false;
	proclist_head wakeup;
	proclist_mutable_iter iter;

	proclist_init(&wakeup);

	/* lock wait list while collecting backends to wake up */
	LWLockWaitListLock(lock);

	proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
	{
		PGPROC	   *waiter = GetPGProcByNumber(iter.cur);

		/* once a real acquirer is woken, don't also wake exclusive waiters */
		if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
			continue;

		/* move waiter from the lock's queue onto our local wakeup list */
		proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
		proclist_push_tail(&wakeup, iter.cur, lwWaitLink);

		if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
		{
			/*
			 * Prevent additional wakeups until retryer gets to run. Backends
			 * that are just waiting for the lock to become free don't retry
			 * automatically.
			 */
			new_wake_in_progress = true;

			/*
			 * Don't wakeup (further) exclusive locks.
			 */
			wokeup_somebody = true;
		}

		/*
		 * Signal that the process isn't on the wait list anymore. This allows
		 * LWLockDequeueSelf() to remove itself of the waitlist with a
		 * proclist_delete(), rather than having to check if it has been
		 * removed from the list.
		 */
		Assert(waiter->lwWaiting == LW_WS_WAITING);
		waiter->lwWaiting = LW_WS_PENDING_WAKEUP;

		/*
		 * Once we've woken up an exclusive lock, there's no point in waking
		 * up anybody else.
		 */
		if (waiter->lwWaitMode == LW_EXCLUSIVE)
			break;
	}

	Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);

	/* unset required flags, and release lock, in one fell swoop */
	{
		uint32		old_state;
		uint32		desired_state;

		old_state = pg_atomic_read_u32(&lock->state);
		while (true)
		{
			desired_state = old_state;

			/* compute desired flags */

			if (new_wake_in_progress)
				desired_state |= LW_FLAG_WAKE_IN_PROGRESS;
			else
				desired_state &= ~LW_FLAG_WAKE_IN_PROGRESS;

			if (proclist_is_empty(&lock->waiters))
				desired_state &= ~LW_FLAG_HAS_WAITERS;

			desired_state &= ~LW_FLAG_LOCKED;	/* release lock */

			if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
											   desired_state))
				break;
		}
	}

	/* Awaken any waiters I removed from the queue. */
	proclist_foreach_modify(iter, &wakeup, lwWaitLink)
	{
		PGPROC	   *waiter = GetPGProcByNumber(iter.cur);

		LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
		proclist_delete(&wakeup, iter.cur, lwWaitLink);

		/*
		 * Guarantee that lwWaiting being unset only becomes visible once the
		 * unlink from the link has completed. Otherwise the target backend
		 * could be woken up for other reason and enqueue for a new lock - if
		 * that happens before the list unlink happens, the list would end up
		 * being corrupted.
		 *
		 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
		 * another lock.
		 */
		pg_write_barrier();
		waiter->lwWaiting = LW_WS_NOT_WAITING;
		PGSemaphoreUnlock(waiter->sem);
	}
}
/*
 * Add ourselves to the end of the queue.
 *
 * NB: Mode can be LW_WAIT_UNTIL_FREE here!
 */
static void
LWLockQueueSelf(LWLock *lock, LWLockMode mode)
{
	/*
	 * If we don't have a PGPROC structure, there's no way to wait. This
	 * should never occur, since MyProc should only be null during shared
	 * memory initialization.
	 */
	if (MyProc == NULL)
		elog(PANIC, "cannot wait without a PGPROC structure");

	if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
		elog(PANIC, "queueing for lock while waiting on another one");

	LWLockWaitListLock(lock);

	/* setting the flag is protected by the spinlock */
	pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);

	MyProc->lwWaiting = LW_WS_WAITING;
	MyProc->lwWaitMode = mode;

	/* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
	if (mode == LW_WAIT_UNTIL_FREE)
		proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
	else
		proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);

	/* Can release the mutex now */
	LWLockWaitListUnlock(lock);

#ifdef LOCK_DEBUG
	pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
#endif
}
/*
 * Remove ourselves from the waitlist.
 *
 * This is used if we queued ourselves because we thought we needed to sleep
 * but, after further checking, we discovered that we don't actually need to
 * do so.
 */
static void
LWLockDequeueSelf(LWLock *lock)
{
	bool		on_waitlist;

#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->dequeue_self_count++;
#endif

	LWLockWaitListLock(lock);

	/*
	 * Remove ourselves from the waitlist, unless we've already been removed.
	 * The removal happens with the wait list lock held, so there's no race in
	 * this check.
	 */
	on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
	if (on_waitlist)
		proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);

	/* If we emptied the queue, clear the has-waiters flag. */
	if (proclist_is_empty(&lock->waiters) &&
		(pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
	{
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
	}

	/* XXX: combine with fetch_and above? */
	LWLockWaitListUnlock(lock);

	/* clear waiting state again, nice for debugging */
	if (on_waitlist)
		MyProc->lwWaiting = LW_WS_NOT_WAITING;
	else
	{
		int			extraWaits = 0;

		/*
		 * Somebody else dequeued us and has or will wake us up. Deal with the
		 * superfluous absorption of a wakeup.
		 */

		/*
		 * Clear LW_FLAG_WAKE_IN_PROGRESS if somebody woke us before we
		 * removed ourselves - they'll have set it.
		 */
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);

		/*
		 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
		 * get reset at some inconvenient point later. Most of the time this
		 * will immediately return.
		 */
		for (;;)
		{
			PGSemaphoreLock(MyProc->sem);
			if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			/* woken for some other reason; remember to re-credit the sema */
			extraWaits++;
		}

		/*
		 * Fix the process wait semaphore's count for any absorbed wakeups.
		 */
		while (extraWaits-- > 0)
			PGSemaphoreUnlock(MyProc->sem);
	}

#ifdef LOCK_DEBUG
	{
		/* not waiting anymore */
		uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

		Assert(nwaiters < MAX_BACKENDS);
	}
#endif
}
/*
 * LWLockAcquire - acquire a lightweight lock in the specified mode
 *
 * If the lock is not available, sleep until it is.  Returns true if the lock
 * was available immediately, false if we had to sleep.
 *
 * Side effect: cancel/die interrupts are held off until lock release.
 */
bool
LWLockAcquire(LWLock *lock, LWLockMode mode)
{
	PGPROC	   *proc = MyProc;
	bool		result = true;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquire", lock, mode);

#ifdef LWLOCK_STATS
	/* Count lock acquisition attempts */
	if (mode == LW_EXCLUSIVE)
		lwstats->ex_acquire_count++;
	else
		lwstats->sh_acquire_count++;
#endif							/* LWLOCK_STATS */

	/*
	 * We can't wait if we haven't got a PGPROC.  This should only occur
	 * during bootstrap or shared memory initialization.  Put an Assert here
	 * to catch unsafe coding practices.
	 */
	Assert(!(proc == NULL && IsUnderPostmaster));

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Loop here to try to acquire lock after each time we are signaled by
	 * LWLockRelease.
	 *
	 * NOTE: it might seem better to have LWLockRelease actually grant us the
	 * lock, rather than retrying and possibly having to go back to sleep. But
	 * in practice that is no good because it means a process swap for every
	 * lock acquisition when two or more processes are contending for the same
	 * lock.  Since LWLocks are normally used to protect not-very-long
	 * sections of computation, a process needs to be able to acquire and
	 * release the same lock many times during a single CPU time slice, even
	 * in the presence of contention.  The efficiency of being able to do that
	 * outweighs the inefficiency of sometimes wasting a process dispatch
	 * cycle because the lock is not free when a released waiter finally gets
	 * to run.  See pgsql-hackers archives for 29-Dec-01.
	 */
	for (;;)
	{
		bool		mustwait;

		/*
		 * Try to grab the lock the first time, we're not in the waitqueue
		 * yet/anymore.
		 */
		mustwait = LWLockAttemptLock(lock, mode);

		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
			break;				/* got the lock */
		}

		/*
		 * Ok, at this point we couldn't grab the lock on the first try. We
		 * cannot simply queue ourselves to the end of the list and wait to be
		 * woken up because by now the lock could long have been released.
		 * Instead add us to the queue and try to grab the lock again. If we
		 * succeed we need to revert the queuing and be happy, otherwise we
		 * recheck the lock. If we still couldn't grab it, we know that the
		 * other locker will see our queue entries when releasing since they
		 * existed before we checked for the lock.
		 */

		/* add to the queue */
		LWLockQueueSelf(lock, mode);

		/* we're now guaranteed to be woken up if necessary */
		mustwait = LWLockAttemptLock(lock, mode);

		/* ok, grabbed the lock the second time round, need to undo queueing */
		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");

			LWLockDequeueSelf(lock);
			break;
		}

		/*
		 * Wait until awakened.
		 *
		 * It is possible that we get awakened for a reason other than being
		 * signaled by LWLockRelease.  If so, loop back and wait again.  Once
		 * we've gotten the LWLock, re-increment the sema by the number of
		 * additional signals received.
		 */
		LOG_LWDEBUG("LWLockAcquire", lock, "waiting");

#ifdef LWLOCK_STATS
		lwstats->block_count++;
#endif

		LWLockReportWaitStart(lock);
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		for (;;)
		{
			PGSemaphoreLock(proc->sem);
			if (proc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			/* spurious wakeup; count it so we can re-credit the sema below */
			extraWaits++;
		}

		/* Retrying, allow LWLockRelease to release waiters again. */
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);

#ifdef LOCK_DEBUG
		{
			/* not waiting anymore */
			uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

			Assert(nwaiters < MAX_BACKENDS);
		}
#endif

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
		LWLockReportWaitEnd();

		LOG_LWDEBUG("LWLockAcquire", lock, "awakened");

		/* Now loop back and try to acquire lock again. */
		result = false;			/* we had to sleep at least once */
	}

	if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);

	/* Add lock to list of locks held by this backend */
	held_lwlocks[num_held_lwlocks].lock = lock;
	held_lwlocks[num_held_lwlocks++].mode = mode;

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	return result;
}
/*
 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
 *
 * Non-blocking variant of LWLockAcquire: if the lock is not available,
 * return false with no side-effects.
 *
 * If successful, cancel/die interrupts are held off until lock release.
 */
bool
LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
{
	bool		mustwait;

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/* One attempt only; we never queue ourselves here. */
	mustwait = LWLockAttemptLock(lock, mode);

	if (!mustwait)
	{
		/* Got it: record the lock in this backend's held-locks table */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
	}
	else
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
	}

	return !mustwait;
}
/*
 * LWLockAcquireOrWait - Acquire lock, or wait until it's free
 *
 * The semantics of this function are a bit funky.  If the lock is currently
 * free, it is acquired in the given mode, and the function returns true.  If
 * the lock isn't immediately free, the function waits until it is released
 * and returns false, but does not acquire the lock.
 *
 * This is currently used for WALWriteLock: when a backend flushes the WAL,
 * holding WALWriteLock, it can flush the commit records of many other
 * backends as a side-effect.  Those other backends need to wait until the
 * flush finishes, but don't need to acquire the lock anymore.  They can just
 * wake up, observe that their records have already been flushed, and return.
 */
bool
LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{
	PGPROC	   *proc = MyProc;
	bool		mustwait;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * NB: We're using nearly the same twice-in-a-row lock acquisition
	 * protocol as LWLockAcquire(). Check its comments for details.
	 */
	mustwait = LWLockAttemptLock(lock, mode);

	if (mustwait)
	{
		/* Queue as LW_WAIT_UNTIL_FREE: we want a wakeup, not the lock */
		LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);

		mustwait = LWLockAttemptLock(lock, mode);

		if (mustwait)
		{
			/*
			 * Wait until awakened.  Like in LWLockAcquire, be prepared for
			 * bogus wakeups.
			 */
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");

#ifdef LWLOCK_STATS
			lwstats->block_count++;
#endif

			LWLockReportWaitStart(lock);
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

			for (;;)
			{
				PGSemaphoreLock(proc->sem);
				if (proc->lwWaiting == LW_WS_NOT_WAITING)
					break;
				/* spurious wakeup; re-credit the semaphore later */
				extraWaits++;
			}

#ifdef LOCK_DEBUG
			{
				/* not waiting anymore */
				uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

				Assert(nwaiters < MAX_BACKENDS);
			}
#endif
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
			LWLockReportWaitEnd();

			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
		}
		else
		{
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");

			/*
			 * Got lock in the second attempt, undo queueing. We need to treat
			 * this as having successfully acquired the lock, otherwise we'd
			 * not necessarily wake up people we've prevented from acquiring
			 * the lock.
			 */
			LWLockDequeueSelf(lock);
		}
	}

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
	}
	else
	{
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
	}

	return !mustwait;
}
/*
 * Does the lwlock in its current state need to wait for the variable value to
 * change?
 *
 * If we don't need to wait, and it's because the value of the variable has
 * changed, store the current value in newval.
 *
 * *result is set to true if the lock was free, and false otherwise.
 *
 * Returns true iff the caller must (continue to) wait.
 */
static bool
LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
					   uint64 *newval, bool *result)
{
	bool		mustwait;
	uint64		value;

	/*
	 * Test first to see if it the slot is free right now.
	 *
	 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
	 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
	 * this, so we don't need a memory barrier here as far as the current
	 * usage is concerned.  But that might not be safe in general.
	 */
	mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;

	if (!mustwait)
	{
		/* lock is free; no need to even look at the variable */
		*result = true;
		return false;
	}

	*result = false;

	/*
	 * Reading this value atomically is safe even on platforms where uint64
	 * cannot be read without observing a torn value.
	 */
	value = pg_atomic_read_u64(valptr);

	if (value != oldval)
	{
		/* variable changed: report the new value, no need to wait */
		mustwait = false;
		*newval = value;
	}
	else
	{
		mustwait = true;
	}

	return mustwait;
}
/*
 * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
 *
 * If the lock is held and *valptr equals oldval, waits until the lock is
 * either freed, or the lock holder updates *valptr by calling
 * LWLockUpdateVar.  If the lock is free on exit (immediately or after
 * waiting), returns true.  If the lock is still held, but *valptr no longer
 * matches oldval, returns false and sets *newval to the current value in
 * *valptr.
 *
 * Note: this function ignores shared lock holders; if the lock is held
 * in shared mode, returns 'true'.
 *
 * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
 * hence the caller of this function may want to rely on an explicit barrier or
 * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
 */
bool
LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
				 uint64 *newval)
{
	PGPROC	   *proc = MyProc;
	int			extraWaits = 0;
	bool		result = false;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);

	/*
	 * Lock out cancel/die interrupts while we sleep on the lock.  There is no
	 * cleanup mechanism to remove us from the wait queue if we got
	 * interrupted.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Loop here to check the lock's status after each time we are signaled.
	 */
	for (;;)
	{
		bool		mustwait;

		mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
										  &result);

		if (!mustwait)
			break;				/* the lock was free or value didn't match */

		/*
		 * Add myself to wait queue.  Note that this is racy, somebody else
		 * could wakeup before we're finished queuing.  NB: We're using nearly
		 * the same twice-in-a-row lock acquisition protocol as
		 * LWLockAcquire().  Check its comments for details.  The only
		 * difference is that we also have to check the variable's values when
		 * checking the state of the lock.
		 */
		LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);

		/*
		 * Clear LW_FLAG_WAKE_IN_PROGRESS flag, to make sure we get woken up
		 * as soon as the lock is released.
		 */
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);

		/*
		 * We're now guaranteed to be woken up if necessary.  Recheck the lock
		 * and variables state.
		 */
		mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
										  &result);

		/* Ok, no conflict after we queued ourselves. Undo queueing. */
		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");

			LWLockDequeueSelf(lock);
			break;
		}

		/*
		 * Wait until awakened.
		 *
		 * It is possible that we get awakened for a reason other than being
		 * signaled by LWLockRelease.  If so, loop back and wait again.  Once
		 * we've gotten the LWLock, re-increment the sema by the number of
		 * additional signals received.
		 */
		LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");

#ifdef LWLOCK_STATS
		lwstats->block_count++;
#endif

		LWLockReportWaitStart(lock);
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);

		for (;;)
		{
			PGSemaphoreLock(proc->sem);
			if (proc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			/* spurious wakeup; re-credit the semaphore below */
			extraWaits++;
		}

#ifdef LOCK_DEBUG
		{
			/* not waiting anymore */
			uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

			Assert(nwaiters < MAX_BACKENDS);
		}
#endif

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
		LWLockReportWaitEnd();

		LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");

		/* Now loop back and check the status of the lock again. */
	}

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();

	return result;
}
/*
 * LWLockUpdateVar - Update a variable and wake up waiters atomically
 *
 * Sets *valptr to 'val', and wakes up all processes waiting for us with
 * LWLockWaitForVar().  It first sets the value atomically and then wakes up
 * waiting processes so that any process calling LWLockWaitForVar() on the same
 * lock is guaranteed to see the new value, and act accordingly.
 *
 * The caller must be holding the lock in exclusive mode.
 */
void
LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
{
	proclist_head wakeup;
	proclist_mutable_iter iter;

	PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);

	/*
	 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
	 * that the variable is updated before waking up waiters.
	 */
	pg_atomic_exchange_u64(valptr, val);

	proclist_init(&wakeup);

	LWLockWaitListLock(lock);

	/* caller is required to hold the lock exclusively */
	Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);

	/*
	 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
	 * up.  They are always in the front of the queue.
	 */
	proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
	{
		PGPROC	   *waiter = GetPGProcByNumber(iter.cur);

		/* queue is ordered; the first non-UNTIL_FREE waiter ends the scan */
		if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
			break;

		proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
		proclist_push_tail(&wakeup, iter.cur, lwWaitLink);

		/* see LWLockWakeup() */
		Assert(waiter->lwWaiting == LW_WS_WAITING);
		waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
	}

	/* We are done updating shared state of the lock itself. */
	LWLockWaitListUnlock(lock);

	/*
	 * Awaken any waiters I removed from the queue.
	 */
	proclist_foreach_modify(iter, &wakeup, lwWaitLink)
	{
		PGPROC	   *waiter = GetPGProcByNumber(iter.cur);

		proclist_delete(&wakeup, iter.cur, lwWaitLink);
		/* check comment in LWLockWakeup() about this barrier */
		pg_write_barrier();
		waiter->lwWaiting = LW_WS_NOT_WAITING;
		PGSemaphoreUnlock(waiter->sem);
	}
}
/*
 * LWLockRelease - release a previously acquired lock
 *
 * NB: This will leave lock->owner pointing to the current backend (if
 * LOCK_DEBUG is set).  This is somewhat intentional, as it makes it easier to
 * debug cases of missing wakeups during lock release.
 */
void
LWLockRelease(LWLock *lock)
{
	LWLockMode	mode;
	uint32		oldstate;
	bool		check_waiters;
	int			i;

	/*
	 * Remove lock from list of locks held.  Usually, but not always, it will
	 * be the latest-acquired lock; so search array backwards.
	 */
	for (i = num_held_lwlocks; --i >= 0;)
		if (lock == held_lwlocks[i].lock)
			break;

	if (i < 0)
		elog(ERROR, "lock %s is not held", T_NAME(lock));

	mode = held_lwlocks[i].mode;

	/* close the gap left by the removed entry */
	num_held_lwlocks--;
	for (; i < num_held_lwlocks; i++)
		held_lwlocks[i] = held_lwlocks[i + 1];

	PRINT_LWDEBUG("LWLockRelease", lock, mode);

	/*
	 * Release my hold on lock, after that it can immediately be acquired by
	 * others, even if we still have to wakeup other waiters.
	 */
	if (mode == LW_EXCLUSIVE)
		oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
	else
		oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);

	/* nobody else can have that kind of lock */
	Assert(!(oldstate & LW_VAL_EXCLUSIVE));

	if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));

	/*
	 * Check if we're still waiting for backends to get scheduled, if so,
	 * don't wake them up again.  Only wake when there are waiters, no wakeup
	 * is already in progress, and the lock is now fully unheld.
	 */
	if ((oldstate & LW_FLAG_HAS_WAITERS) &&
		!(oldstate & LW_FLAG_WAKE_IN_PROGRESS) &&
		(oldstate & LW_LOCK_MASK) == 0)
		check_waiters = true;
	else
		check_waiters = false;

	/*
	 * As waking up waiters requires the spinlock to be acquired, only do so
	 * if necessary.
	 */
	if (check_waiters)
	{
		/* XXX: remove before commit? */
		LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
		LWLockWakeup(lock);
	}

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();
}
/*
 * LWLockReleaseClearVar - release a previously acquired lock, reset variable
 *
 * The variable must be reset before the lock is released so that a later
 * acquirer cannot observe a stale value.
 */
void
LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
{
	/*
	 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
	 * that the variable is updated before releasing the lock.
	 */
	pg_atomic_exchange_u64(valptr, val);

	LWLockRelease(lock);
}
/*
 * LWLockReleaseAll - release all currently-held locks
 *
 * Used to clean up after ereport(ERROR). An important difference between this
 * function and retail LWLockRelease calls is that InterruptHoldoffCount is
 * unchanged by this operation. This is necessary since InterruptHoldoffCount
 * has been set to an appropriate level earlier in error recovery. We could
 * decrement it below zero if we allow it to drop for each released lock!
 *
 * Note that this function must be safe to call even before the LWLock
 * subsystem has been initialized (e.g., during early startup failures).
 * In that case, num_held_lwlocks will be 0 and we do nothing.
 */
void
LWLockReleaseAll(void)
{
	/* Pop held locks from the end until none remain. */
	for (;;)
	{
		if (num_held_lwlocks <= 0)
			break;

		HOLD_INTERRUPTS();		/* match the upcoming RESUME_INTERRUPTS */

		LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
	}

	Assert(num_held_lwlocks == 0);
}
/*
 * LWLockHeldByMe - test whether my process holds a lock in any mode
 *
 * This is meant as debug support only.
 */
bool
LWLockHeldByMe(LWLock *lock)
{
	int			idx = 0;

	/* linear scan of this backend's held-locks table */
	while (idx < num_held_lwlocks)
	{
		if (held_lwlocks[idx].lock == lock)
			return true;
		idx++;
	}
	return false;
}
/*
 * LWLockAnyHeldByMe - test whether my process holds any of an array of locks
 *
 * The array starts at 'lock' and contains 'nlocks' elements spaced 'stride'
 * bytes apart.  This is meant as debug support only.
 */
bool
LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
{
	char	   *begin = (char *) lock;
	char	   *end = begin + nlocks * stride;
	int			i;

	for (i = 0; i < num_held_lwlocks; i++)
	{
		char	   *addr = (char *) held_lwlocks[i].lock;

		/* skip anything outside the candidate array's address range */
		if (addr < begin || addr >= end)
			continue;
		/* a held lock counts only if it sits exactly on an array element */
		if ((addr - begin) % stride == 0)
			return true;
	}
	return false;
}
/*
 * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
 *
 * This is meant as debug support only.
 */
bool
LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
{
	int			idx = 0;

	/* linear scan; both the lock pointer and the mode must match */
	while (idx < num_held_lwlocks)
	{
		if (held_lwlocks[idx].lock == lock && held_lwlocks[idx].mode == mode)
			return true;
		idx++;
	}
	return false;
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/backend/storage/lmgr/lwlock.c
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import unittest
from telemetry.core import platform as platform_module
class PlatformBackendTest(unittest.TestCase):
  def testPowerMonitoringSync(self):
    """Smoke-test power monitoring: start, sleep briefly, stop.

    Only checks that monitoring runs without blowing up and that the
    result dict carries the expected keys; measured values are not
    validated.
    """
    # Tests that the act of monitoring power doesn't blow up.
    platform = platform_module.GetHostPlatform()
    can_monitor_power = platform.CanMonitorPower()
    self.assertIsInstance(can_monitor_power, bool)
    if not can_monitor_power:
      logging.warning('Test not supported on this platform.')
      return

    browser_mock = lambda: None
    # Android needs to access the package of the monitored app.
    if platform.GetOSName() == 'android':
      # pylint: disable=W0212
      browser_mock._browser_backend = lambda: None
      # Monitor the launcher, which is always present.
      browser_mock._browser_backend.package = 'com.android.launcher'

    platform.StartMonitoringPower(browser_mock)
    time.sleep(0.001)
    output = platform.StopMonitoringPower()
    # dict.has_key() was removed in Python 3; assertIn works on both
    # Python 2 and 3 and yields a more informative failure message.
    self.assertIn('energy_consumption_mwh', output)
    self.assertIn('identifier', output)
|
unknown
|
codeparrot/codeparrot-clean
| ||
package dep

// Interface declares a single method N that consumes a byte slice and
// returns nothing.
type Interface interface {
	N([]byte)
}
|
go
|
github
|
https://github.com/golang/go
|
src/cmd/api/testdata/src/issue21181/dep/p.go
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
    """
    Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
    deprecated.
    """
    class StubGTK:
        # Minimal stand-in for the 'gtk' module: just enough surface
        # (GDK.INPUT_READ and input_add) for gtkreactor to import.
        class GDK:
            INPUT_READ = None
        def input_add(self, *params):
            pass

    class StubPyGTK:
        # Minimal stand-in for the 'pygtk' module; gtkreactor calls
        # pygtk.require() at import time.
        def require(self, something):
            pass

    def setUp(self):
        """
        Create a stub for the module 'gtk' if it does not exist, so that it can
        be imported without errors or warnings.
        """
        # Snapshot sys.modules so tearDown can restore it exactly.
        self.mods = sys.modules.copy()
        sys.modules['gtk'] = self.StubGTK()
        sys.modules['pygtk'] = self.StubPyGTK()

    def tearDown(self):
        """
        Return sys.modules to the way it was before the test.
        """
        sys.modules.clear()
        sys.modules.update(self.mods)

    def lookForDeprecationWarning(self, testmethod, attributeName):
        # Exactly one DeprecationWarning with the canonical 10.1.0 message
        # must have been emitted by the given test method.
        warningsShown = self.flushWarnings([testmethod])
        self.assertEqual(len(warningsShown), 1)
        self.assertIs(warningsShown[0]['category'], DeprecationWarning)
        self.assertEqual(
            warningsShown[0]['message'],
            "twisted.internet.gtkreactor." + attributeName + " "
            "was deprecated in Twisted 10.1.0: All new applications should be "
            "written with gtk 2.x, which is supported by "
            "twisted.internet.gtk2reactor.")

    def test_gtkReactor(self):
        """
        Test deprecation of L{gtkreactor.GtkReactor}
        """
        from twisted.internet import gtkreactor
        gtkreactor.GtkReactor();
        self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")

    def test_portableGtkReactor(self):
        """
        Test deprecation of L{gtkreactor.GtkReactor}
        """
        from twisted.internet import gtkreactor
        gtkreactor.PortableGtkReactor()
        self.lookForDeprecationWarning(self.test_portableGtkReactor,
                                       "PortableGtkReactor")

    def test_install(self):
        """
        Test deprecation of L{gtkreactor.install}
        """
        from twisted.internet import gtkreactor
        # install() is expected to fail with AssertionError here; the
        # deprecation warning is still emitted before it raises.
        self.assertRaises(AssertionError, gtkreactor.install)
        self.lookForDeprecationWarning(self.test_install, "install")

    def test_portableInstall(self):
        """
        Test deprecation of L{gtkreactor.portableInstall}
        """
        from twisted.internet import gtkreactor
        self.assertRaises(AssertionError, gtkreactor.portableInstall)
        self.lookForDeprecationWarning(self.test_portableInstall,
                                       "portableInstall")
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_LOSS_H_
#define TENSORFLOW_CORE_KERNELS_LOSS_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
// Abstract interface for per-example dual/primal loss computations used by
// the SDCA solver.  Concrete subclasses implement one loss function each.
class DualLossUpdater {
 public:
  virtual ~DualLossUpdater() {}

  // Compute update dual (alpha), based on a single example. Various strategies
  // can be employed here, like newton step and/or line search or approximate
  // step that decreases the dual sub-optimality.
  virtual double ComputeUpdatedDual(
      const int num_loss_partitions, const double label,
      const double example_weight, const double current_dual, const double wx,
      const double weighted_example_norm) const = 0;

  // Compute dual loss based on the current dual (alpha), example label (y)
  // and example weight (cost).
  virtual double ComputeDualLoss(const double current_dual,
                                 const double example_label,
                                 const double example_weight) const = 0;

  // Compute the primal loss based on current estimate of log-odds(wx),
  // example label (y) and example weight (cost).
  virtual double ComputePrimalLoss(const double wx, const double example_label,
                                   const double example_weight) const = 0;

  // Primal loss derivative used to compute the dual residue in AdaSDCA
  virtual double PrimalLossDerivative(const double wx,
                                      const double example_label,
                                      const double example_weight) const = 0;

  // This is gamma such that the loss derivative is 1/gamma Lipschitz
  virtual double SmoothnessConstant() const = 0;

  // Converts binary example labels from 0.0 or 1.0 to appropriate range for
  // each loss function.  Returns a non-OK status when the label is invalid
  // for the concrete loss.
  virtual absl::Status ConvertLabel(float* const example_label) const = 0;
};
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_LOSS_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/loss.h
|
# (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
:func:`iris.fileformats.grib._save_rules.set_fixed_surfaces`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import gribapi
import numpy as np
import iris.cube
import iris.coords
from iris.fileformats.grib._save_rules import set_fixed_surfaces
class Test(tests.IrisTest):
    def _check_fixed_surfaces(self, grib, first_value, second_value,
                              surface_type):
        """Assert both scaled surface values and both type codes on grib."""
        self.assertEqual(
            gribapi.grib_get_double(grib, "scaledValueOfFirstFixedSurface"),
            first_value)
        self.assertEqual(
            gribapi.grib_get_double(grib, "scaledValueOfSecondFixedSurface"),
            second_value)
        self.assertEqual(
            gribapi.grib_get_long(grib, "typeOfFirstFixedSurface"),
            surface_type)
        self.assertEqual(
            gribapi.grib_get_long(grib, "typeOfSecondFixedSurface"),
            surface_type)

    def test_bounded_altitude_feet(self):
        # A bounded altitude coordinate in feet should be encoded as
        # surface type 102 with bounds converted to metres.
        cube = iris.cube.Cube([0])
        altitude = iris.coords.AuxCoord(
            1500.0, long_name='altitude', units='ft',
            bounds=np.array([1000.0, 2000.0]))
        cube.add_aux_coord(altitude)
        grib = gribapi.grib_new_from_samples("GRIB2")
        set_fixed_surfaces(cube, grib)
        self._check_fixed_surfaces(grib, 304.0, 609.0, 102)

    def test_theta_level(self):
        # A bounded potential-temperature coordinate should be encoded as
        # surface type 107 with the bound values carried through unchanged.
        cube = iris.cube.Cube([0])
        theta = iris.coords.AuxCoord(
            230.0, standard_name='air_potential_temperature',
            units='K', attributes={'positive': 'up'},
            bounds=np.array([220.0, 240.0]))
        cube.add_aux_coord(theta)
        grib = gribapi.grib_new_from_samples("GRIB2")
        set_fixed_surfaces(cube, grib)
        self._check_fixed_surfaces(grib, 220.0, 240.0, 107)
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
//===--- SILContext.h -------------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_SIL_SILBRIDGINGCONTEXT_H
#define SWIFT_SIL_SILBRIDGINGCONTEXT_H
#include "swift/SIL/SILFunction.h"
#include "swift/SIL/BasicBlockBits.h"
#include "swift/SIL/NodeBits.h"
#include "swift/SIL/OperandBits.h"
namespace swift {
class SILSSAUpdater;
/// The abstract base class for the C++ implementation of Context in SwiftCompilerSources.
/// Referenced in BridgedContext.
///
/// Everything which only needs SIL is directly implemented in this class.
/// Everything which needs the SILPassManager is implemented in the derived class SwiftPassInvocation.
///
class SILContext {
public:
  /// Bitmask categories of SIL modifications a pass can report via
  /// notifyChanges(). Values are power-of-two flags so they can be OR'd.
  enum NotificationKind : unsigned {
    Nothing = 0,
    Instructions = 0x1,
    Calls = 0x2,
    Branches = 0x4,
    Effects = 0x8,
    FunctionTables = 0x10
  };

protected:
  // Function-pass context: derives the module from the function.
  SILContext(SILFunction *f) : function(f), mod(&f->getModule()) {}
  // Module-pass context: no current function.
  SILContext(SILModule *mod) : mod(mod) {}
  virtual ~SILContext();

  /// The currently optimized function or null if this is the context of a module pass.
  SILFunction *function = nullptr;
  SILModule *mod = nullptr;

  /// All slabs, allocated by the pass.
  SILModule::SlabList allocatedSlabs;

  // In-place storage pools for BasicBlockSet/NodeSet/OperandSet instances
  // handed out by the alloc*/free* APIs below. Capacity is bounded by the
  // number of custom bits available on blocks/nodes/operands; the alive*
  // arrays track which storage slots are currently in use.
  static constexpr int BlockSetCapacity = SILBasicBlock::numCustomBits;
  char blockSetStorage[sizeof(BasicBlockSet) * BlockSetCapacity];
  bool aliveBlockSets[BlockSetCapacity];
  int numBlockSetsAllocated = 0;

  static constexpr int NodeSetCapacity = SILNode::numCustomBits;
  char nodeSetStorage[sizeof(NodeSet) * NodeSetCapacity];
  bool aliveNodeSets[NodeSetCapacity];
  int numNodeSetsAllocated = 0;

  static constexpr int OperandSetCapacity = Operand::numCustomBits;
  char operandSetStorage[sizeof(OperandSet) * OperandSetCapacity];
  bool aliveOperandSets[OperandSetCapacity];
  int numOperandSetsAllocated = 0;

  // Number of live cloners, tracked via notifyNewCloner/notifyClonerDestroyed.
  int numClonersAllocated = 0;

  // Backing state for the SSAUpdater_* APIs below; owned by the derived class.
  SILSSAUpdater *ssaUpdater = nullptr;
  SmallVector<SILPhiArgument *, 4> insertedPhisBySSAUpdater;

  /// Change notifications, collected during a pass run.
  NotificationKind changeNotifications = NotificationKind::Nothing;

  // Asserts that all sets, slabs, cloners etc. have been released;
  // presumably called at the end of a pass run - see implementation.
  void verifyEverythingIsCleared();

public:
  SILModule *getModule() const { return mod; }

  /// Return the function being optimized; asserts in a module pass.
  SILFunction *getFunction() const {
    ASSERT(function != nullptr && "not in a function pass");
    return function;
  }

  /// Called by the pass when changes are made to the SIL.
  void notifyChanges(NotificationKind notification) {
    changeNotifications = (NotificationKind)(changeNotifications | notification);
  }

  NotificationKind getChangeNotifications() const {
    return changeNotifications;
  }

  // Slab allocation; slabs are recorded in allocatedSlabs.
  FixedSizeSlab *allocSlab(FixedSizeSlab *afterSlab);
  FixedSizeSlab *freeSlab(FixedSizeSlab *slab);

  // Paired alloc/free for the fixed-capacity set pools declared above.
  BasicBlockSet *allocBlockSet();
  void freeBlockSet(BasicBlockSet *set);

  NodeSet *allocNodeSet();
  void freeNodeSet(NodeSet *set);

  OperandSet *allocOperandSet();
  void freeOperandSet(OperandSet *set);

  void notifyNewCloner() { numClonersAllocated++; }
  void notifyClonerDestroyed() { numClonersAllocated--; }

  // Operations that need the pass manager are pure virtual and implemented
  // in the derived SwiftPassInvocation (see class comment).
  virtual void eraseInstruction(SILInstruction *inst, bool salvageDebugInfo) = 0;
  virtual SILFunction *createEmptyFunction(StringRef name, ArrayRef<SILParameterInfo> params,
                                           bool hasSelfParam, SILFunction *fromFn) = 0;
  virtual void moveFunctionBody(SILFunction *sourceFn, SILFunction *destFn) = 0;
  virtual SILFunction *lookupStdlibFunction(StringRef name) = 0;

  // The SILSSAUpdater is implemented in the Optimizer. Therefore all the APIs need to take
  // the indirection through virtual functions to SwiftPassInvocation.
  virtual void initializeSSAUpdater(SILFunction *function, SILType type, ValueOwnershipKind ownership) = 0;
  virtual void SSAUpdater_addAvailableValue(SILBasicBlock *block, SILValue value) = 0;
  virtual SILValue SSAUpdater_getValueAtEndOfBlock(SILBasicBlock *block) = 0;
  virtual SILValue SSAUpdater_getValueInMiddleOfBlock(SILBasicBlock *block) = 0;

  // Phis created by the SSA updater; filled by the derived implementation.
  ArrayRef<SILPhiArgument*> SSAUpdater_getInsertedPhis() { return insertedPhisBySSAUpdater; }
};
} // namespace swift
#endif
|
c
|
github
|
https://github.com/apple/swift
|
include/swift/SIL/SILContext.h
|
# This suite should not be run in evergreen, it is combined into
# sharding_max_mirroring_opportunistic_secondary_targeting_ese
# Matrix-suite mapping: start from the "sharding" base suite and layer on the
# encryption (ESE) override and exclude lists named below — presumably
# resolved from the matrix_suites overrides directory; confirm there.
base_suite: sharding
overrides:
- "encryption.testdata_ese"
excludes:
- "encryption.excluded_files"
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokeconfig/matrix_suites/mappings/sharding_ese.yml
|
"""
SMA Solar Webconnect interface.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.sma/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL)
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pysma==0.1.3']
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform.
CONF_GROUP = 'group'
CONF_SENSORS = 'sensors'
CONF_CUSTOM = 'custom'
# Login groups accepted by the SMA WebConnect interface.
GROUP_INSTALLER = 'installer'
GROUP_USER = 'user'
GROUPS = [GROUP_USER, GROUP_INSTALLER]
# Built-in sensor names; mapped to pysma keys in async_setup_platform.
SENSOR_OPTIONS = ['current_consumption', 'current_power', 'total_consumption',
                  'total_yield']
def _check_sensor_schema(conf):
    """Check sensors and attributes are valid.

    Every configured sensor and every attribute it lists must be either a
    built-in sensor name or a key from the 'custom' section.
    """
    known = SENSOR_OPTIONS + list(conf[CONF_CUSTOM].keys())
    for sensor, attrs in conf[CONF_SENSORS].items():
        if sensor not in known:
            raise vol.Invalid("{} does not exist".format(sensor))
        unknown = [attr for attr in attrs if attr not in known]
        if unknown:
            raise vol.Invalid(
                "{} does not exist [{}]".format(unknown[0], sensor))
    return conf
# Platform schema: host and password are required; each custom sensor must
# supply a 13-character device key, a unit and an optional scaling factor.
# The assembled config is then cross-checked by _check_sensor_schema.
PLATFORM_SCHEMA = vol.All(PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): str,
    vol.Required(CONF_PASSWORD): str,
    vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS),
    vol.Required(CONF_SENSORS): vol.Schema({cv.slug: cv.ensure_list}),
    vol.Optional(CONF_CUSTOM, default={}): vol.Schema({
        cv.slug: {
            vol.Required('key'): vol.All(str, vol.Length(min=13, max=13)),
            vol.Required('unit'): str,
            vol.Optional('factor', default=1): vol.Coerce(float),
        }})
}, extra=vol.PREVENT_EXTRA), _check_sensor_schema)
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up SMA WebConnect sensor."""
    import pysma
    # Sensor_defs from the library: name -> (pysma device key, unit, factor).
    sensor_defs = dict(zip(SENSOR_OPTIONS, [
        (pysma.KEY_CURRENT_CONSUMPTION_W, 'W', 1),
        (pysma.KEY_CURRENT_POWER_W, 'W', 1),
        (pysma.KEY_TOTAL_CONSUMPTION_KWH, 'kWh', 1000),
        (pysma.KEY_TOTAL_YIELD_KWH, 'kWh', 1000)]))
    # Sensor_defs from the custom config; these may shadow built-ins.
    for name, prop in config[CONF_CUSTOM].items():
        if name in sensor_defs:
            _LOGGER.warning("Custom sensor %s replace built-in sensor", name)
        sensor_defs[name] = (prop['key'], prop['unit'], prop['factor'])
    # Prepare all HASS sensor entities; track every sensor/attribute name
    # actually referenced so unused definitions can be dropped below.
    hass_sensors = []
    used_sensors = []
    for name, attr in config[CONF_SENSORS].items():
        hass_sensors.append(SMAsensor(name, attr, sensor_defs))
        used_sensors.append(name)
        used_sensors.extend(attr)
    # Remove sensor_defs not in use
    sensor_defs = {name: val for name, val in sensor_defs.items()
                   if name in used_sensors}
    async_add_devices(hass_sensors)
    # Init the SMA interface
    session = async_get_clientsession(hass)
    grp = {GROUP_INSTALLER: pysma.GROUP_INSTALLER,
           GROUP_USER: pysma.GROUP_USER}[config[CONF_GROUP]]
    sma = pysma.SMA(session, config[CONF_HOST], config[CONF_PASSWORD],
                    group=grp)
    # Ensure we logout on shutdown
    @asyncio.coroutine
    def async_close_session(event):
        """Close the session."""
        yield from sma.close_session()
    hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_close_session)
    # Read SMA values periodically & update sensors
    names_to_query = list(sensor_defs.keys())
    keys_to_query = [sensor_defs[name][0] for name in names_to_query]
    # backoff > 1 makes the next (backoff - 1) update cycles no-ops after a
    # failed read, so an unreachable device is not hammered every interval.
    backoff = 0
    @asyncio.coroutine
    def async_sma(event):
        """Update all the SMA sensors."""
        nonlocal backoff
        if backoff > 1:
            backoff -= 1
            return
        values = yield from sma.read(keys_to_query)
        if values is None:
            backoff = 3
            return
        values = [0 if val is None else val for val in values]
        res = dict(zip(names_to_query, values))
        # Floor division by the configured factor — assumes raw device
        # values are integers (e.g. Wh -> kWh with factor 1000); confirm.
        res = {key: val // sensor_defs[key][2] for key, val in res.items()}
        _LOGGER.debug("Update sensors %s %s %s", keys_to_query, values, res)
        tasks = []
        for sensor in hass_sensors:
            task = sensor.async_update_values(res)
            if task:
                tasks.append(task)
        if tasks:
            yield from asyncio.wait(tasks, loop=hass.loop)
    interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=5)
    async_track_time_interval(hass, async_sma, interval)
class SMAsensor(Entity):
    """Representation of an SMA WebConnect sensor."""
    # Fixed: docstring previously said "Bitcoin sensor" (copy-paste error).

    def __init__(self, sensor_name, attr, sensor_defs):
        """Initialize the sensor.

        sensor_name: key into sensor_defs describing this sensor.
        attr: names of extra sensors exposed as state attributes.
        sensor_defs: mapping of name -> (device key, unit, factor).
        """
        self._name = sensor_name
        self._key, self._unit_of_measurement, _ = sensor_defs[sensor_name]
        self._state = None
        self._sensor_defs = sensor_defs
        self._attr = {att: "" for att in attr}

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return self._attr

    @property
    def should_poll(self):
        """SMA sensors push their own updates; disable polling.

        Renamed from 'poll', which Home Assistant never reads —
        'should_poll' is the property the entity platform checks.
        """
        return False

    def async_update_values(self, key_values):
        """Update this sensor using the data.

        Returns the async_update_ha_state() coroutine when state or any
        attribute changed, else None, so the caller can schedule it.
        """
        update = False
        for key, val in self._attr.items():
            newval = '{} {}'.format(key_values[key], self._sensor_defs[key][1])
            if val != newval:
                update = True
                self._attr[key] = newval
        new_state = key_values[self._name]
        if new_state != self._state:
            update = True
            self._state = new_state
        # Stray trailing backslash line-continuation removed.
        return self.async_update_ha_state() if update else None
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Support for dealing with progress state."""
class ProgressModel(object):
    """A model of progress indicators as subunit defines it.

    An instance represents one logical operation in progress. The operation
    may contain many steps, and a step may supply its own progress data:
    push() opens a nested subtask, pop() returns to the enclosing one. The
    pos() and width() accessors always report an overall summary rather than
    just the innermost subtask, which is what most user interfaces want.

    The default state is 0/0, meaning overall progress is unknown; renderers
    should treat any zero denominator as "unknown".

    :ivar: _tasks. Private stack of subtasks. Each entry is a list
        [pos, width, overall_numerator, overall_denominator], where the
        overall fields capture the summary state at the time of the push().
    """

    def __init__(self):
        """Create a ProgressModel reporting position 0 and width 0."""
        self._tasks = []
        self.push()

    def adjust_width(self, offset):
        """Adjust the width of the current subtask by *offset*."""
        current = self._tasks[-1]
        current[1] = current[1] + offset

    def advance(self):
        """Advance the current subtask by one step."""
        current = self._tasks[-1]
        current[0] = current[0] + 1

    def pop(self):
        """Discard the current subtask, restoring the state before push()."""
        del self._tasks[-1]

    def pos(self):
        """Return how far through the operation has progressed."""
        if not self._tasks:
            return 0
        step, width, overall_num, _ = self._tasks[-1]
        if len(self._tasks) == 1:
            return step
        # Scale the overall position up by the subtask width (or keep it
        # unchanged when no width is known yet).
        return overall_num * (width or 1) + step

    def push(self):
        """Open a new subtask.

        The overall progress is unchanged by a push; subsequent calls to
        adjust_width/advance/set_width only move progress within the range
        that a single advance() would have covered before. Call pop() to
        return to the previous state.
        """
        self._tasks.append([0, 0, self.pos(), self.width()])

    def set_width(self, width):
        """Set the width of the current subtask."""
        self._tasks[-1][1] = width

    def width(self):
        """Return the total width of the operation."""
        if not self._tasks:
            return 0
        _, width, _, overall_den = self._tasks[-1]
        if len(self._tasks) == 1:
            return width
        # Scale the overall width up by the subtask width (or keep it
        # unchanged when no width is known yet).
        return overall_den * (width or 1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
import argparse
import re
def read_file(filename):
    """Read *filename*, strip //-style line comments, and preprocess it.

    Fixed: the file handle was never closed; a with-statement now
    guarantees closure even if reading raises.
    """
    with open(filename) as source:
        stripped = [re.sub('//.*', '', line) for line in source]
    return process(''.join(stripped))
def process(text):
    """Normalise source text for fingerprinting.

    Collapses insignificant whitespace, strips /* */ block comments and
    canonicalises PHP variable names to $V.
    """
    text = remove_whitespaces(text)
    # Non-greedy match with DOTALL: the previous greedy '/\*.*\*/' (no
    # DOTALL) deleted all code between the first '/*' and the last '*/'
    # on a line, and missed comments spanning surviving newlines.
    text = re.sub(r'/\*.*?\*/', '', text, flags=re.DOTALL)
    text = rename_variables_php(text)
    return text
def remove_whitespaces(text):
    """Drop whitespace unless it separates two alphanumeric characters.

    NOTE(review): the scan covers text[1:-1] only, so the first and last
    characters of *text* are always dropped — presumably acceptable for
    fingerprinting, but confirm if exact boundaries matter.
    """
    kept = []
    for idx in range(1, len(text) - 1):
        ch = text[idx]
        if ch.isspace() and not (text[idx - 1].isalnum()
                                 and text[idx + 1].isalnum()):
            continue
        kept.append(ch)
    return ''.join(kept)
def rename_variables_php(text):
    """Replace every PHP variable ($name) with the placeholder $V."""
    return re.sub(r'\$\w+', '$V', text)
def hash(text, b, mod):
    """Return a polynomial hash of *text* with base *b* modulo *mod*.

    (Name shadows the builtin hash(); kept because ngrams() and the
    module's callers use it by this name.)
    """
    acc = 0
    for ch in text:
        acc = (acc + ord(ch)) * b % mod
    return acc


def ngrams(text, n, b=224737, mod=104729):
    """Return the rolling hash of every length-*n* substring of *text*."""
    current = hash(text[:n], b, mod)
    result = [current]
    for start in range(1, len(text) - n + 1):
        # Roll the window: drop the outgoing char, add the incoming one.
        current = (current - ord(text[start - 1]) * b ** n
                   + ord(text[start + n - 1])) * b % mod
        result.append(current)
    return result
def find_min(a):
    """Return (minimum, index), where index is the RIGHTMOST minimum."""
    smallest = min(a)
    for pos in reversed(range(len(a))):
        if a[pos] == smallest:
            return smallest, pos


def winnowing(hashes, w):
    """Select fingerprint hashes with the winnowing algorithm.

    Slides a window of size *w* over *hashes*, recording the rightmost
    minimum of each window whenever it changes. Returns (fingerprints,
    positions).
    """
    current_min, min_at = find_min(hashes[:w])
    fingerprints = [current_min]
    positions = [min_at]
    for start in range(1, len(hashes) - w + 1):
        if min_at < start:
            # The previous minimum slid out of the window: rescan it.
            current_min, min_at = find_min(hashes[start:(start + w)])
            min_at += start
            fingerprints.append(current_min)
            positions.append(min_at)
        elif hashes[start + w - 1] <= current_min:
            # The newly entered hash is a new rightmost minimum.
            current_min = hashes[start + w - 1]
            min_at = start + w - 1
            fingerprints.append(current_min)
            positions.append(min_at)
    return fingerprints, positions
def fingerprint(text, k, t):
    """Fingerprint *text*: k-gram hashes winnowed with threshold *t*."""
    gram_hashes = ngrams(text, k)
    window = t - k + 1
    return winnowing(gram_hashes, window)
def similarity(text1, text2, k, t):
    """Score the overlap between the fingerprints of two texts."""
    prints_a, _ = fingerprint(text1, k, t)
    prints_b, _ = fingerprint(text2, k, t)
    return score(prints_a, prints_b)
def score(X, Y):
    """Return the length of the longest common subsequence of X and Y."""
    rows, cols = len(X), len(Y)
    # table[i][j] = LCS length of X[:i] and Y[:j]; row/column 0 stay zero.
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if X[i - 1] == Y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
# def score(fp1, fp2):
# cnt = 0
# for h in fp1:
# cnt += fp2.count(h)
# return cnt
if __name__ == '__main__':
    # Command-line entry point: print the preprocessed source of one file.
    parser = argparse.ArgumentParser(description='Compute a fingerprint of the file.')
    parser.add_argument('file', type=str, help='input file')
    parser.add_argument('--k', type=int, default=2, help='n-gram length (default: 2)')
    parser.add_argument('--t', type=int, default=5, help='min length of the guaranteed match (default: 5)')
    cli = parser.parse_args()
    k, t = cli.k, cli.t
    text = read_file(cli.file)
    print(text)
    # TODO(xlionell): Do some source code transformations
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import itertools
import calendar
import sys
import gc
import time
import math
from peewee import (InsertQuery, Check, CompositeKey, ForeignKeyField,
SmallIntegerField, IntegerField, CharField, DoubleField,
BooleanField, DateTimeField, fn, DeleteQuery, FloatField,
TextField, BigIntegerField, PrimaryKeyField,
JOIN, OperationalError)
from playhouse.flask_utils import FlaskDB
from playhouse.pool import PooledMySQLDatabase
from playhouse.shortcuts import RetryOperationalError, case
from playhouse.migrate import migrate, MySQLMigrator
from datetime import datetime, timedelta
from cachetools import TTLCache
from cachetools import cached
from timeit import default_timer
from .utils import (get_pokemon_name, get_pokemon_types,
get_args, cellid, in_radius, date_secs, clock_between,
get_move_name, get_move_damage, get_move_energy,
get_move_type, calc_pokemon_level, peewee_attr_to_col)
from .transform import transform_from_wgs_to_gcj, get_new_coords
from .customLog import printPokemon
from .account import check_login, setup_api, pokestop_spinnable, spin_pokestop
from .proxy import get_new_proxy
from .apiRequests import encounter
# Module-wide logger.
log = logging.getLogger(__name__)
# Shared parsed command-line/config arguments.
args = get_args()
# Flask-Peewee wrapper; the real database is bound in init_database().
flaskDb = FlaskDB()
# Five-minute TTL cache, used by Pokemon.get_seen() via @cached(cache).
cache = TTLCache(maxsize=100, ttl=60 * 5)
# Schema version marker — presumably compared against the stored version
# to decide whether migrations must run; confirm against the caller.
db_schema_version = 30
# Pooled MySQL connection class. The RetryOperationalError mixin (from
# playhouse.shortcuts) retries a query after an OperationalError, e.g.
# when the server dropped the connection.
class MyRetryDB(RetryOperationalError, PooledMySQLDatabase):
    pass
# Reduction of CharField to fit max length inside 767 bytes for utf8mb4 charset
class Utf8mb4CharField(CharField):
    def __init__(self, max_length=191, *args, **kwargs):
        # 191 chars * 4 bytes/char = 764 bytes, within MySQL's 767-byte
        # index-key limit for utf8mb4 columns.
        self.max_length = max_length
        # NOTE(review): super(CharField, ...) intentionally skips
        # CharField.__init__ so its default max_length cannot override the
        # value set above — confirm before "fixing" this to
        # super(Utf8mb4CharField, ...).
        super(CharField, self).__init__(*args, **kwargs)
class UBigIntegerField(BigIntegerField):
    # Maps to MySQL's unsigned BIGINT so full 64-bit ids (e.g. the
    # encounter/spawnpoint ids below) fit without sign issues.
    db_field = 'bigint unsigned'
def init_database(app):
    """Create the pooled MySQL connection and bind it to the Flask app.

    Returns the database object so callers can use it directly.
    """
    log.info('Connecting to MySQL database on %s:%i...',
             args.db_host, args.db_port)
    db = MyRetryDB(
        args.db_name,
        user=args.db_user,
        password=args.db_pass,
        host=args.db_host,
        port=args.db_port,
        stale_timeout=30,
        max_connections=None,
        charset='utf8mb4')
    # Bind through FlaskDB's private helpers: the database object is only
    # created here, after flaskDb was constructed, so the public
    # initializer path cannot be used.
    flaskDb._load_database(app, db)
    if app is not None:
        flaskDb._register_handlers(app)
    return db
class BaseModel(flaskDb.Model):
    """Common base for all models in this module."""

    @classmethod
    def database(cls):
        """Return the peewee database this model is bound to."""
        return cls._meta.database

    @classmethod
    def get_all(cls):
        """Fetch every row of this model as a list of dicts."""
        return list(cls.select().dicts())
class LatLongModel(BaseModel):
    """Base for models carrying latitude/longitude columns.

    When --china is set, coordinates are converted from WGS-84 to GCJ-02
    before being returned.
    """

    @classmethod
    def get_all(cls):
        rows = list(cls.select().dicts())
        if args.china:
            for row in rows:
                row['latitude'], row['longitude'] = \
                    transform_from_wgs_to_gcj(
                        row['latitude'], row['longitude'])
        return rows
class Pokemon(LatLongModel):
    """Sighted Pokemon, keyed by encounter id."""
    # We are base64 encoding the ids delivered by the api
    # because they are too big for sqlite to handle.
    encounter_id = UBigIntegerField(primary_key=True)
    spawnpoint_id = UBigIntegerField(index=True)
    pokemon_id = SmallIntegerField(index=True)
    latitude = DoubleField()
    longitude = DoubleField()
    disappear_time = DateTimeField()
    # IV and combat stats are null until the Pokemon has been encountered.
    individual_attack = SmallIntegerField(null=True)
    individual_defense = SmallIntegerField(null=True)
    individual_stamina = SmallIntegerField(null=True)
    move_1 = SmallIntegerField(null=True)
    move_2 = SmallIntegerField(null=True)
    cp = SmallIntegerField(null=True)
    cp_multiplier = FloatField(null=True)
    weight = FloatField(null=True)
    height = FloatField(null=True)
    gender = SmallIntegerField(null=True)
    costume = SmallIntegerField(null=True)
    form = SmallIntegerField(null=True)
    weather_boosted_condition = SmallIntegerField(null=True)
    last_modified = DateTimeField(
        null=True, index=True, default=datetime.utcnow)

    class Meta:
        indexes = (
            (('latitude', 'longitude'), False),
            (('disappear_time', 'pokemon_id'), False)
        )

    @staticmethod
    def get_active(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
                   oSwLng=None, oNeLat=None, oNeLng=None):
        """Return active (not yet disappeared) Pokemon as a list of dicts.

        Optional bounding box limits results to the current viewport;
        timestamp limits to rows modified since then; the o* box excludes
        an area that was already sent to the client.
        """
        now_date = datetime.utcnow()
        query = Pokemon.select()
        if not (swLat and swLng and neLat and neLng):
            # No viewport given: everything still active.
            query = (query
                     .where(Pokemon.disappear_time > now_date)
                     .dicts())
        elif timestamp > 0:
            # If timestamp is known only load modified Pokemon.
            query = (query
                     .where(((Pokemon.last_modified >
                              datetime.utcfromtimestamp(timestamp / 1000)) &
                             (Pokemon.disappear_time > now_date)) &
                            ((Pokemon.latitude >= swLat) &
                             (Pokemon.longitude >= swLng) &
                             (Pokemon.latitude <= neLat) &
                             (Pokemon.longitude <= neLng)))
                     .dicts())
        elif oSwLat and oSwLng and oNeLat and oNeLng:
            # Send Pokemon in view but exclude those within old boundaries.
            # Only send newly uncovered Pokemon.
            query = (query
                     .where(((Pokemon.disappear_time > now_date) &
                             (((Pokemon.latitude >= swLat) &
                               (Pokemon.longitude >= swLng) &
                               (Pokemon.latitude <= neLat) &
                               (Pokemon.longitude <= neLng))) &
                             ~((Pokemon.disappear_time > now_date) &
                               (Pokemon.latitude >= oSwLat) &
                               (Pokemon.longitude >= oSwLng) &
                               (Pokemon.latitude <= oNeLat) &
                               (Pokemon.longitude <= oNeLng))))
                     .dicts())
        else:
            query = (Pokemon
                     .select()
                     # Add 1 hour buffer to include spawnpoints that persist
                     # after tth, like shsh.
                     .where((Pokemon.disappear_time > now_date) &
                            (((Pokemon.latitude >= swLat) &
                              (Pokemon.longitude >= swLng) &
                              (Pokemon.latitude <= neLat) &
                              (Pokemon.longitude <= neLng))))
                     .dicts())
        return list(query)

    @staticmethod
    def get_active_by_id(ids, swLat, swLng, neLat, neLng):
        """Return active Pokemon whose pokemon_id is in *ids*,
        optionally restricted to a bounding box."""
        if not (swLat and swLng and neLat and neLng):
            query = (Pokemon
                     .select()
                     .where((Pokemon.pokemon_id << ids) &
                            (Pokemon.disappear_time > datetime.utcnow()))
                     .dicts())
        else:
            query = (Pokemon
                     .select()
                     .where((Pokemon.pokemon_id << ids) &
                            (Pokemon.disappear_time > datetime.utcnow()) &
                            (Pokemon.latitude >= swLat) &
                            (Pokemon.longitude >= swLng) &
                            (Pokemon.latitude <= neLat) &
                            (Pokemon.longitude <= neLng))
                     .dicts())
        return list(query)

    # Get all Pokémon spawn counts based on the last x hours.
    # More efficient than get_seen(): we don't do any unnecessary mojo.
    # Returns a dict:
    #   { 'pokemon': [ {'pokemon_id': '', 'count': 1} ], 'total': 1 }.
    @staticmethod
    def get_spawn_counts(hours):
        query = (Pokemon
                 .select(Pokemon.pokemon_id,
                         fn.Count(Pokemon.pokemon_id).alias('count')))
        # Allow 0 to query everything.
        if hours:
            hours = datetime.utcnow() - timedelta(hours=hours)
            # Not using WHERE speeds up the query.
            query = query.where(Pokemon.disappear_time > hours)
        query = query.group_by(Pokemon.pokemon_id).dicts()
        # We need a total count. Use reduce() instead of sum() for O(n)
        # instead of O(2n) caused by list comprehension.
        # NOTE(review): relies on the Python 2 builtin reduce(); needs
        # functools.reduce on Python 3.
        total = reduce(lambda x, y: x + y['count'], query, 0)
        return {'pokemon': query, 'total': total}

    @staticmethod
    @cached(cache)
    def get_seen(timediff):
        """Return every seen Pokemon species with its sighting count and
        latest appearance, cached for the module TTL (5 minutes)."""
        if timediff:
            timediff = datetime.utcnow() - timedelta(hours=timediff)
        # Note: pokemon_id+0 forces SQL to ignore the pokemon_id index
        # and should use the disappear_time index and hopefully
        # improve performance
        pokemon_count_query = (Pokemon
                               .select((Pokemon.pokemon_id+0).alias(
                                           'pokemon_id'),
                                       fn.COUNT((Pokemon.pokemon_id+0)).alias(
                                           'count'),
                                       fn.MAX(Pokemon.disappear_time).alias(
                                           'lastappeared')
                                       )
                               .where(Pokemon.disappear_time > timediff)
                               .group_by((Pokemon.pokemon_id+0))
                               .alias('counttable')
                               )
        query = (Pokemon
                 .select(Pokemon.pokemon_id,
                         Pokemon.disappear_time,
                         Pokemon.latitude,
                         Pokemon.longitude,
                         pokemon_count_query.c.count)
                 .join(pokemon_count_query,
                       on=(Pokemon.pokemon_id ==
                           pokemon_count_query.c.pokemon_id))
                 .distinct()
                 .where(Pokemon.disappear_time ==
                        pokemon_count_query.c.lastappeared)
                 .dicts()
                 )
        # Performance: disable the garbage collector prior to creating a
        # (potentially) large dict with append().
        gc.disable()
        pokemon = []
        total = 0
        for p in query:
            p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
            pokemon.append(p)
            total += p['count']
        # Re-enable the GC.
        gc.enable()
        return {'pokemon': pokemon, 'total': total}

    @staticmethod
    def get_appearances(pokemon_id, timediff):
        '''
        :param pokemon_id: id of Pokemon that we need appearances for
        :param timediff: limiting period of the selection
        :return: list of Pokemon appearances over a selected period
        '''
        if timediff:
            timediff = datetime.utcnow() - timedelta(hours=timediff)
        query = (Pokemon
                 .select(Pokemon.latitude, Pokemon.longitude,
                         Pokemon.pokemon_id,
                         fn.Count(Pokemon.spawnpoint_id).alias('count'),
                         Pokemon.spawnpoint_id)
                 .where((Pokemon.pokemon_id == pokemon_id) &
                        (Pokemon.disappear_time > timediff)
                        )
                 .group_by(Pokemon.latitude, Pokemon.longitude,
                           Pokemon.pokemon_id, Pokemon.spawnpoint_id)
                 .dicts()
                 )
        return list(query)

    @staticmethod
    def get_appearances_times_by_spawnpoint(pokemon_id, spawnpoint_id,
                                            timediff):
        '''
        :param pokemon_id: id of Pokemon that we need appearances times for.
        :param spawnpoint_id: spawnpoint id we need appearances times for.
        :param timediff: limiting period of the selection.
        :return: list of time appearances over a selected period.
        '''
        if timediff:
            timediff = datetime.utcnow() - timedelta(hours=timediff)
        query = (Pokemon
                 .select(Pokemon.disappear_time)
                 .where((Pokemon.pokemon_id == pokemon_id) &
                        (Pokemon.spawnpoint_id == spawnpoint_id) &
                        (Pokemon.disappear_time > timediff)
                        )
                 .order_by(Pokemon.disappear_time.asc())
                 .tuples()
                 )
        # Flatten the one-column tuples into a plain list of datetimes.
        return list(itertools.chain(*query))
class Pokestop(LatLongModel):
    """Pokestop fort, keyed by pokestop id."""
    pokestop_id = Utf8mb4CharField(primary_key=True, max_length=50)
    enabled = BooleanField()
    latitude = DoubleField()
    longitude = DoubleField()
    last_modified = DateTimeField(index=True)
    # Lure fields are null when no lure module is active.
    lure_expiration = DateTimeField(null=True, index=True)
    active_fort_modifier = SmallIntegerField(null=True, index=True)
    last_updated = DateTimeField(
        null=True, index=True, default=datetime.utcnow)

    class Meta:
        indexes = ((('latitude', 'longitude'), False),)

    @staticmethod
    def get_stops(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
                  oSwLng=None, oNeLat=None, oNeLng=None, lured=False):
        """Return pokestops as a list of dicts.

        The bounding box limits to the viewport, timestamp limits to rows
        updated since then, the o* box excludes an already-sent area, and
        lured=True restricts results to stops with an active lure.
        """
        query = Pokestop.select(Pokestop.active_fort_modifier,
                                Pokestop.enabled, Pokestop.latitude,
                                Pokestop.longitude, Pokestop.last_modified,
                                Pokestop.lure_expiration, Pokestop.pokestop_id)
        if not (swLat and swLng and neLat and neLng):
            query = (query
                     .dicts())
        elif timestamp > 0:
            query = (query
                     .where(((Pokestop.last_updated >
                              datetime.utcfromtimestamp(timestamp / 1000))) &
                            (Pokestop.latitude >= swLat) &
                            (Pokestop.longitude >= swLng) &
                            (Pokestop.latitude <= neLat) &
                            (Pokestop.longitude <= neLng))
                     .dicts())
        elif oSwLat and oSwLng and oNeLat and oNeLng and lured:
            query = (query
                     .where((((Pokestop.latitude >= swLat) &
                              (Pokestop.longitude >= swLng) &
                              (Pokestop.latitude <= neLat) &
                              (Pokestop.longitude <= neLng)) &
                             (Pokestop.active_fort_modifier.is_null(False))) &
                            ~((Pokestop.latitude >= oSwLat) &
                              (Pokestop.longitude >= oSwLng) &
                              (Pokestop.latitude <= oNeLat) &
                              (Pokestop.longitude <= oNeLng)) &
                            (Pokestop.active_fort_modifier.is_null(False)))
                     .dicts())
        elif oSwLat and oSwLng and oNeLat and oNeLng:
            # Send stops in view but exclude those within old boundaries. Only
            # send newly uncovered stops.
            query = (query
                     .where(((Pokestop.latitude >= swLat) &
                             (Pokestop.longitude >= swLng) &
                             (Pokestop.latitude <= neLat) &
                             (Pokestop.longitude <= neLng)) &
                            ~((Pokestop.latitude >= oSwLat) &
                              (Pokestop.longitude >= oSwLng) &
                              (Pokestop.latitude <= oNeLat) &
                              (Pokestop.longitude <= oNeLng)))
                     .dicts())
        elif lured:
            query = (query
                     .where(((Pokestop.last_updated >
                              datetime.utcfromtimestamp(timestamp / 1000))) &
                            ((Pokestop.latitude >= swLat) &
                             (Pokestop.longitude >= swLng) &
                             (Pokestop.latitude <= neLat) &
                             (Pokestop.longitude <= neLng)) &
                            (Pokestop.active_fort_modifier.is_null(False)))
                     .dicts())
        else:
            query = (query
                     .where((Pokestop.latitude >= swLat) &
                            (Pokestop.longitude >= swLng) &
                            (Pokestop.latitude <= neLat) &
                            (Pokestop.longitude <= neLng))
                     .dicts())
        # Performance: disable the garbage collector prior to creating a
        # (potentially) large dict with append().
        gc.disable()
        pokestops = []
        for p in query:
            if args.china:
                p['latitude'], p['longitude'] = \
                    transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
            pokestops.append(p)
        # Re-enable the GC.
        gc.enable()
        return pokestops
class Gym(LatLongModel):
gym_id = Utf8mb4CharField(primary_key=True, max_length=50)
team_id = SmallIntegerField()
guard_pokemon_id = SmallIntegerField()
slots_available = SmallIntegerField()
enabled = BooleanField()
park = BooleanField(default=False)
latitude = DoubleField()
longitude = DoubleField()
total_cp = SmallIntegerField()
last_modified = DateTimeField(index=True)
last_scanned = DateTimeField(default=datetime.utcnow, index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_gyms(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
oSwLng=None, oNeLat=None, oNeLng=None):
if not (swLat and swLng and neLat and neLng):
results = (Gym
.select()
.dicts())
elif timestamp > 0:
# If timestamp is known only send last scanned Gyms.
results = (Gym
.select()
.where(((Gym.last_scanned >
datetime.utcfromtimestamp(timestamp / 1000)) &
(Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng)))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send gyms in view but exclude those within old boundaries. Only
# send newly uncovered gyms.
results = (Gym
.select()
.where(((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng)) &
~((Gym.latitude >= oSwLat) &
(Gym.longitude >= oSwLng) &
(Gym.latitude <= oNeLat) &
(Gym.longitude <= oNeLng)))
.dicts())
else:
results = (Gym
.select()
.where((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng))
.dicts())
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
gyms = {}
gym_ids = []
for g in results:
g['name'] = None
g['pokemon'] = []
g['raid'] = None
gyms[g['gym_id']] = g
gym_ids.append(g['gym_id'])
if len(gym_ids) > 0:
pokemon = (GymMember
.select(
GymMember.gym_id,
GymPokemon.cp.alias('pokemon_cp'),
GymMember.cp_decayed,
GymMember.deployment_time,
GymMember.last_scanned,
GymPokemon.pokemon_id,
GymPokemon.costume,
GymPokemon.form,
GymPokemon.shiny)
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon, on=(GymMember.pokemon_uid ==
GymPokemon.pokemon_uid))
.where(GymMember.gym_id << gym_ids)
.where(GymMember.last_scanned > Gym.last_modified)
.distinct()
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
gyms[p['gym_id']]['pokemon'].append(p)
details = (GymDetails
.select(
GymDetails.gym_id,
GymDetails.name)
.where(GymDetails.gym_id << gym_ids)
.dicts())
for d in details:
gyms[d['gym_id']]['name'] = d['name']
raids = (Raid
.select()
.where(Raid.gym_id << gym_ids)
.dicts())
for r in raids:
if r['pokemon_id']:
r['pokemon_name'] = get_pokemon_name(r['pokemon_id'])
r['pokemon_types'] = get_pokemon_types(r['pokemon_id'])
gyms[r['gym_id']]['raid'] = r
# Re-enable the GC.
gc.enable()
return gyms
@staticmethod
def get_gym(id):
try:
result = (Gym
.select(Gym.gym_id,
Gym.team_id,
GymDetails.name,
GymDetails.description,
Gym.guard_pokemon_id,
Gym.slots_available,
Gym.latitude,
Gym.longitude,
Gym.last_modified,
Gym.last_scanned,
Gym.total_cp)
.join(GymDetails, JOIN.LEFT_OUTER,
on=(Gym.gym_id == GymDetails.gym_id))
.where(Gym.gym_id == id)
.dicts()
.get())
except Gym.DoesNotExist:
return None
result['guard_pokemon_name'] = get_pokemon_name(
result['guard_pokemon_id']) if result['guard_pokemon_id'] else ''
result['pokemon'] = []
pokemon = (GymMember
.select(GymPokemon.cp.alias('pokemon_cp'),
GymMember.cp_decayed,
GymMember.deployment_time,
GymMember.last_scanned,
GymPokemon.pokemon_id,
GymPokemon.pokemon_uid,
GymPokemon.move_1,
GymPokemon.move_2,
GymPokemon.iv_attack,
GymPokemon.iv_defense,
GymPokemon.iv_stamina,
GymPokemon.costume,
GymPokemon.form,
GymPokemon.shiny)
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon,
on=(GymMember.pokemon_uid == GymPokemon.pokemon_uid))
.where(GymMember.gym_id == id)
.where(GymMember.last_scanned > Gym.last_modified)
.order_by(GymMember.deployment_time.desc())
.distinct()
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['move_1_name'] = get_move_name(p['move_1'])
p['move_1_damage'] = get_move_damage(p['move_1'])
p['move_1_energy'] = get_move_energy(p['move_1'])
p['move_1_type'] = get_move_type(p['move_1'])
p['move_2_name'] = get_move_name(p['move_2'])
p['move_2_damage'] = get_move_damage(p['move_2'])
p['move_2_energy'] = get_move_energy(p['move_2'])
p['move_2_type'] = get_move_type(p['move_2'])
result['pokemon'].append(p)
try:
raid = Raid.select(Raid).where(Raid.gym_id == id).dicts().get()
if raid['pokemon_id']:
raid['pokemon_name'] = get_pokemon_name(raid['pokemon_id'])
raid['pokemon_types'] = get_pokemon_types(raid['pokemon_id'])
result['raid'] = raid
except Raid.DoesNotExist:
pass
return result
@staticmethod
def set_gyms_in_park(gyms, park):
    """Set the park flag/value on every gym in `gyms`."""
    ids = [entry['gym_id'] for entry in gyms]
    Gym.update(park=park).where(Gym.gym_id << ids).execute()
@staticmethod
def get_gyms_park(id):
    """Return the stored park value for a gym ID, or False if not found."""
    with Gym.database().execution_context():
        rows = Gym.select(Gym.park).where(Gym.gym_id == id).dicts()
        if rows:
            return rows[0]['park']
    return False
class Raid(BaseModel):
    """A raid at a gym; keyed by gym, so at most one row per gym."""

    gym_id = Utf8mb4CharField(primary_key=True, max_length=50)
    level = IntegerField(index=True)
    # Raid lifecycle timestamps: egg appearance, battle start and end.
    spawn = DateTimeField(index=True)
    start = DateTimeField(index=True)
    end = DateTimeField(index=True)
    # Boss fields are nullable — presumably unknown until the egg
    # hatches (see get_gym, which only resolves names when pokemon_id
    # is set).
    pokemon_id = SmallIntegerField(null=True)
    cp = IntegerField(null=True)
    move_1 = SmallIntegerField(null=True)
    move_2 = SmallIntegerField(null=True)
    last_scanned = DateTimeField(default=datetime.utcnow, index=True)
class LocationAltitude(LatLongModel):
    """Cache of ground altitudes, keyed by the cell ID of the location."""

    cellid = UBigIntegerField(primary_key=True)
    latitude = DoubleField()
    longitude = DoubleField()
    last_modified = DateTimeField(index=True, default=datetime.utcnow,
                                  null=True)
    altitude = DoubleField()

    class Meta:
        indexes = ((('latitude', 'longitude'), False),)

    # DB format of a new location altitude.
    @staticmethod
    def new_loc(loc, altitude):
        """Build the row dict for a (lat, lng) pair and its altitude."""
        return {'cellid': cellid(loc),
                'latitude': loc[0],
                'longitude': loc[1],
                'altitude': altitude}

    # Find a nearby altitude from the db, looking for one within 140m.
    @staticmethod
    def get_nearby_altitude(loc):
        """Return a cached altitude near `loc`, or None when none stored."""
        n, e, s, w = hex_bounds(loc, radius=0.14)  # 140m.

        # Get all location altitudes in that box.
        query = (LocationAltitude
                 .select()
                 .where((LocationAltitude.latitude <= n) &
                        (LocationAltitude.latitude >= s) &
                        (LocationAltitude.longitude >= w) &
                        (LocationAltitude.longitude <= e))
                 .dicts())

        # Materialize the query exactly once. The previous
        # len(list(query)) + query[0] pattern executed it twice
        # (indexing a peewee query issues a fresh LIMIT query).
        rows = list(query)
        return rows[0]['altitude'] if rows else None

    @staticmethod
    def save_altitude(loc, altitude):
        """Upsert the altitude for `loc` into the cache."""
        InsertQuery(
            LocationAltitude,
            rows=[LocationAltitude.new_loc(loc, altitude)]).upsert().execute()
class PlayerLocale(BaseModel):
    """Country/language/timezone recorded for a scan location string."""

    location = Utf8mb4CharField(primary_key=True, max_length=50, index=True)
    country = Utf8mb4CharField(max_length=2)
    language = Utf8mb4CharField(max_length=2)
    timezone = Utf8mb4CharField(max_length=50)

    @staticmethod
    def get_locale(location):
        """Return the stored locale dict for `location`, or None."""
        with PlayerLocale.database().execution_context():
            try:
                row = PlayerLocale.get(PlayerLocale.location == location)
            except PlayerLocale.DoesNotExist:
                log.debug('This location is not yet in PlayerLocale DB table.')
                return None
            return {
                'country': row.country,
                'language': row.language,
                'timezone': row.timezone,
            }
class ScannedLocation(LatLongModel):
    """A scan cell and the hourly "band" coverage recorded for it.

    The hour is divided into five 12-minute bands; a cell is "done"
    once every band has been scanned. Band values are seconds after
    the hour (0-3599), with -1 meaning "not yet scanned".
    """

    cellid = UBigIntegerField(primary_key=True)
    latitude = DoubleField()
    longitude = DoubleField()
    last_modified = DateTimeField(
        index=True, default=datetime.utcnow, null=True)

    # Marked true when all five bands have been completed.
    done = BooleanField(default=False)

    # Five scans/hour is required to catch all spawns.
    # Each scan must be at least 12 minutes from the previous check,
    # with a 2 minute window during which the scan can be done.

    # Default of -1 is for bands not yet scanned.
    band1 = SmallIntegerField(default=-1)
    band2 = SmallIntegerField(default=-1)
    band3 = SmallIntegerField(default=-1)
    band4 = SmallIntegerField(default=-1)
    band5 = SmallIntegerField(default=-1)

    # midpoint is the center of the bands relative to band 1.
    # If band 1 is 10.4 minutes, and band 4 is 34.0 minutes, midpoint
    # is -0.2 minutes in minsec. Extra 10 seconds in case of delay in
    # recording now time.
    midpoint = SmallIntegerField(default=0)

    # width is how wide the valid window is. Default is 0, max is 2 minutes.
    # If band 1 is 10.4 minutes, and band 4 is 34.0 minutes, midpoint
    # is 0.4 minutes in minsec.
    width = SmallIntegerField(default=0)

    class Meta:
        indexes = ((('latitude', 'longitude'), False),)
        constraints = [Check('band1 >= -1'), Check('band1 < 3600'),
                       Check('band2 >= -1'), Check('band2 < 3600'),
                       Check('band3 >= -1'), Check('band3 < 3600'),
                       Check('band4 >= -1'), Check('band4 < 3600'),
                       Check('band5 >= -1'), Check('band5 < 3600'),
                       Check('midpoint >= -130'), Check('midpoint <= 130'),
                       Check('width >= 0'), Check('width <= 130')]

    @staticmethod
    def get_recent(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
                   oSwLng=None, oNeLat=None, oNeLng=None):
        """Return recently modified scan locations inside a bounding box.

        With `timestamp` > 0, returns locations modified since that
        ms-epoch timestamp; with the old-boundary args, returns only
        the newly uncovered area; otherwise everything active within
        the last 15 minutes.
        """
        activeTime = (datetime.utcnow() - timedelta(minutes=15))
        if timestamp > 0:
            query = (ScannedLocation
                     .select()
                     .where(((ScannedLocation.last_modified >=
                              datetime.utcfromtimestamp(timestamp / 1000))) &
                            (ScannedLocation.latitude >= swLat) &
                            (ScannedLocation.longitude >= swLng) &
                            (ScannedLocation.latitude <= neLat) &
                            (ScannedLocation.longitude <= neLng))
                     .dicts())
        elif oSwLat and oSwLng and oNeLat and oNeLng:
            # Send scannedlocations in view but exclude those within old
            # boundaries. Only send newly uncovered scannedlocations.
            query = (ScannedLocation
                     .select()
                     .where((((ScannedLocation.last_modified >= activeTime)) &
                             (ScannedLocation.latitude >= swLat) &
                             (ScannedLocation.longitude >= swLng) &
                             (ScannedLocation.latitude <= neLat) &
                             (ScannedLocation.longitude <= neLng)) &
                            ~(((ScannedLocation.last_modified >= activeTime)) &
                              (ScannedLocation.latitude >= oSwLat) &
                              (ScannedLocation.longitude >= oSwLng) &
                              (ScannedLocation.latitude <= oNeLat) &
                              (ScannedLocation.longitude <= oNeLng)))
                     .dicts())
        else:
            query = (ScannedLocation
                     .select()
                     .where((ScannedLocation.last_modified >= activeTime) &
                            (ScannedLocation.latitude >= swLat) &
                            (ScannedLocation.longitude >= swLng) &
                            (ScannedLocation.latitude <= neLat) &
                            (ScannedLocation.longitude <= neLng))
                     .order_by(ScannedLocation.last_modified.asc())
                     .dicts())

        return list(query)

    # DB format of a new location.
    @staticmethod
    def new_loc(loc):
        """Build a fresh, unscanned row dict for a (lat, lng) pair."""
        return {'cellid': cellid(loc),
                'latitude': loc[0],
                'longitude': loc[1],
                'done': False,
                'band1': -1,
                'band2': -1,
                'band3': -1,
                'band4': -1,
                'band5': -1,
                'width': 0,
                'midpoint': 0,
                'last_modified': None}

    # Used to update bands.
    @staticmethod
    def db_format(scan, band, nowms):
        """Record `nowms` into the given band and recompute `done`."""
        scan.update({'band' + str(band): nowms})
        # Done once every band holds a real (> -1) time. `reduce` is the
        # Python 2 builtin here.
        scan['done'] = reduce(lambda x, y: x and (
            scan['band' + str(y)] > -1), range(1, 6), True)
        return scan

    # Shorthand helper for DB dict.
    @staticmethod
    def _q_init(scan, start, end, kind, sp_id=None):
        """Build a scheduler queue item for a scan window."""
        return {'loc': scan['loc'], 'kind': kind, 'start': start, 'end': end,
                'step': scan['step'], 'sp': sp_id}

    @staticmethod
    def get_by_cellids(cellids):
        """Return existing scan locations keyed by stringified cell ID."""
        d = {}
        with ScannedLocation.database().execution_context():
            query = (ScannedLocation
                     .select()
                     .where(ScannedLocation.cellid << cellids)
                     .dicts())

            for sl in list(query):
                key = "{}".format(sl['cellid'])
                d[key] = sl
        return d

    @staticmethod
    def find_in_locs(loc, locs):
        """Look `loc` up in a prefetched dict, or build a new default row."""
        key = "{}".format(cellid(loc))
        return locs[key] if key in locs else ScannedLocation.new_loc(loc)

    # Return value of a particular scan from loc, or default dict if not found.
    @staticmethod
    def get_by_loc(loc):
        with ScannedLocation.database().execution_context():
            query = (ScannedLocation
                     .select()
                     .where(ScannedLocation.cellid == cellid(loc))
                     .dicts())

            result = query[0] if len(
                list(query)) else ScannedLocation.new_loc(loc)
        return result

    # Check if spawnpoints in a list are in any of the existing
    # spannedlocation records. Otherwise, search through the spawnpoint list
    # and update scan_spawn_point dict for DB bulk upserting.
    @staticmethod
    def link_spawn_points(scans, initial, spawn_points, distance):
        """Build {index: {spawnpoint, scannedlocation}} link rows for every
        spawn point within `distance` km of each scan location."""
        index = 0
        scan_spawn_point = {}
        for cell, scan in scans.iteritems():
            # Difference in degrees at the equator for 70m is actually 0.00063
            # degrees and gets smaller the further north or south you go
            deg_at_lat = 0.0007 / math.cos(math.radians(scan['loc'][0]))
            for sp in spawn_points:
                # Cheap bounding-box rejection before the precise
                # in_radius check.
                if (abs(sp['latitude'] - scan['loc'][0]) > 0.0008 or
                        abs(sp['longitude'] - scan['loc'][1]) > deg_at_lat):
                    continue
                if in_radius((sp['latitude'], sp['longitude']),
                             scan['loc'], distance * 1000):
                    scan_spawn_point[index] = {
                        'spawnpoint': sp['id'],
                        'scannedlocation': cell}
                    index += 1
        return scan_spawn_point

    @staticmethod
    def linked_spawn_points(cell):
        """Return spawn points linked (via ScanSpawnPoint) to one cell."""
        # Unable to use a normal join, since MySQL produces foreignkey
        # constraint errors when trying to upsert fields that are foreignkeys
        # on another table
        with SpawnPoint.database().execution_context():
            query = (SpawnPoint
                     .select()
                     .join(ScanSpawnPoint)
                     .join(ScannedLocation)
                     .where(ScannedLocation.cellid == cell).dicts())
            result = list(query)
        return result

    @staticmethod
    def get_cell_to_linked_spawn_points(cellids, location_change_date):
        """Map each cell ID to the spawn points assigned to it.

        Each spawn point is assigned to exactly one of its (up to 3)
        overlapping cells, via the MAX(scannedlocation) sub-query.
        """
        # Get all spawnpoints from the hive's cells
        sp_from_cells = (ScanSpawnPoint
                         .select(ScanSpawnPoint.spawnpoint)
                         .where(ScanSpawnPoint.scannedlocation << cellids)
                         .alias('spcells'))
        # A new SL (new ones are created when the location changes) or
        # it can be a cell from another active hive
        one_sp_scan = (
            ScanSpawnPoint.select(
                ScanSpawnPoint.spawnpoint,
                fn.MAX(ScanSpawnPoint.scannedlocation).alias('cellid'))
            .join(
                sp_from_cells,
                on=sp_from_cells.c.spawnpoint_id == ScanSpawnPoint.spawnpoint)
            .join(
                ScannedLocation,
                on=(ScannedLocation.cellid == ScanSpawnPoint.scannedlocation))
            .where(((ScannedLocation.last_modified >= (location_change_date)) &
                    (ScannedLocation.last_modified >
                     (datetime.utcnow() - timedelta(minutes=60)))) | (
                ScannedLocation.cellid << cellids))
            .group_by(ScanSpawnPoint.spawnpoint).alias('maxscan'))
        # As scan locations overlap, spawnpoints can belong to up to 3
        # locations. This sub-query effectively assigns each SP to exactly
        # one location.
        ret = {}
        with SpawnPoint.database().execution_context():
            query = (SpawnPoint
                     .select(SpawnPoint, one_sp_scan.c.cellid)
                     .join(one_sp_scan, on=(SpawnPoint.id ==
                                            one_sp_scan.c.spawnpoint_id))
                     .where(one_sp_scan.c.cellid << cellids)
                     .dicts())
            spawns = list(query)

        for item in spawns:
            if item['cellid'] not in ret:
                ret[item['cellid']] = []
            ret[item['cellid']].append(item)

        return ret

    # Return list of dicts for upcoming valid band times.
    @staticmethod
    def get_times(scan, now_date, scanned_locations):
        s = ScannedLocation.find_in_locs(scan['loc'], scanned_locations)
        if s['done']:
            return []

        # NOTE: `max`/`min` deliberately shadow the builtins within this
        # method; no builtin min/max call is made below the shadowing.
        max = 3600 * 2 + 250  # Greater than maximum possible value.
        min = {'end': max}

        nowms = date_secs(now_date)
        if s['band1'] == -1:
            return [ScannedLocation._q_init(scan, nowms, nowms + 3599, 'band')]

        # Find next window.
        basems = s['band1']
        for i in range(2, 6):
            ms = s['band' + str(i)]

            # Skip bands already done.
            if ms > -1:
                continue

            radius = 120 - s['width'] / 2
            end = (basems + s['midpoint'] + radius + (i - 1) * 720 - 10) % 3600
            end = end if end >= nowms else end + 3600

            if end < min['end']:
                min = ScannedLocation._q_init(scan, end - radius * 2 + 10, end,
                                              'band')

        return [min] if min['end'] < max else []

    # Checks if now falls within an unfilled band for a scanned location.
    # Returns the updated scan location dict.
    @staticmethod
    def update_band(scan, now_date):
        scan['last_modified'] = now_date
        if scan['done']:
            return scan

        now_secs = date_secs(now_date)
        if scan['band1'] == -1:
            return ScannedLocation.db_format(scan, 1, now_secs)

        # Calculate if number falls in band with remaining points.
        basems = scan['band1']
        delta = (now_secs - basems - scan['midpoint']) % 3600
        band = int(round(delta / 12 / 60.0) % 5) + 1

        # Check if that band is already filled.
        if scan['band' + str(band)] > -1:
            return scan

        # Check if this result falls within the band's 2 minute window.
        offset = (delta + 1080) % 720 - 360
        if abs(offset) > 120 - scan['width'] / 2:
            return scan

        # Find band midpoint/width. `filter`/`map` return lists in
        # Python 2, which min()/max() below rely on.
        scan = ScannedLocation.db_format(scan, band, now_secs)
        bts = [scan['band' + str(i)] for i in range(1, 6)]
        bts = filter(lambda ms: ms > -1, bts)
        bts_delta = map(lambda ms: (ms - basems) % 3600, bts)
        bts_offsets = map(lambda ms: (ms + 1080) % 720 - 360, bts_delta)
        min_scan = min(bts_offsets)
        max_scan = max(bts_offsets)
        scan['width'] = max_scan - min_scan
        scan['midpoint'] = (max_scan + min_scan) / 2

        return scan

    @staticmethod
    def get_bands_filled_by_cellids(cellids):
        """Count filled bands (band value > -1) across the given cells."""
        with SpawnPoint.database().execution_context():
            result = int(
                ScannedLocation.select(
                    fn.SUM(
                        case(ScannedLocation.band1, ((-1, 0),), 1) +
                        case(ScannedLocation.band2, ((-1, 0),), 1) + case(
                            ScannedLocation.band3, ((-1, 0),), 1) + case(
                            ScannedLocation.band4, ((-1, 0),), 1) + case(
                            ScannedLocation.band5, ((-1, 0),), 1))
                    .alias('band_count'))
                .where(ScannedLocation.cellid << cellids).scalar() or 0)
        return result

    @staticmethod
    def reset_bands(scan_loc):
        """Clear all band data on a scan dict (in place)."""
        scan_loc['done'] = False
        scan_loc['last_modified'] = datetime.utcnow()
        for i in range(1, 6):
            scan_loc['band' + str(i)] = -1

    @staticmethod
    def select_in_hex(locs):
        """Return scan locations for the cells of the given steps/locs."""
        # There should be a way to delegate this to SpawnPoint.select_in_hex,
        # but w/e.
        cells = []
        for i, e in enumerate(locs):
            cells.append(cellid(e[1]))

        in_hex = []
        # Get all spawns for the locations.
        with SpawnPoint.database().execution_context():
            sp = list(ScannedLocation
                      .select()
                      .where(ScannedLocation.cellid << cells)
                      .dicts())

        # For each spawn work out if it is in the hex
        # (clipping the diagonals).
        for spawn in sp:
            in_hex.append(spawn)
        return in_hex
class MainWorker(BaseModel):
    """Status row reported by each main worker process."""

    worker_name = Utf8mb4CharField(primary_key=True, max_length=50)
    message = TextField(null=True, default="")
    method = Utf8mb4CharField(max_length=50)
    last_modified = DateTimeField(index=True)
    # Account counts summed by get_account_stats().
    accounts_working = IntegerField()
    accounts_captcha = IntegerField()
    accounts_failed = IntegerField()
    success = IntegerField(default=0)
    fail = IntegerField(default=0)
    empty = IntegerField(default=0)
    skip = IntegerField(default=0)
    captcha = IntegerField(default=0)
    start = IntegerField(default=0)
    elapsed = IntegerField(default=0)

    @staticmethod
    def get_account_stats(age_minutes=30):
        """Sum working/captcha/failed account counts over workers seen in
        the last `age_minutes`. Returns zeros when no rows match."""
        stats = {'working': 0, 'captcha': 0, 'failed': 0}
        timeout = datetime.utcnow() - timedelta(minutes=age_minutes)
        with MainWorker.database().execution_context():
            account_stats = (MainWorker
                             .select(fn.SUM(MainWorker.accounts_working),
                                     fn.SUM(MainWorker.accounts_captcha),
                                     fn.SUM(MainWorker.accounts_failed))
                             .where(MainWorker.last_modified >= timeout)
                             .scalar(as_tuple=True))
            # SUM() yields NULL (None) when no rows matched.
            if account_stats[0] is not None:
                stats.update({
                    'working': int(account_stats[0]),
                    'captcha': int(account_stats[1]),
                    'failed': int(account_stats[2])
                })
        return stats

    @staticmethod
    def get_recent(age_minutes=30):
        """Return worker rows modified within the last `age_minutes`,
        ordered by worker name; empty list on DB failure."""
        status = []
        timeout = datetime.utcnow() - timedelta(minutes=age_minutes)
        try:
            with MainWorker.database().execution_context():
                query = (MainWorker
                         .select()
                         .where(MainWorker.last_modified >= timeout)
                         .order_by(MainWorker.worker_name.asc())
                         .dicts())
                status = [dbmw for dbmw in query]
        except Exception as e:
            log.exception('Failed to retrieve main worker status: %s.', e)
        return status
class WorkerStatus(LatLongModel):
    """Per-account status reported by scan workers."""

    username = Utf8mb4CharField(primary_key=True, max_length=50)
    worker_name = Utf8mb4CharField(index=True, max_length=50)
    success = IntegerField()
    fail = IntegerField()
    no_items = IntegerField()
    skip = IntegerField()
    captcha = IntegerField()
    last_modified = DateTimeField(index=True)
    message = Utf8mb4CharField(max_length=191)
    last_scan_date = DateTimeField(index=True)
    latitude = DoubleField(null=True)
    longitude = DoubleField(null=True)

    @staticmethod
    def db_format(status, name='status_worker_db'):
        """Translate an in-memory status dict to the DB row layout.

        Also fills status['worker_name'] with `name` when missing.
        """
        status['worker_name'] = status.get('worker_name', name)
        return {
            'username': status['username'],
            'worker_name': status['worker_name'],
            'success': status['success'],
            'fail': status['fail'],
            'no_items': status['noitems'],
            'skip': status['skip'],
            'captcha': status['captcha'],
            'last_modified': datetime.utcnow(),
            'message': status['message'],
            'last_scan_date': status.get('last_scan_date',
                                         datetime.utcnow()),
            'latitude': status.get('latitude', None),
            'longitude': status.get('longitude', None),
        }

    @staticmethod
    def get_recent(age_minutes=30):
        """Return worker rows modified within the last `age_minutes`,
        ordered by username; empty list on DB failure."""
        result = []
        cutoff = datetime.utcnow() - timedelta(minutes=age_minutes)
        try:
            with WorkerStatus.database().execution_context():
                recent = (WorkerStatus
                          .select()
                          .where(WorkerStatus.last_modified >= cutoff)
                          .order_by(WorkerStatus.username.asc())
                          .dicts())
                result = list(recent)
        except Exception as e:
            log.exception('Failed to retrieve worker status: %s.', e)
        return result

    @staticmethod
    def get_worker(username):
        """Return the status row for `username`, or None if absent."""
        with WorkerStatus.database().execution_context():
            try:
                return (WorkerStatus
                        .select()
                        .where(WorkerStatus.username == username)
                        .dicts()
                        .get())
            except WorkerStatus.DoesNotExist:
                return None
class SpawnPoint(LatLongModel):
    """A spawn point and what has been learned about its hourly schedule.

    Times are stored as seconds after the hour (0-3599). "TTH" is the
    time-till-hidden, i.e. the despawn time being narrowed down between
    `latest_seen` and `earliest_unseen`.
    """

    id = UBigIntegerField(primary_key=True)
    latitude = DoubleField()
    longitude = DoubleField()
    last_scanned = DateTimeField(index=True)

    # kind gives the four quartiles of the spawn, as 's' for seen
    # or 'h' for hidden. For example, a 30 minute spawn is 'hhss'.
    kind = Utf8mb4CharField(max_length=4, default='hhhs')

    # links shows whether a Pokemon encounter id changes between quartiles or
    # stays the same. Both 1x45 and 1x60h3 have the kind of 'sssh', but the
    # different links shows when the encounter id changes. Same encounter id
    # is shared between two quartiles, links shows a '+'. A different
    # encounter id between two quartiles is a '-'.
    #
    # For the hidden times, an 'h' is used. Until determined, '?' is used.
    # Note index is shifted by a half. links[0] is the link between
    # kind[0] and kind[1] and so on. links[3] is the link between
    # kind[3] and kind[0]
    links = Utf8mb4CharField(max_length=4, default='????')

    # Count consecutive times spawn should have been seen, but wasn't.
    # If too high, will not be scheduled for review, and treated as inactive.
    missed_count = IntegerField(default=0)

    # Next 2 fields are to narrow down on the valid TTH window.
    # Seconds after the hour of the latest Pokemon seen time within the hour.
    latest_seen = SmallIntegerField()

    # Seconds after the hour of the earliest time Pokemon wasn't seen after an
    # appearance.
    earliest_unseen = SmallIntegerField()

    class Meta:
        indexes = ((('latitude', 'longitude'), False),)
        constraints = [Check('earliest_unseen >= 0'),
                       Check('earliest_unseen <= 3600'),
                       Check('latest_seen >= 0'),
                       Check('latest_seen <= 3600')]

    # Returns the spawnpoint dict from ID, or a new dict if not found.
    @staticmethod
    def get_by_id(id, latitude=0, longitude=0):
        with SpawnPoint.database().execution_context():
            query = (SpawnPoint
                     .select()
                     .where(SpawnPoint.id == id)
                     .dicts())

            result = query[0] if query else {
                'id': id,
                'latitude': latitude,
                'longitude': longitude,
                'last_scanned': None,  # Null value used as new flag.
                'kind': 'hhhs',
                'links': '????',
                'missed_count': 0,
                'latest_seen': 0,
                'earliest_unseen': 0
            }
        return result

    @staticmethod
    def get_spawnpoints(swLat, swLng, neLat, neLng, timestamp=0,
                        oSwLat=None, oSwLng=None, oNeLat=None, oNeLng=None):
        """Return spawn points in a bounding box, with computed
        appear/disappear times; mirrors ScannedLocation.get_recent's
        timestamp / old-boundary filtering."""
        spawnpoints = {}
        with SpawnPoint.database().execution_context():
            query = (SpawnPoint.select(
                SpawnPoint.latitude, SpawnPoint.longitude, SpawnPoint.id,
                SpawnPoint.links, SpawnPoint.kind, SpawnPoint.latest_seen,
                SpawnPoint.earliest_unseen, ScannedLocation.done)
                .join(ScanSpawnPoint).join(ScannedLocation).dicts())

            if timestamp > 0:
                query = (
                    query.where(((SpawnPoint.last_scanned >
                                  datetime.utcfromtimestamp(timestamp / 1000)))
                                & ((SpawnPoint.latitude >= swLat) &
                                   (SpawnPoint.longitude >= swLng) &
                                   (SpawnPoint.latitude <= neLat) &
                                   (SpawnPoint.longitude <= neLng))).dicts())
            elif oSwLat and oSwLng and oNeLat and oNeLng:
                # Send spawnpoints in view but exclude those within old
                # boundaries. Only send newly uncovered spawnpoints.
                query = (query
                         .where((((SpawnPoint.latitude >= swLat) &
                                  (SpawnPoint.longitude >= swLng) &
                                  (SpawnPoint.latitude <= neLat) &
                                  (SpawnPoint.longitude <= neLng))) &
                                ~((SpawnPoint.latitude >= oSwLat) &
                                  (SpawnPoint.longitude >= oSwLng) &
                                  (SpawnPoint.latitude <= oNeLat) &
                                  (SpawnPoint.longitude <= oNeLng)))
                         .dicts())
            elif swLat and swLng and neLat and neLng:
                query = (query
                         .where((SpawnPoint.latitude <= neLat) &
                                (SpawnPoint.latitude >= swLat) &
                                (SpawnPoint.longitude >= swLng) &
                                (SpawnPoint.longitude <= neLng)))

            queryDict = query.dicts()
            for sp in queryDict:
                key = sp['id']
                appear_time, disappear_time = SpawnPoint.start_end(sp)
                spawnpoints[key] = sp
                spawnpoints[key]['disappear_time'] = disappear_time
                spawnpoints[key]['appear_time'] = appear_time
                if not SpawnPoint.tth_found(sp) or not sp['done']:
                    spawnpoints[key]['uncertain'] = True

        # Helping out the GC: drop fields the caller doesn't need.
        for sp in spawnpoints.values():
            del sp['done']
            del sp['kind']
            del sp['links']
            del sp['latest_seen']
            del sp['earliest_unseen']

        return list(spawnpoints.values())

    # Confirm if TTH has been found.
    @staticmethod
    def tth_found(sp):
        # Fully identified if no '?' in links and
        # latest_seen % 3600 == earliest_unseen % 3600.
        # Warning: python uses modulo as the least residue, not as
        # remainder, so we don't apply it to the result.
        latest_seen = (sp['latest_seen'] % 3600)
        earliest_unseen = (sp['earliest_unseen'] % 3600)
        return latest_seen - earliest_unseen == 0

    # Return [start, end] in seconds after the hour for the spawn, despawn
    # time of a spawnpoint.
    @staticmethod
    def start_end(sp, spawn_delay=0, links=False):
        links_arg = links
        links = links if links else str(sp['links'])

        if links == '????':  # Clean up for old data.
            links = str(sp['kind'].replace('s', '?'))

        # Make some assumptions if link not fully identified.
        if links.count('-') == 0:
            links = links[:-1] + '-'

        links = links.replace('?', '+')

        links = links[:-1] + '-'
        plus_or_minus = links.index('+') if links.count('+') else links.index(
            '-')
        start = sp['earliest_unseen'] - (4 - plus_or_minus) * 900 + spawn_delay
        # Pad the window by a minute when the TTH isn't pinned down yet.
        no_tth_adjust = 60 if not links_arg and not SpawnPoint.tth_found(
            sp) else 0
        end = sp['latest_seen'] - (3 - links.index('-')) * 900 + no_tth_adjust
        return [start % 3600, end % 3600]

    # Return a list of dicts with the next spawn times.
    @staticmethod
    def get_times(cell, scan, now_date, scan_delay,
                  cell_to_linked_spawn_points, sp_by_id):
        result = []
        now_secs = date_secs(now_date)
        linked_spawn_points = (cell_to_linked_spawn_points[cell]
                               if cell in cell_to_linked_spawn_points else [])

        for sp in linked_spawn_points:

            # Ignore spawn points that keep missing their expected spawn.
            if sp['missed_count'] > 5:
                continue

            endpoints = SpawnPoint.start_end(sp, scan_delay)
            SpawnPoint.add_if_not_scanned('spawn', result, sp, scan,
                                          endpoints[0], endpoints[1], now_date,
                                          now_secs, sp_by_id)

            # Check to see if still searching for valid TTH.
            if SpawnPoint.tth_found(sp):
                continue

            # Add a spawnpoint check between latest_seen and earliest_unseen.
            start = sp['latest_seen']
            end = sp['earliest_unseen']

            # So if the gap between start and end < 89 seconds make the gap
            # 89 seconds
            if ((end > start and end - start < 89) or
                    (start > end and (end + 3600) - start < 89)):
                end = (start + 89) % 3600

            # So we move the search gap on 45 to within 45 and 89 seconds from
            # the last scan. TTH appears in the last 90 seconds of the Spawn.
            start = sp['latest_seen'] + 45

            SpawnPoint.add_if_not_scanned('TTH', result, sp, scan, start, end,
                                          now_date, now_secs, sp_by_id)

        return result

    @staticmethod
    def add_if_not_scanned(kind, l, sp, scan, start,
                           end, now_date, now_secs, sp_by_id):
        """Append a queue item for this spawn point unless it was already
        scanned within the current window."""
        # Make sure later than now_secs.
        while end < now_secs:
            start, end = start + 3600, end + 3600

        # Ensure start before end.
        while start > end:
            start -= 3600

        while start < 0:
            start, end = start + 3600, end + 3600

        last_scanned = sp_by_id[sp['id']]['last_scanned']
        if ((now_date - last_scanned).total_seconds() > now_secs - start):
            l.append(ScannedLocation._q_init(scan, start, end, kind, sp['id']))

    @staticmethod
    def select_in_hex_by_cellids(cellids, location_change_date):
        """Return spawn points assigned to the given cells; same
        one-cell-per-spawnpoint sub-query as
        ScannedLocation.get_cell_to_linked_spawn_points."""
        # Get all spawnpoints from the hive's cells
        sp_from_cells = (ScanSpawnPoint
                         .select(ScanSpawnPoint.spawnpoint)
                         .where(ScanSpawnPoint.scannedlocation << cellids)
                         .alias('spcells'))
        # Allocate a spawnpoint to one cell only, this can either be
        # A new SL (new ones are created when the location changes) or
        # it can be a cell from another active hive
        one_sp_scan = (ScanSpawnPoint
                       .select(ScanSpawnPoint.spawnpoint,
                               fn.MAX(ScanSpawnPoint.scannedlocation).alias(
                                   'Max_ScannedLocation_id'))
                       .join(sp_from_cells, on=sp_from_cells.c.spawnpoint_id
                             == ScanSpawnPoint.spawnpoint)
                       .join(
                           ScannedLocation,
                           on=(ScannedLocation.cellid
                               == ScanSpawnPoint.scannedlocation))
                       .where(((ScannedLocation.last_modified
                                >= (location_change_date)) & (
                           ScannedLocation.last_modified > (
                               datetime.utcnow() - timedelta(minutes=60)))) |
                           (ScannedLocation.cellid << cellids))
                       .group_by(ScanSpawnPoint.spawnpoint)
                       .alias('maxscan'))

        in_hex = []
        with SpawnPoint.database().execution_context():
            query = (SpawnPoint
                     .select(SpawnPoint)
                     .join(one_sp_scan,
                           on=(one_sp_scan.c.spawnpoint_id == SpawnPoint.id))
                     .where(one_sp_scan.c.Max_ScannedLocation_id << cellids)
                     .dicts())

            for spawn in list(query):
                in_hex.append(spawn)
        return in_hex

    @staticmethod
    def select_in_hex_by_location(center, steps):
        """Return spawn points inside the hex of `steps` rings around
        `center`, clipping the bounding box's diagonal corners."""
        R = 6378.1  # KM radius of the earth
        hdist = ((steps * 120.0) - 50.0) / 1000.0
        n, e, s, w = hex_bounds(center, steps)

        in_hex = []
        # Get all spawns in that box.
        with SpawnPoint.database().execution_context():
            sp = list(SpawnPoint
                      .select()
                      .where((SpawnPoint.latitude <= n) &
                             (SpawnPoint.latitude >= s) &
                             (SpawnPoint.longitude >= w) &
                             (SpawnPoint.longitude <= e))
                      .dicts())

        # For each spawn work out if it is in the hex
        # (clipping the diagonals).
        for spawn in sp:
            # Get the offset from the center of each spawn in km.
            offset = [math.radians(spawn['latitude'] - center[0]) * R,
                      math.radians(spawn['longitude'] - center[1]) *
                      (R * math.cos(math.radians(center[0])))]
            # Check against the 4 lines that make up the diagonals.
            if (offset[1] + (offset[0] * 0.5)) > hdist:  # Too far NE
                continue
            if (offset[1] - (offset[0] * 0.5)) > hdist:  # Too far SE
                continue
            if ((offset[0] * 0.5) - offset[1]) > hdist:  # Too far NW
                continue
            if ((0 - offset[1]) - (offset[0] * 0.5)) > hdist:  # Too far SW
                continue
            # If it gets to here it's a good spawn.
            in_hex.append(spawn)
        return in_hex
class ScanSpawnPoint(BaseModel):
    """Many-to-many link between scan locations and spawn points."""

    scannedlocation = ForeignKeyField(ScannedLocation, null=True)
    spawnpoint = ForeignKeyField(SpawnPoint, null=True)

    class Meta:
        # The pair itself is the identity; each link exists at most once.
        primary_key = CompositeKey('spawnpoint', 'scannedlocation')
class SpawnpointDetectionData(BaseModel):
    """Raw sighting records used to work out a spawn point's schedule."""

    id = PrimaryKeyField()
    # Removed ForeignKeyField since it caused MySQL issues.
    encounter_id = UBigIntegerField()
    # Removed ForeignKeyField since it caused MySQL issues.
    spawnpoint_id = UBigIntegerField(index=True)
    scan_time = DateTimeField()
    tth_secs = SmallIntegerField(null=True)

    @staticmethod
    def set_default_earliest_unseen(sp):
        """Default the TTH upper bound to 15 minutes past latest_seen."""
        sp['earliest_unseen'] = (sp['latest_seen'] + 15 * 60) % 3600

    @staticmethod
    def classify(sp, scan_loc, now_secs, sighting=None):
        """Classify the spawn point's kind/links and narrow its TTH
        window, based on all recorded sightings (mutates `sp` in place).
        """
        # Get past sightings.
        with SpawnpointDetectionData.database().execution_context():
            query = list(
                SpawnpointDetectionData.select()
                .where(SpawnpointDetectionData.spawnpoint_id == sp['id'])
                .order_by(SpawnpointDetectionData.scan_time.asc()).dicts())

        if sighting:
            query.append(sighting)

        tth_found = False
        for s in query:
            if s['tth_secs'] is not None:
                tth_found = True
                tth_secs = (s['tth_secs'] - 1) % 3600

        # To reduce CPU usage, give an intial reading of 15 minute spawns if
        # not done with initial scan of location.
        if not scan_loc['done']:
            # We only want to reset a SP if it is new and not due the
            # location changing (which creates new Scannedlocations)
            if not tth_found:
                sp['kind'] = 'hhhs'
                if not sp['earliest_unseen']:
                    sp['latest_seen'] = now_secs
                    SpawnpointDetectionData.set_default_earliest_unseen(sp)
                elif clock_between(sp['latest_seen'], now_secs,
                                   sp['earliest_unseen']):
                    sp['latest_seen'] = now_secs
            return

        # Make a record of links, so we can reset earliest_unseen
        # if it changes.
        old_kind = str(sp['kind'])

        # Make a sorted list of the seconds after the hour.
        seen_secs = sorted(map(lambda x: date_secs(x['scan_time']), query))

        # Include an entry for the TTH, if it was found.
        if tth_found:
            seen_secs.append(tth_secs)
            seen_secs.sort()

        # Add the first seen_secs to the end as a clock wrap around.
        if seen_secs:
            seen_secs.append(seen_secs[0] + 3600)

        # Make a list of gaps between sightings.
        gap_list = [seen_secs[i + 1] - seen_secs[i]
                    for i in range(len(seen_secs) - 1)]

        max_gap = max(gap_list)

        # An hour minus the largest gap in minutes gives us the duration the
        # spawn was there. Round up to the nearest 15 minute interval for our
        # current best guess duration.
        duration = (int((60 - max_gap / 60.0) / 15) + 1) * 15

        # If the second largest gap is larger than 15 minutes, then there are
        # two gaps greater than 15 minutes. It must be a double spawn.
        if len(gap_list) > 4 and sorted(gap_list)[-2] > 900:
            sp['kind'] = 'hshs'
            sp['links'] = 'h?h?'

        else:
            # Convert the duration into a 'hhhs', 'hhss', 'hsss', 'ssss' string
            # accordingly. 's' is for seen, 'h' is for hidden.
            sp['kind'] = ''.join(
                ['s' if i > (3 - duration / 15) else 'h' for i in range(0, 4)])

            # Assume no hidden times.
            sp['links'] = sp['kind'].replace('s', '?')

        if sp['kind'] != 'ssss':
            # Cover all bases, make sure we're using values < 3600.
            # Warning: python uses modulo as the least residue, not as
            # remainder, so we don't apply it to the result.
            residue_unseen = sp['earliest_unseen'] % 3600
            residue_seen = sp['latest_seen'] % 3600
            if (not sp['earliest_unseen'] or
                    residue_unseen != residue_seen or
                    not tth_found):

                # New latest_seen will be just before max_gap.
                sp['latest_seen'] = seen_secs[gap_list.index(max_gap)]

                # if we don't have a earliest_unseen yet or if the kind of
                # spawn has changed, reset to latest_seen + 14 minutes.
                if not sp['earliest_unseen'] or sp['kind'] != old_kind:
                    SpawnpointDetectionData.set_default_earliest_unseen(sp)

            return

        # Only ssss spawns from here below.
        sp['links'] = '+++-'

        # Cover all bases, make sure we're using values < 3600.
        # Warning: python uses modulo as the least residue, not as
        # remainder, so we don't apply it to the result.
        residue_unseen = sp['earliest_unseen'] % 3600
        residue_seen = sp['latest_seen'] % 3600
        if residue_unseen == residue_seen:
            return

        # Make a sight_list of dicts:
        # {date: first seen time,
        # delta: duration of sighting,
        # same: whether encounter ID was same or different over that time}
        #
        # For 60 minute spawns ('ssss'), the largest gap doesn't give the
        # earliest spawnpoint because a Pokemon is always there. Use the union
        # of all intervals where the same encounter ID was seen to find the
        # latest_seen. If a different encounter ID was seen, then the
        # complement of that interval was the same ID, so union that
        # complement as well.
        sight_list = [{'date': query[i]['scan_time'],
                       'delta': query[i + 1]['scan_time'] -
                       query[i]['scan_time'],
                       'same': query[i + 1]['encounter_id'] ==
                       query[i]['encounter_id']
                       }
                      for i in range(len(query) - 1)
                      if query[i + 1]['scan_time'] - query[i]['scan_time'] <
                      timedelta(hours=1)
                      ]

        start_end_list = []
        for s in sight_list:
            if s['same']:
                # Get the seconds past the hour for start and end times.
                start = date_secs(s['date'])
                end = (start + int(s['delta'].total_seconds())) % 3600

            else:
                # Convert diff range to same range by taking the clock
                # complement.
                start = date_secs(s['date'] + s['delta']) % 3600
                end = date_secs(s['date'])

            start_end_list.append([start, end])

        # Take the union of all the ranges.
        while True:
            # union is list of unions of ranges with the same encounter id.
            union = []
            for start, end in start_end_list:
                if not union:
                    union.append([start, end])
                    continue
                # Cycle through all ranges in union, since it might overlap
                # with any of them.
                for u in union:
                    if clock_between(u[0], start, u[1]):
                        u[1] = end if not(clock_between(
                            u[0], end, u[1])) else u[1]
                    elif clock_between(u[0], end, u[1]):
                        u[0] = start if not(clock_between(
                            u[0], start, u[1])) else u[0]
                    elif union.count([start, end]) == 0:
                        union.append([start, end])

            # Are no more unions possible?
            if union == start_end_list:
                break

            start_end_list = union  # Make another pass looking for unions.

        # If more than one disparate union, take the largest as our starting
        # point. `reduce` is the Python 2 builtin.
        union = reduce(lambda x, y: x if (x[1] - x[0]) % 3600 >
                       (y[1] - y[0]) % 3600 else y, union, [0, 3600])
        sp['latest_seen'] = union[1]
        sp['earliest_unseen'] = union[0]
        log.info('1x60: appear %d, despawn %d, duration: %d min.',
                 union[0], union[1], ((union[1] - union[0]) % 3600) / 60)

    # Expand the seen times for 30 minute spawnpoints based on scans when spawn
    # wasn't there. Return true if spawnpoint dict changed.
    @staticmethod
    def unseen(sp, now_secs):
        # Return if we already have a tth.
        # Cover all bases, make sure we're using values < 3600.
        # Warning: python uses modulo as the least residue, not as
        # remainder, so we don't apply it to the result.
        residue_unseen = sp['earliest_unseen'] % 3600
        residue_seen = sp['latest_seen'] % 3600
        if residue_seen == residue_unseen:
            return False

        # If now_secs is later than the latest seen return.
        if not clock_between(sp['latest_seen'], now_secs,
                             sp['earliest_unseen']):
            return False

        # Narrow the TTH window: the spawn was absent at now_secs.
        sp['earliest_unseen'] = now_secs

        return True
class Versions(BaseModel):
    """Small key/value table of integers; per its name it tracks version
    numbers (presumably the DB schema version — verify against callers)."""
    key = Utf8mb4CharField()
    val = SmallIntegerField()

    class Meta:
        # Plain lookup table, no primary key.
        primary_key = False
class GymMember(BaseModel):
    """Join table linking a defending Pokemon (by uid) to a gym.

    Rows are deleted and re-inserted wholesale on each gym rescan (see
    parse_gyms), so no primary key is declared.
    """
    gym_id = Utf8mb4CharField(index=True)
    pokemon_uid = UBigIntegerField(index=True)
    last_scanned = DateTimeField(default=datetime.utcnow, index=True)
    deployment_time = DateTimeField()
    # CP after motivation decay at scan time (cp_now in the response).
    cp_decayed = SmallIntegerField()

    class Meta:
        primary_key = False
class GymPokemon(BaseModel):
    """Stats for an individual Pokemon seen defending a gym."""
    pokemon_uid = UBigIntegerField(primary_key=True)
    pokemon_id = SmallIntegerField()
    # CP when the Pokemon was deployed (see parse_gyms: cp_when_deployed).
    cp = SmallIntegerField()
    num_upgrades = SmallIntegerField(null=True)
    move_1 = SmallIntegerField(null=True)
    move_2 = SmallIntegerField(null=True)
    height = FloatField(null=True)
    weight = FloatField(null=True)
    stamina = SmallIntegerField(null=True)
    stamina_max = SmallIntegerField(null=True)
    cp_multiplier = FloatField(null=True)
    additional_cp_multiplier = FloatField(null=True)
    iv_defense = SmallIntegerField(null=True)
    iv_stamina = SmallIntegerField(null=True)
    iv_attack = SmallIntegerField(null=True)
    costume = SmallIntegerField(null=True)
    form = SmallIntegerField(null=True)
    shiny = SmallIntegerField(null=True)
    last_seen = DateTimeField(default=datetime.utcnow)
class GymDetails(BaseModel):
    """Extended gym info (name, description, image url) from gym scans."""
    gym_id = Utf8mb4CharField(primary_key=True, max_length=50)
    name = Utf8mb4CharField()
    description = TextField(null=True, default="")
    url = Utf8mb4CharField()
    last_scanned = DateTimeField(default=datetime.utcnow)
class Token(BaseModel):
    """Captcha token storage; workers claim and consume tokens atomically
    via get_valid()."""
    token = TextField()
    last_updated = DateTimeField(default=datetime.utcnow, index=True)
    @staticmethod
    def get_valid(limit=15):
        """Claim up to `limit` fresh captcha tokens, deleting them from
        the table so no other worker can reuse them.

        Tokens older than 30 seconds are considered unusable and are
        ignored. Returns a list of token strings (possibly empty).
        """
        # Make sure we don't grab more than we can process
        if limit > 15:
            limit = 15
        valid_time = datetime.utcnow() - timedelta(seconds=30)
        token_ids = []
        tokens = []
        try:
            with Token.database().execution_context():
                query = (Token
                         .select()
                         .where(Token.last_updated > valid_time)
                         .order_by(Token.last_updated.asc())
                         .limit(limit)
                         .dicts())
                for t in query:
                    # t['id'] is peewee's implicit auto-increment primary
                    # key (no explicit pk is declared on this model).
                    token_ids.append(t['id'])
                    tokens.append(t['token'])
                if tokens:
                    log.debug('Retrieved Token IDs: %s.', token_ids)
                    # Delete the claimed rows inside the same transaction
                    # so the claim is atomic.
                    query = DeleteQuery(Token).where(Token.id << token_ids)
                    rows = query.execute()
                    log.debug('Claimed and removed %d captcha tokens.', rows)
        except OperationalError as e:
            log.exception('Failed captcha token transactional query: %s.', e)
        return tokens
class HashKeys(BaseModel):
    """Usage/limit status of hashing-service API keys."""
    key = Utf8mb4CharField(primary_key=True, max_length=20)
    maximum = IntegerField(default=0)
    remaining = IntegerField(default=0)
    peak = IntegerField(default=0)
    expires = DateTimeField(null=True)
    last_updated = DateTimeField(default=datetime.utcnow)
    # Obfuscate hashing keys before sending them to the front-end.
    @staticmethod
    def get_obfuscated_keys():
        """Return all hash keys with the last 9 characters masked with '*'."""
        hashkeys = HashKeys.get_all()
        for i, s in enumerate(hashkeys):
            hashkeys[i]['key'] = s['key'][:-9] + '*'*9
        return hashkeys
    # Retrieve stored 'peak' value from recently used hashing keys.
    @staticmethod
    def get_stored_peaks():
        """Return {key: peak} for keys updated in the last 30 minutes."""
        hashkeys = {}
        try:
            with HashKeys.database().execution_context():
                query = (HashKeys
                         .select(HashKeys.key, HashKeys.peak)
                         .where(HashKeys.last_updated >
                                (datetime.utcnow() - timedelta(minutes=30)))
                         .dicts())
                for dbhk in query:
                    hashkeys[dbhk['key']] = dbhk['peak']
        except OperationalError as e:
            log.exception('Failed to get hashing keys stored peaks: %s.', e)
        return hashkeys
def hex_bounds(center, steps=None, radius=None):
    """Return a (north, east, south, west) bounding box around `center`.

    With `steps`, the box edge distance is (70m * steps * 2) + 70m away
    from the center point (you need to travel that far to cover the
    hex). Otherwise the raw `radius` is used as the distance.
    """
    if steps:
        distance = 0.07 * (2 * steps + 1)
    else:
        distance = radius
    # Project the distance along each cardinal bearing; keep latitude
    # for north/south and longitude for east/west.
    north = get_new_coords(center, distance, 0)[0]
    south = get_new_coords(center, distance, 180)[0]
    east = get_new_coords(center, distance, 90)[1]
    west = get_new_coords(center, distance, 270)[1]
    return (north, east, south, west)
# todo: this probably shouldn't _really_ be in "models" anymore, but w/e.
def parse_map(args, map_dict, scan_coords, scan_location, db_update_queue,
              wh_update_queue, key_scheduler, api, status, now_date, account,
              account_sets):
    """Parse one GET_MAP_OBJECTS response into DB rows and webhook events.

    Consumes the map cells in map_dict, updating Pokemon, pokestop, gym,
    raid and spawnpoint state. Parsed rows are queued on db_update_queue
    and webhook payloads on wh_update_queue. Also updates scan bands for
    scan_location and narrows spawnpoint TTH windows.

    Returns a dict with 'count', 'gyms', 'sp_id_list', 'bad_scan' and
    'scan_secs'.
    """
    pokemon = {}
    pokestops = {}
    gyms = {}
    raids = {}
    skipped = 0
    filtered = 0
    stopsskipped = 0
    forts = []
    forts_count = 0
    wild_pokemon = []
    wild_pokemon_count = 0
    nearby_pokemon = 0
    spawn_points = {}
    scan_spawn_points = {}
    sightings = {}
    new_spawn_points = []
    sp_id_list = []
    # Consolidate the individual lists in each cell into two lists of Pokemon
    # and a list of forts.
    cells = map_dict['responses']['GET_MAP_OBJECTS'].map_cells
    # Get the level for the pokestop spin, and to send to webhook.
    level = account['level']
    # Use separate level indicator for our L30 encounters.
    encounter_level = level
    for i, cell in enumerate(cells):
        # If we have map responses then use the time from the request
        if i == 0:
            now_date = datetime.utcfromtimestamp(
                cell.current_timestamp_ms / 1000)
        nearby_pokemon += len(cell.nearby_pokemons)
        # Parse everything for stats (counts). Future enhancement -- we don't
        # necessarily need to know *how many* forts/wild/nearby were found but
        # we'd like to know whether or not *any* were found to help determine
        # if a scan was actually bad.
        if not args.no_pokemon:
            wild_pokemon += cell.wild_pokemons
        if not args.no_pokestops or not args.no_gyms:
            forts += cell.forts
        wild_pokemon_count += len(cell.wild_pokemons)
        forts_count += len(cell.forts)
    now_secs = date_secs(now_date)
    # Free the (large) raw response as early as possible.
    del map_dict['responses']['GET_MAP_OBJECTS']
    # If there are no wild or nearby Pokemon...
    if not wild_pokemon and not nearby_pokemon:
        # ...and there are no gyms/pokestops then it's unusable/bad.
        if not forts:
            log.warning('Bad scan. Parsing found absolutely nothing'
                        + ' using account %s.', account['username'])
            log.info('Common causes: captchas or IP bans.')
        elif not args.no_pokemon:
            # When gym scanning we'll go over the speed limit
            # and Pokémon will be invisible, but we'll still be able
            # to scan gyms so we disable the error logging.
            # No wild or nearby Pokemon but there are forts. It's probably
            # a speed violation.
            log.warning('No nearby or wild Pokemon but there are visible '
                        'gyms or pokestops. Possible speed violation.')
    # Track whether this pass is what completes the location's scan bands.
    done_already = scan_location['done']
    ScannedLocation.update_band(scan_location, now_date)
    just_completed = not done_already and scan_location['done']
    if wild_pokemon and not args.no_pokemon:
        encounter_ids = [p.encounter_id for p in wild_pokemon]
        # For all the wild Pokemon we found check if an active Pokemon is in
        # the database.
        with Pokemon.database().execution_context():
            query = (Pokemon
                     .select(Pokemon.encounter_id, Pokemon.spawnpoint_id)
                     .where((Pokemon.disappear_time >= now_date) &
                            (Pokemon.encounter_id << encounter_ids))
                     .dicts())
            # Store all encounter_ids and spawnpoint_ids for the Pokemon in
            # query.
            # All of that is needed to make sure it's unique.
            encountered_pokemon = [
                (p['encounter_id'], p['spawnpoint_id']) for p in query]
        for p in wild_pokemon:
            # Spawn point ids are hex strings in the proto.
            spawn_id = int(p.spawn_point_id, 16)
            sp = SpawnPoint.get_by_id(spawn_id, p.latitude,
                                      p.longitude)
            spawn_points[spawn_id] = sp
            sp['missed_count'] = 0
            sighting = {
                'encounter_id': p.encounter_id,
                'spawnpoint_id': spawn_id,
                'scan_time': now_date,
                'tth_secs': None
            }
            # Keep a list of sp_ids to return.
            sp_id_list.append(spawn_id)
            # time_till_hidden_ms was overflowing causing a negative integer.
            # It was also returning a value above 3.6M ms.
            if 0 < p.time_till_hidden_ms < 3600000:
                d_t_secs = date_secs(datetime.utcfromtimestamp(
                    (p.last_modified_timestamp_ms +
                     p.time_till_hidden_ms) / 1000.0))
                # Cover all bases, make sure we're using values < 3600.
                # Warning: python uses modulo as the least residue, not as
                # remainder, so we don't apply it to the result.
                residue_unseen = sp['earliest_unseen'] % 3600
                residue_seen = sp['latest_seen'] % 3600
                if (residue_seen != residue_unseen or
                        not sp['last_scanned']):
                    log.info('TTH found for spawnpoint %s.', sp['id'])
                    sighting['tth_secs'] = d_t_secs
                    # Only update when TTH is seen for the first time.
                    # Just before Pokemon migrations, Niantic sets all TTH
                    # to the exact time of the migration, not the normal
                    # despawn time.
                    sp['latest_seen'] = d_t_secs
                    sp['earliest_unseen'] = d_t_secs
            # Link rows are keyed 1..n in insertion order.
            scan_spawn_points[len(scan_spawn_points)+1] = {
                'spawnpoint': sp['id'],
                'scannedlocation': scan_location['cellid']}
            if not sp['last_scanned']:
                log.info('New Spawn Point found.')
                new_spawn_points.append(sp)
                # If we found a new spawnpoint after the location was already
                # fully scanned then either it's new, or we had a bad scan.
                # Either way, rescan the location.
                if scan_location['done'] and not just_completed:
                    log.warning('Location was fully scanned, and yet a brand '
                                'new spawnpoint found.')
                    log.warning('Redoing scan of this location to identify '
                                'new spawnpoint.')
                    ScannedLocation.reset_bands(scan_location)
            if (not SpawnPoint.tth_found(sp) or sighting['tth_secs'] or
                    not scan_location['done'] or just_completed):
                SpawnpointDetectionData.classify(sp, scan_location, now_secs,
                                                 sighting)
                sightings[p.encounter_id] = sighting
            sp['last_scanned'] = datetime.utcfromtimestamp(
                p.last_modified_timestamp_ms / 1000.0)
            if ((p.encounter_id, spawn_id) in encountered_pokemon):
                # If Pokemon has been encountered before don't process it.
                skipped += 1
                continue
            start_end = SpawnPoint.start_end(sp, 1)
            seconds_until_despawn = (start_end[1] - now_secs) % 3600
            disappear_time = now_date + \
                timedelta(seconds=seconds_until_despawn)
            pokemon_id = p.pokemon_data.pokemon_id
            # If this is an ignored pokemon, skip this whole section.
            # We want the stuff above or we will impact spawn detection
            # but we don't want to insert it, or send it to webhooks.
            if args.ignorelist_file and pokemon_id in args.ignorelist:
                log.debug('Ignoring Pokemon id: %i.', pokemon_id)
                filtered += 1
                continue
            printPokemon(pokemon_id, p.latitude, p.longitude,
                         disappear_time)
            # Scan for IVs/CP and moves.
            pokemon_info = False
            if args.encounter and (pokemon_id in args.enc_whitelist):
                pokemon_info = encounter_pokemon(
                    args, p, account, api, account_sets, status, key_scheduler)
            pokemon[p.encounter_id] = {
                'encounter_id': p.encounter_id,
                'spawnpoint_id': spawn_id,
                'pokemon_id': pokemon_id,
                'latitude': p.latitude,
                'longitude': p.longitude,
                'disappear_time': disappear_time,
                'individual_attack': None,
                'individual_defense': None,
                'individual_stamina': None,
                'move_1': None,
                'move_2': None,
                'cp': None,
                'cp_multiplier': None,
                'height': None,
                'weight': None,
                'gender': p.pokemon_data.pokemon_display.gender,
                'costume': p.pokemon_data.pokemon_display.costume,
                'form': p.pokemon_data.pokemon_display.form,
                'weather_boosted_condition': None
            }
            # Store Pokémon boosted condition.
            # TODO: Move pokemon_display to the top.
            pokemon_display = p.pokemon_data.pokemon_display
            boosted = pokemon_display.weather_boosted_condition
            if boosted:
                pokemon[p.encounter_id]['weather_boosted_condition'] = boosted
            # We need to check if exist and is not false due to a
            # request error.
            if pokemon_info:
                pokemon[p.encounter_id].update({
                    'individual_attack': pokemon_info.individual_attack,
                    'individual_defense': pokemon_info.individual_defense,
                    'individual_stamina': pokemon_info.individual_stamina,
                    'move_1': pokemon_info.move_1,
                    'move_2': pokemon_info.move_2,
                    'height': pokemon_info.height_m,
                    'weight': pokemon_info.weight_kg,
                    'cp': pokemon_info.cp,
                    'cp_multiplier': pokemon_info.cp_multiplier,
                    'gender': pokemon_info.pokemon_display.gender
                })
            if 'pokemon' in args.wh_types:
                # Whitelist takes precedence; otherwise anything not
                # blacklisted goes to the webhook.
                if (pokemon_id in args.webhook_whitelist or
                        (not args.webhook_whitelist and pokemon_id
                         not in args.webhook_blacklist)):
                    wh_poke = pokemon[p.encounter_id].copy()
                    wh_poke.update({
                        'disappear_time': calendar.timegm(
                            disappear_time.timetuple()),
                        'last_modified_time': p.last_modified_timestamp_ms,
                        'time_until_hidden_ms': p.time_till_hidden_ms,
                        'verified': SpawnPoint.tth_found(sp),
                        'seconds_until_despawn': seconds_until_despawn,
                        'spawn_start': start_end[0],
                        'spawn_end': start_end[1],
                        'player_level': encounter_level
                    })
                    if wh_poke['cp_multiplier'] is not None:
                        wh_poke.update({
                            'pokemon_level': calc_pokemon_level(
                                wh_poke['cp_multiplier'])
                        })
                    wh_update_queue.put(('pokemon', wh_poke))
    if forts and (not args.no_pokestops or not args.no_gyms):
        if not args.no_pokestops:
            stop_ids = [f.id for f in forts if f.type == 1]
            if stop_ids:
                with Pokemon.database().execution_context():
                    query = (Pokestop.select(
                        Pokestop.pokestop_id, Pokestop.last_modified).where(
                            (Pokestop.pokestop_id << stop_ids)).dicts())
                    encountered_pokestops = [(f['pokestop_id'], int(
                        (f['last_modified'] - datetime(1970, 1,
                                                       1)).total_seconds()))
                                             for f in query]
        for f in forts:
            if not args.no_pokestops and f.type == 1:  # Pokestops.
                if len(f.active_fort_modifier) > 0:
                    lure_expiration = (datetime.utcfromtimestamp(
                        f.last_modified_timestamp_ms / 1000.0) +
                        timedelta(minutes=args.lure_duration))
                    active_fort_modifier = f.active_fort_modifier[0]
                else:
                    lure_expiration, active_fort_modifier = None, None
                if ((f.id, int(f.last_modified_timestamp_ms / 1000.0))
                        in encountered_pokestops):
                    # If pokestop has been encountered before and hasn't
                    # changed don't process it.
                    stopsskipped += 1
                    continue
                pokestops[f.id] = {
                    'pokestop_id': f.id,
                    'enabled': f.enabled,
                    'latitude': f.latitude,
                    'longitude': f.longitude,
                    'last_modified': datetime.utcfromtimestamp(
                        f.last_modified_timestamp_ms / 1000.0),
                    'lure_expiration': lure_expiration,
                    'active_fort_modifier': active_fort_modifier
                }
                # Send all pokestops to webhooks.
                if 'pokestop' in args.wh_types or (
                        'lure' in args.wh_types and
                        lure_expiration is not None):
                    l_e = None
                    if lure_expiration is not None:
                        l_e = calendar.timegm(lure_expiration.timetuple())
                    wh_pokestop = pokestops[f.id].copy()
                    wh_pokestop.update({
                        'pokestop_id': f.id,
                        'last_modified': f.last_modified_timestamp_ms,
                        'lure_expiration': l_e,
                    })
                    wh_update_queue.put(('pokestop', wh_pokestop))
            # Currently, there are only stops and gyms.
            elif not args.no_gyms and f.type == 0:
                b64_gym_id = str(f.id)
                gym_display = f.gym_display
                raid_info = f.raid_info
                park = Gym.get_gyms_park(f.id)
                # Send gyms to webhooks.
                if 'gym' in args.wh_types:
                    raid_active_until = 0
                    raid_battle_ms = raid_info.raid_battle_ms
                    raid_end_ms = raid_info.raid_end_ms
                    if raid_battle_ms / 1000 > time.time():
                        raid_active_until = raid_end_ms / 1000
                    # Explicitly set 'webhook_data', in case we want to change
                    # the information pushed to webhooks. Similar to above
                    # and previous commits.
                    wh_update_queue.put(('gym', {
                        'gym_id':
                            b64_gym_id,
                        'team_id':
                            f.owned_by_team,
                        'park':
                            park,
                        'guard_pokemon_id':
                            f.guard_pokemon_id,
                        'slots_available':
                            gym_display.slots_available,
                        'total_cp':
                            gym_display.total_gym_cp,
                        'enabled':
                            f.enabled,
                        'latitude':
                            f.latitude,
                        'longitude':
                            f.longitude,
                        'lowest_pokemon_motivation':
                            gym_display.lowest_pokemon_motivation,
                        'occupied_since':
                            calendar.timegm((datetime.utcnow() - timedelta(
                                milliseconds=gym_display.occupied_millis)
                            ).timetuple()),
                        'last_modified':
                            f.last_modified_timestamp_ms,
                        'raid_active_until':
                            raid_active_until
                    }))
                gyms[f.id] = {
                    'gym_id':
                        f.id,
                    'team_id':
                        f.owned_by_team,
                    'park':
                        park,
                    'guard_pokemon_id':
                        f.guard_pokemon_id,
                    'slots_available':
                        gym_display.slots_available,
                    'total_cp':
                        gym_display.total_gym_cp,
                    'enabled':
                        f.enabled,
                    'latitude':
                        f.latitude,
                    'longitude':
                        f.longitude,
                    'last_modified':
                        datetime.utcfromtimestamp(
                            f.last_modified_timestamp_ms / 1000.0),
                }
            if not args.no_raids and f.type == 0:
                if f.HasField('raid_info'):
                    raids[f.id] = {
                        'gym_id': f.id,
                        'level': raid_info.raid_level,
                        'spawn': datetime.utcfromtimestamp(
                            raid_info.raid_spawn_ms / 1000.0),
                        'start': datetime.utcfromtimestamp(
                            raid_info.raid_battle_ms / 1000.0),
                        'end': datetime.utcfromtimestamp(
                            raid_info.raid_end_ms / 1000.0),
                        'pokemon_id': None,
                        'cp': None,
                        'move_1': None,
                        'move_2': None
                    }
                    if raid_info.HasField('raid_pokemon'):
                        raid_pokemon = raid_info.raid_pokemon
                        raids[f.id].update({
                            'pokemon_id': raid_pokemon.pokemon_id,
                            'cp': raid_pokemon.cp,
                            'move_1': raid_pokemon.move_1,
                            'move_2': raid_pokemon.move_2
                        })
                    # 'egg' events are raids with no Pokemon hatched yet;
                    # 'raid' events have the raid boss set.
                    if ('egg' in args.wh_types and
                            raids[f.id]['pokemon_id'] is None) or (
                                'raid' in args.wh_types and
                                raids[f.id]['pokemon_id'] is not None):
                        wh_raid = raids[f.id].copy()
                        wh_raid.update({
                            'gym_id': b64_gym_id,
                            'team_id': f.owned_by_team,
                            'spawn': raid_info.raid_spawn_ms / 1000,
                            'start': raid_info.raid_battle_ms / 1000,
                            'end': raid_info.raid_end_ms / 1000,
                            'latitude': f.latitude,
                            'longitude': f.longitude
                        })
                        wh_update_queue.put(('raid', wh_raid))
        # Let db do it's things while we try to spin.
        if args.pokestop_spinning:
            for f in forts:
                # Spin Pokestop with 50% chance.
                if f.type == 1 and pokestop_spinnable(f, scan_coords):
                    spin_pokestop(api, account, args, f, scan_coords)
        # Helping out the GC.
        del forts
    log.info('Parsing found Pokemon: %d (%d filtered), nearby: %d, ' +
             'pokestops: %d, gyms: %d, raids: %d.',
             len(pokemon) + skipped,
             filtered,
             nearby_pokemon,
             len(pokestops) + stopsskipped,
             len(gyms),
             len(raids))
    log.debug('Skipped Pokemon: %d, pokestops: %d.', skipped, stopsskipped)
    # Look for spawnpoints within scan_loc that are not here to see if we
    # can narrow down tth window.
    for sp in ScannedLocation.linked_spawn_points(scan_location['cellid']):
        if sp['missed_count'] > 5:
            continue
        if sp['id'] in sp_id_list:
            # Don't overwrite changes from this parse with DB version.
            sp = spawn_points[sp['id']]
        else:
            # If the cell has completed, we need to classify all
            # the SPs that were not picked up in the scan
            if just_completed:
                SpawnpointDetectionData.classify(sp, scan_location, now_secs)
                spawn_points[sp['id']] = sp
            if SpawnpointDetectionData.unseen(sp, now_secs):
                spawn_points[sp['id']] = sp
            endpoints = SpawnPoint.start_end(sp, args.spawn_delay)
            if clock_between(endpoints[0], now_secs, endpoints[1]):
                sp['missed_count'] += 1
                spawn_points[sp['id']] = sp
                log.warning('%s kind spawnpoint %s has no Pokemon %d times'
                            ' in a row.',
                            sp['kind'], sp['id'], sp['missed_count'])
                log.info('Possible causes: Still doing initial scan, super'
                         ' rare double spawnpoint during')
                log.info('hidden period, or Niantic has removed '
                         'spawnpoint.')
        if (not SpawnPoint.tth_found(sp) and scan_location['done'] and
                (now_secs - sp['latest_seen'] -
                 args.spawn_delay) % 3600 < 60):
            # Warning: python uses modulo as the least residue, not as
            # remainder, so we don't apply it to the result. Just a
            # safety measure until we can guarantee there's never a negative
            # result.
            log.warning('Spawnpoint %s was unable to locate a TTH, with '
                        'only %ss after Pokemon last seen.', sp['id'],
                        (now_secs % 3600 - sp['latest_seen'] % 3600))
            log.info('Restarting current 15 minute search for TTH.')
            if sp['id'] not in sp_id_list:
                SpawnpointDetectionData.classify(sp, scan_location, now_secs)
            sp['latest_seen'] = (sp['latest_seen'] - 60) % 3600
            sp['earliest_unseen'] = (
                sp['earliest_unseen'] + 14 * 60) % 3600
            spawn_points[sp['id']] = sp
    db_update_queue.put((ScannedLocation, {0: scan_location}))
    if pokemon:
        db_update_queue.put((Pokemon, pokemon))
    if pokestops:
        db_update_queue.put((Pokestop, pokestops))
    if gyms:
        db_update_queue.put((Gym, gyms))
    if raids:
        db_update_queue.put((Raid, raids))
    if spawn_points:
        db_update_queue.put((SpawnPoint, spawn_points))
        db_update_queue.put((ScanSpawnPoint, scan_spawn_points))
        if sightings:
            db_update_queue.put((SpawnpointDetectionData, sightings))
    if not nearby_pokemon and not wild_pokemon:
        # After parsing the forts, we'll mark this scan as bad due to
        # a possible speed violation.
        return {
            'count': wild_pokemon_count + forts_count,
            'gyms': gyms,
            'sp_id_list': sp_id_list,
            'bad_scan': True,
            'scan_secs': now_secs
        }
    return {
        'count': wild_pokemon_count + forts_count,
        'gyms': gyms,
        'sp_id_list': sp_id_list,
        'bad_scan': False,
        'scan_secs': now_secs
    }
def encounter_pokemon(args, pokemon, account, api, account_sets, status,
                      key_scheduler):
    """Re-encounter a wild Pokemon with a level 30+ account to get IVs/CP.

    Uses the current account if it's already L30+, otherwise borrows one
    from `account_sets` (released again at the end). Returns the
    encounter's pokemon_data on success, or False on any failure (no L30
    account available, captcha, API/status error, exception).
    """
    using_accountset = False
    hlvl_account = None
    pokemon_id = None
    result = False
    try:
        hlvl_api = None
        pokemon_id = pokemon.pokemon_data.pokemon_id
        scan_location = [pokemon.latitude, pokemon.longitude]
        # If the host has L30s in the regular account pool, we
        # can just use the current account.
        if account['level'] >= 30:
            hlvl_account = account
            hlvl_api = api
        else:
            # Get account to use for IV and CP scanning.
            hlvl_account = account_sets.next('30', scan_location)
            using_accountset = True
            time.sleep(args.encounter_delay)
        # If we didn't get an account, we can't encounter.
        if not hlvl_account:
            log.error('No L30 accounts are available, please' +
                      ' consider adding more. Skipping encounter.')
            return False
        # Logging.
        log.info('Encountering Pokemon ID %s with account %s at %s, %s.',
                 pokemon_id, hlvl_account['username'], scan_location[0],
                 scan_location[1])
        # If not args.no_api_store is enabled, we need to
        # re-use an old API object if it's stored and we're
        # using an account from the AccountSet.
        if not args.no_api_store and using_accountset:
            hlvl_api = hlvl_account.get('api', None)
        # Make new API for this account if we're not using an
        # API that's already logged in.
        if not hlvl_api:
            hlvl_api = setup_api(args, status, hlvl_account)
        # If the already existent API is using a proxy but
        # it's not alive anymore, we need to get a new proxy.
        elif (args.proxy and
              (hlvl_api._session.proxies['http'] not in args.proxy)):
            proxy_idx, proxy_new = get_new_proxy(args)
            hlvl_api.set_proxy({
                'http': proxy_new,
                'https': proxy_new})
            hlvl_api._auth_provider.set_proxy({
                'http': proxy_new,
                'https': proxy_new})
        # Hashing key.
        # TODO: Rework inefficient threading.
        if args.hash_key:
            # Grab the next hashing key (Py2 iterator API).
            key = key_scheduler.next()
            log.debug('Using hashing key %s for this encounter.', key)
            hlvl_api.activate_hash_server(key)
        # We have an API object now. If necessary, store it.
        if using_accountset and not args.no_api_store:
            hlvl_account['api'] = hlvl_api
        # Set location.
        hlvl_api.set_position(*scan_location)
        # Log in.
        check_login(args, hlvl_account, hlvl_api, status['proxy_url'])
        encounter_level = hlvl_account['level']
        # User error -> we skip freeing the account.
        if encounter_level < 30:
            log.warning('Expected account of level 30 or higher, ' +
                        'but account %s is only level %d',
                        hlvl_account['username'], encounter_level)
            return False
        # Encounter Pokémon.
        encounter_result = encounter(
            hlvl_api, hlvl_account, pokemon.encounter_id,
            pokemon.spawn_point_id, scan_location)
        # Handle errors.
        if encounter_result:
            enc_responses = encounter_result['responses']
            # Check for captcha.
            if 'CHECK_CHALLENGE' in enc_responses:
                captcha_url = enc_responses['CHECK_CHALLENGE'].challenge_url
                # Throw warning but finish parsing.
                if len(captcha_url) > 1:
                    # Flag account.
                    hlvl_account['captcha'] = True
                    log.error('Account %s encountered a captcha.' +
                              ' Account will not be used.',
                              hlvl_account['username'])
            if ('ENCOUNTER' in enc_responses and
                    enc_responses['ENCOUNTER'].status != 1):
                log.error('There was an error encountering Pokemon ID %s with '
                          + 'account %s: %d.', pokemon_id,
                          hlvl_account['username'],
                          enc_responses['ENCOUNTER'].status)
            else:
                pokemon_info = enc_responses[
                    'ENCOUNTER'].wild_pokemon.pokemon_data
                # Logging: let the user know we succeeded.
                log.info('Encounter for Pokemon ID %s at %s, %s ' +
                         'successful: %s/%s/%s, %s CP.', pokemon_id,
                         pokemon.latitude, pokemon.longitude,
                         pokemon_info.individual_attack,
                         pokemon_info.individual_defense,
                         pokemon_info.individual_stamina, pokemon_info.cp)
                result = pokemon_info
    except Exception as e:
        # Account may not be selected yet.
        if hlvl_account:
            log.warning('Exception occured during encounter with'
                        ' high-level account %s.',
                        hlvl_account['username'])
        log.exception('There was an error encountering Pokemon ID %s: %s.',
                      pokemon_id,
                      e)
    # We're done with the encounter. If it's from an
    # AccountSet, release account back to the pool.
    if using_accountset:
        account_sets.release(hlvl_account)
    return result
def parse_gyms(args, gym_responses, wh_update_queue, db_update_queue):
    """Parse gym detail responses into GymDetails/GymMember/GymPokemon rows.

    Queues row upserts on db_update_queue, emits 'gym_details' webhook
    events when enabled, and synchronously deletes old GymMember rows so
    new membership can be inserted cleanly (see the note below on why
    GymDetails upserts are not fully atomic with the rest).
    """
    gym_details = {}
    gym_members = {}
    gym_pokemon = {}
    # Running index shared by gym_members/gym_pokemon dicts.
    i = 0
    for g in gym_responses.values():
        gym_state = g.gym_status_and_defenders
        gym_id = gym_state.pokemon_fort_proto.id
        gym_details[gym_id] = {
            'gym_id': gym_id,
            'name': g.name,
            'description': g.description,
            'url': g.url
        }
        if 'gym-info' in args.wh_types:
            webhook_data = {
                'id': str(gym_id),
                'latitude': gym_state.pokemon_fort_proto.latitude,
                'longitude': gym_state.pokemon_fort_proto.longitude,
                'team': gym_state.pokemon_fort_proto.owned_by_team,
                'name': g.name,
                'description': g.description,
                'url': g.url,
                'pokemon': [],
            }
        for member in gym_state.gym_defender:
            pokemon = member.motivated_pokemon.pokemon
            gym_members[i] = {
                'gym_id':
                    gym_id,
                'pokemon_uid':
                    pokemon.id,
                'cp_decayed':
                    member.motivated_pokemon.cp_now,
                'deployment_time':
                    datetime.utcnow() -
                    timedelta(milliseconds=member.deployment_totals
                              .deployment_duration_ms)
            }
            gym_pokemon[i] = {
                'pokemon_uid': pokemon.id,
                'pokemon_id': pokemon.pokemon_id,
                'cp': member.motivated_pokemon.cp_when_deployed,
                'num_upgrades': pokemon.num_upgrades,
                'move_1': pokemon.move_1,
                'move_2': pokemon.move_2,
                'height': pokemon.height_m,
                'weight': pokemon.weight_kg,
                'stamina': pokemon.stamina,
                'stamina_max': pokemon.stamina_max,
                'cp_multiplier': pokemon.cp_multiplier,
                'additional_cp_multiplier': pokemon.additional_cp_multiplier,
                'iv_defense': pokemon.individual_defense,
                'iv_stamina': pokemon.individual_stamina,
                'iv_attack': pokemon.individual_attack,
                'costume': pokemon.pokemon_display.costume,
                'form': pokemon.pokemon_display.form,
                'shiny': pokemon.pokemon_display.shiny,
                'last_seen': datetime.utcnow(),
            }
            if 'gym-info' in args.wh_types:
                wh_pokemon = gym_pokemon[i].copy()
                del wh_pokemon['last_seen']
                wh_pokemon.update({
                    'cp_decayed':
                        member.motivated_pokemon.cp_now,
                    'deployment_time': calendar.timegm(
                        gym_members[i]['deployment_time'].timetuple())
                })
                webhook_data['pokemon'].append(wh_pokemon)
            i += 1
        if 'gym-info' in args.wh_types:
            wh_update_queue.put(('gym_details', webhook_data))
    # All this database stuff is synchronous (not using the upsert queue) on
    # purpose. Since the search workers load the GymDetails model from the
    # database to determine if a gym needs to be rescanned, we need to be sure
    # the GymDetails get fully committed to the database before moving on.
    #
    # We _could_ synchronously upsert GymDetails, then queue the other tables
    # for upsert, but that would put that Gym's overall information in a weird
    # non-atomic state.
    # Upsert all the models.
    if gym_details:
        db_update_queue.put((GymDetails, gym_details))
    if gym_pokemon:
        db_update_queue.put((GymPokemon, gym_pokemon))
    # Get rid of all the gym members, we're going to insert new records.
    if gym_details:
        with GymMember.database().execution_context():
            DeleteQuery(GymMember).where(
                GymMember.gym_id << gym_details.keys()).execute()
    # Insert new gym members.
    if gym_members:
        db_update_queue.put((GymMember, gym_members))
    log.info('Upserted gyms: %d, gym members: %d.',
             len(gym_details),
             len(gym_members))
def db_updater(q, db):
    """Consume (model, data) batches from the upsert queue forever.

    On any exception the error is logged and the consumer backs off for
    five seconds before resuming.
    """
    while True:
        try:
            while True:
                # Block until the next batch arrives.
                model, data = q.get()
                t_start = default_timer()
                bulk_upsert(model, data, db)
                q.task_done()
                elapsed = default_timer() - t_start
                log.debug('Upserted to %s, %d records (upsert queue '
                          'remaining: %d) in %.6f seconds.',
                          model.__name__, len(data), q.qsize(), elapsed)
                # Drop references so the GC can reclaim the batch early.
                del model
                del data
                if q.qsize() > 50:
                    log.warning(
                        "DB queue is > 50 (@%d); try increasing --db-threads.",
                        q.qsize())
        except Exception as e:
            # Log, back off, then re-enter the consumer loop.
            log.exception('Exception in db_updater: %s', repr(e))
            time.sleep(5)
def clean_db_loop(args):
    """Periodically purge stale rows from the database.

    The light cleanup pass runs once per minute; the heavier purges
    (pokemon, gyms, spawnpoints, forts) run once every ten minutes.
    Failures are logged and the loop continues.
    """
    regular_cleanup_secs = 60
    full_cleanup_secs = 600
    full_cleanup_timer = default_timer()
    while True:
        try:
            db_cleanup_regular()
            # Remove old worker status entries.
            if args.db_cleanup_worker > 0:
                db_cleanup_worker_status(args.db_cleanup_worker)
            # Check if it's time to run full database cleanup.
            now = default_timer()
            if now - full_cleanup_timer > full_cleanup_secs:
                # Remove old pokemon spawns.
                if args.db_cleanup_pokemon > 0:
                    db_clean_pokemons(args.db_cleanup_pokemon)
                # Remove old gym data.
                if args.db_cleanup_gym > 0:
                    db_clean_gyms(args.db_cleanup_gym)
                # Remove old and extinct spawnpoint data.
                if args.db_cleanup_spawnpoint > 0:
                    db_clean_spawnpoints(args.db_cleanup_spawnpoint)
                # Remove old pokestop and gym locations.
                if args.db_cleanup_forts > 0:
                    db_clean_forts(args.db_cleanup_forts)
                log.info('Full database cleanup completed.')
                full_cleanup_timer = now
            time.sleep(regular_cleanup_secs)
        except Exception as e:
            log.exception('Database cleanup failed: %s.', e)
def db_cleanup_regular():
    """Fast cleanup pass: stale captcha tokens, expired lures, dead keys."""
    log.debug('Regular database cleanup started.')
    start_timer = default_timer()
    now = datetime.utcnow()
    # http://docs.peewee-orm.com/en/latest/peewee/database.html#advanced-connection-management
    # When using an execution context, a separate connection from the pool
    # will be used inside the wrapped block and a transaction will be started.
    with Token.database().execution_context():
        # Remove unusable captcha tokens.
        stale_tokens = (Token
                        .delete()
                        .where(Token.last_updated <
                               now - timedelta(seconds=120)))
        stale_tokens.execute()
        # Remove active modifier from expired lured pokestops.
        expired_lures = (Pokestop
                         .update(lure_expiration=None,
                                 active_fort_modifier=None)
                         .where(Pokestop.lure_expiration < now))
        expired_lures.execute()
        # Remove expired or inactive hashing keys.
        dead_keys = (HashKeys
                     .delete()
                     .where((HashKeys.expires < now - timedelta(days=1)) |
                            (HashKeys.last_updated <
                             now - timedelta(days=7))))
        dead_keys.execute()
    elapsed = default_timer() - start_timer
    log.debug('Completed regular cleanup in %.6f seconds.', elapsed)
def db_cleanup_worker_status(age_minutes):
    """Delete instance and worker status rows older than `age_minutes`.

    Args:
        age_minutes: rows whose last_modified timestamp is older than
            this many minutes ago are removed.
    """
    log.debug('Beginning cleanup of old worker status.')
    start_timer = default_timer()
    worker_status_timeout = datetime.utcnow() - timedelta(minutes=age_minutes)
    with MainWorker.database().execution_context():
        # Remove status information from inactive instances.
        query = (MainWorker
                 .delete()
                 .where(MainWorker.last_modified < worker_status_timeout))
        query.execute()
        # Remove worker status information that are inactive.
        # Bug fix: this delete previously filtered on
        # MainWorker.last_modified — a column from the wrong table —
        # which made the WorkerStatus cleanup incorrect. Filter on
        # WorkerStatus's own timestamp instead.
        query = (WorkerStatus
                 .delete()
                 .where(WorkerStatus.last_modified < worker_status_timeout))
        query.execute()
    time_diff = default_timer() - start_timer
    log.debug('Completed cleanup of old worker status in %.6f seconds.',
              time_diff)
def db_clean_pokemons(age_hours):
    """Delete Pokemon rows that despawned more than `age_hours` ago."""
    log.debug('Beginning cleanup of old pokemon spawns.')
    start_timer = default_timer()
    cutoff = datetime.utcnow() - timedelta(hours=age_hours)
    with Pokemon.database().execution_context():
        deleted = (Pokemon
                   .delete()
                   .where(Pokemon.disappear_time < cutoff)
                   .execute())
        log.debug('Deleted %d old Pokemon entries.', deleted)
    elapsed = default_timer() - start_timer
    log.debug('Completed cleanup of old pokemon spawns in %.6f seconds.',
              elapsed)
def db_clean_gyms(age_hours, gyms_age_days=30):
    """Delete stale gym detail, raid, member and pokemon rows.

    Rows whose timestamp is older than `age_hours` hours are removed.
    NOTE(review): `gyms_age_days` is accepted but never used in this
    body — confirm whether Gym-table cleanup was dropped intentionally.
    """
    log.debug('Beginning cleanup of old gym data.')
    start_timer = default_timer()
    cutoff = datetime.utcnow() - timedelta(hours=age_hours)
    with Gym.database().execution_context():
        # Remove old GymDetails entries.
        rows = (GymDetails
                .delete()
                .where(GymDetails.last_scanned < cutoff)
                .execute())
        log.debug('Deleted %d old GymDetails entries.', rows)
        # Remove old Raid entries.
        rows = (Raid
                .delete()
                .where(Raid.end < cutoff)
                .execute())
        log.debug('Deleted %d old Raid entries.', rows)
        # Remove old GymMember entries.
        rows = (GymMember
                .delete()
                .where(GymMember.last_scanned < cutoff)
                .execute())
        log.debug('Deleted %d old GymMember entries.', rows)
        # Remove old GymPokemon entries.
        rows = (GymPokemon
                .delete()
                .where(GymPokemon.last_seen < cutoff)
                .execute())
        log.debug('Deleted %d old GymPokemon entries.', rows)
    elapsed = default_timer() - start_timer
    log.debug('Completed cleanup of old gym data in %.6f seconds.',
              elapsed)
def db_clean_spawnpoints(age_hours, missed=5):
    """Purge spawnpoints not scanned for `age_hours` hours that were missed
    more than `missed` times, along with their detection data, link rows
    and the scanned locations that referenced them.

    Deletes are chunked into groups of `step` ids to stay under SQL
    variable limits; peewee's `<<` operator builds an IN clause.

    Args:
        age_hours (int): age threshold (hours) applied to last_scanned.
        missed (int): minimum missed_count for a spawnpoint to qualify.
    """
    log.debug('Beginning cleanup of old spawnpoint data.')
    start_timer = default_timer()
    # Maximum number of variables to include in a single query.
    step = 500
    spawnpoint_timeout = datetime.utcnow() - timedelta(hours=age_hours)
    with SpawnPoint.database().execution_context():
        # Select old SpawnPoint entries.
        query = (SpawnPoint
                 .select(SpawnPoint.id)
                 .where((SpawnPoint.last_scanned < spawnpoint_timeout) &
                        (SpawnPoint.missed_count > missed))
                 .dicts())
        old_sp = [(sp['id']) for sp in query]
        num_records = len(old_sp)
        log.debug('Found %d old SpawnPoint entries.', num_records)
        # Remove SpawnpointDetectionData entries associated to old spawnpoints.
        num_rows = 0
        for i in range(0, num_records, step):
            query = (SpawnpointDetectionData
                     .delete()
                     .where((SpawnpointDetectionData.spawnpoint_id <<
                             old_sp[i:min(i + step, num_records)])))
            num_rows += query.execute()
        # Remove old SpawnPointDetectionData entries.
        # (Also catches detection data whose scan time itself expired,
        # independent of the spawnpoint selection above.)
        query = (SpawnpointDetectionData
                 .delete()
                 .where((SpawnpointDetectionData.scan_time <
                         spawnpoint_timeout)))
        num_rows += query.execute()
        log.debug('Deleted %d old SpawnpointDetectionData entries.', num_rows)
        # Select ScannedLocation entries associated to old spawnpoints.
        # Collected into a set so each location is processed once.
        sl_delete = set()
        for i in range(0, num_records, step):
            query = (ScanSpawnPoint
                     .select()
                     .where((ScanSpawnPoint.spawnpoint <<
                             old_sp[i:min(i + step, num_records)]))
                     .dicts())
            for sp in query:
                sl_delete.add(sp['scannedlocation'])
        log.debug('Found %d ScannedLocation entries from old spawnpoints.',
                  len(sl_delete))
        # Remove ScanSpawnPoint entries associated to old spawnpoints.
        num_rows = 0
        for i in range(0, num_records, step):
            query = (ScanSpawnPoint
                     .delete()
                     .where((ScanSpawnPoint.spawnpoint <<
                             old_sp[i:min(i + step, num_records)])))
            num_rows += query.execute()
        log.debug('Deleted %d ScanSpawnPoint entries from old spawnpoints.',
                  num_rows)
        # Remove old and invalid SpawnPoint entries.
        num_rows = 0
        for i in range(0, num_records, step):
            query = (SpawnPoint
                     .delete()
                     .where((SpawnPoint.id <<
                             old_sp[i:min(i + step, num_records)])))
            num_rows += query.execute()
        log.debug('Deleted %d old SpawnPoint entries.', num_rows)
        # From here on, iterate over the collected scanned locations.
        sl_delete = list(sl_delete)
        num_records = len(sl_delete)
        # Remove ScanSpawnPoint entries associated with old scanned locations.
        num_rows = 0
        for i in range(0, num_records, step):
            query = (ScanSpawnPoint
                     .delete()
                     .where((ScanSpawnPoint.scannedlocation <<
                             sl_delete[i:min(i + step, num_records)])))
            num_rows += query.execute()
        log.debug('Deleted %d ScanSpawnPoint entries from old scan locations.',
                  num_rows)
        # Remove ScannedLocation entries associated with old spawnpoints.
        # Only locations that are themselves stale are deleted.
        num_rows = 0
        for i in range(0, num_records, step):
            query = (ScannedLocation
                     .delete()
                     .where((ScannedLocation.cellid <<
                             sl_delete[i:min(i + step, num_records)]) &
                            (ScannedLocation.last_modified <
                             spawnpoint_timeout)))
            num_rows += query.execute()
        log.debug('Deleted %d ScannedLocation entries from old spawnpoints.',
                  num_rows)
    time_diff = default_timer() - start_timer
    log.debug('Completed cleanup of old spawnpoint data in %.6f seconds.',
              time_diff)
def db_clean_forts(age_hours):
    """Delete Gym and Pokestop rows not refreshed within `age_hours`."""
    log.debug('Beginning cleanup of old forts.')
    cleanup_start = default_timer()
    # Forts last updated before this cutoff are stale.
    cutoff = datetime.utcnow() - timedelta(hours=age_hours)
    with Gym.database().execution_context():
        # Remove old Gym entries.
        deleted = (Gym
                   .delete()
                   .where(Gym.last_scanned < cutoff)
                   .execute())
        log.debug('Deleted %d old Gym entries.', deleted)
        # Remove old Pokestop entries.
        deleted = (Pokestop
                   .delete()
                   .where(Pokestop.last_updated < cutoff)
                   .execute())
        log.debug('Deleted %d old Pokestop entries.', deleted)
    elapsed = default_timer() - cleanup_start
    log.debug('Completed cleanup of old forts in %.6f seconds.',
              elapsed)
def bulk_upsert(cls, data, db):
    """Bulk insert-or-update `data` into `cls`'s MySQL table in batches.

    Builds a raw ``INSERT INTO ... ON DUPLICATE KEY UPDATE x=VALUES(x)``
    statement (peewee only offers REPLACE INTO, which deletes then
    re-inserts) and drives it with ``cursor.executemany``.

    NOTE(review): this relies on Python 2 semantics in two places —
    ``data.values()`` must return an indexable list (``rows[0]``,
    slicing below), and ``filter()`` must return a list so the truthiness
    test in the except-branch works. Under Python 3 both would break.

    Args:
        cls: peewee model class whose table is the upsert target.
        data (dict): mapping whose values are row dicts for `cls`.
        db: peewee MySQL database handle used for the transaction.
    """
    rows = data.values()
    num_rows = len(rows)
    i = 0
    # This shouldn't happen, ever, but anyways...
    if num_rows < 1:
        return
    # We used to support SQLite and it has a default max 999 parameters,
    # so we limited how many rows we insert for it.
    # Oracle: 64000
    # MySQL: 65535
    # PostgreSQL: 34464
    # Sqlite: 999
    step = 500
    # Prepare for our query.
    conn = db.get_conn()
    cursor = db.get_cursor()
    # We build our own INSERT INTO ... ON DUPLICATE KEY UPDATE x=VALUES(x)
    # query, making sure all data is properly escaped. We use
    # placeholders for VALUES(%s, %s, ...) so we can use executemany().
    # We use peewee's InsertQuery to retrieve the fields because it
    # takes care of peewee's internals (e.g. required default fields).
    query = InsertQuery(cls, rows=[rows[0]])
    # Take the first row. We need to call _iter_rows() for peewee internals.
    # Using next() for a single item is not considered "pythonic".
    first_row = {}
    for row in query._iter_rows():
        first_row = row
        break
    # Convert the row to its fields, sorted by peewee.
    row_fields = sorted(first_row.keys(), key=lambda x: x._sort_key)
    row_fields = map(lambda x: x.name, row_fields)
    # Translate to proper column name, e.g. foreign keys.
    db_columns = [peewee_attr_to_col(cls, f) for f in row_fields]
    # Store defaults so we can fall back to them if a value
    # isn't set.
    defaults = {}
    for f in cls._meta.fields.values():
        # Use DB column name as key.
        field_name = f.name
        field_default = cls._meta.defaults.get(f, None)
        defaults[field_name] = field_default
    # Assign fields, placeholders and assignments after defaults
    # so our lists/keys stay in order.
    table = '`'+conn.escape_string(cls._meta.db_table)+'`'
    escaped_fields = ['`'+conn.escape_string(f)+'`' for f in db_columns]
    placeholders = ['%s' for escaped_field in escaped_fields]
    assignments = ['{x} = VALUES({x})'.format(
        x=escaped_field
    ) for escaped_field in escaped_fields]
    # We build our own MySQL query because peewee only supports
    # REPLACE INTO for upserting, which deletes the old row before
    # adding the new one, giving a serious performance hit.
    query_string = ('INSERT INTO {table} ({fields}) VALUES'
                    + ' ({placeholders}) ON DUPLICATE KEY UPDATE'
                    + ' {assignments}')
    # Prepare transaction.
    with db.atomic():
        # `i` only advances on success; transient failures retry the same
        # batch after a short sleep (see the except-branch below).
        while i < num_rows:
            start = i
            end = min(i + step, num_rows)
            name = cls.__name__
            log.debug('Inserting items %d to %d for %s.', start, end, name)
            try:
                # Turn off FOREIGN_KEY_CHECKS on MySQL, because apparently it's
                # unable to recognize strings to update unicode keys for
                # foreign key fields, thus giving lots of foreign key
                # constraint errors.
                db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')
                # Time to bulk upsert our data. Convert objects to a list of
                # values for executemany(), and fall back to defaults if
                # necessary.
                batch = []
                batch_rows = rows[i:min(i + step, num_rows)]
                # We pop them off one by one so we can gradually release
                # memory as we pass each item. No duplicate memory usage.
                while len(batch_rows) > 0:
                    row = batch_rows.pop()
                    row_data = []
                    # Parse rows, build arrays of values sorted via row_fields.
                    for field in row_fields:
                        # Take a default if we need it.
                        if field not in row:
                            default = defaults.get(field, None)
                            # peewee's defaults can be callable, e.g. current
                            # time. We only call when needed to insert.
                            if callable(default):
                                default = default()
                            row[field] = default
                        # Append to keep the exact order, and only these
                        # fields.
                        row_data.append(row[field])
                    # Done preparing, add it to the batch.
                    batch.append(row_data)
                # Format query and go.
                formatted_query = query_string.format(
                    table=table,
                    fields=', '.join(escaped_fields),
                    placeholders=', '.join(placeholders),
                    assignments=', '.join(assignments)
                )
                cursor.executemany(formatted_query, batch)
                db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')
            except Exception as e:
                # If there is a DB table constraint error, dump the data and
                # don't retry.
                #
                # Unrecoverable error strings:
                unrecoverable = ['constraint', 'has no attribute',
                                 'peewee.IntegerField object at']
                # NOTE(review): Python 2 filter() returns a list here; under
                # Python 3 this object would always be truthy.
                has_unrecoverable = filter(
                    lambda x: x in str(e), unrecoverable)
                if has_unrecoverable:
                    log.exception('%s. Data is:', repr(e))
                    log.warning(data.items())
                else:
                    log.warning('%s... Retrying...', repr(e))
                    time.sleep(1)
                    # Retry the same batch: `i` is not advanced.
                    continue
            i += step
def create_tables(db):
    """Create any of the model tables that do not yet exist in `db`."""
    tables = [Pokemon, Pokestop, Gym, Raid, ScannedLocation, GymDetails,
              GymMember, GymPokemon, MainWorker, WorkerStatus,
              SpawnPoint, ScanSpawnPoint, SpawnpointDetectionData,
              Token, LocationAltitude, PlayerLocale, HashKeys]
    with db.execution_context():
        for model in tables:
            # safe=True makes the CREATE a no-op if we lose a race,
            # but we still check first so we can log what happened.
            if model.table_exists():
                log.debug('Skipping table %s, it already exists.',
                          model.__name__)
            else:
                log.info('Creating table: %s', model.__name__)
                db.create_tables([model], safe=True)
def drop_tables(db):
    """Drop every model table, with FK checks disabled so order is moot."""
    tables = [Pokemon, Pokestop, Gym, Raid, ScannedLocation, Versions,
              GymDetails, GymMember, GymPokemon, MainWorker,
              WorkerStatus, SpawnPoint, ScanSpawnPoint,
              SpawnpointDetectionData, LocationAltitude, PlayerLocale,
              Token, HashKeys]
    with db.execution_context():
        # Foreign key checks off so we need not topologically sort drops.
        db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')
        for model in tables:
            if not model.table_exists():
                continue
            log.info('Dropping table: %s', model.__name__)
            db.drop_tables([model], safe=True)
        db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')
def verify_table_encoding(db):
    """Convert any table not using utf8mb4/utf8mb4_unicode_ci to that
    charset and collation.

    Reads the target schema name from the global ``args.db_name``.
    MySQL-specific (information_schema query, backtick quoting,
    FOREIGN_KEY_CHECKS toggling).
    """
    with db.execution_context():
        # Find tables whose collation differs from the expected one.
        cmd_sql = '''
            SELECT table_name FROM information_schema.tables WHERE
            table_collation != "utf8mb4_unicode_ci"
            AND table_schema = "%s";
            ''' % args.db_name
        change_tables = db.execute_sql(cmd_sql)
        cmd_sql = "SHOW tables;"
        tables = db.execute_sql(cmd_sql)
        if change_tables.rowcount > 0:
            log.info('Changing collation and charset on %s tables.',
                     change_tables.rowcount)
            if change_tables.rowcount == tables.rowcount:
                # Fixed typo in original message ("this might a take while").
                log.info('Changing whole database,' +
                         ' this might take a while.')
            # FK checks must be off while converting related tables.
            db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')
            for table in change_tables:
                log.debug('Changing collation and charset on table %s.',
                          table[0])
                # Backtick-quote the identifier so table names that are
                # reserved words cannot break the DDL statement.
                cmd_sql = '''ALTER TABLE `%s` CONVERT TO CHARACTER SET utf8mb4
                    COLLATE utf8mb4_unicode_ci;''' % str(table[0])
                db.execute_sql(cmd_sql)
            db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')
def verify_database_schema(db):
    """Check the stored schema version against `db_schema_version`, running
    migrations when behind and exiting the process when migration fails or
    the database is newer than this code supports.

    Closes `db` before returning.
    """
    if not Versions.table_exists():
        db.create_tables([Versions])
        if ScannedLocation.table_exists():
            # Versions table doesn't exist, but there are tables. This must
            # mean the user is coming from a database that existed before we
            # started tracking the schema version. Perform a full upgrade.
            InsertQuery(Versions, {Versions.key: 'schema_version',
                                   Versions.val: 0}).execute()
            database_migrate(db, 0)
        else:
            # Fresh database: record the current schema version directly.
            InsertQuery(Versions, {Versions.key: 'schema_version',
                                   Versions.val: db_schema_version}).execute()
    else:
        db_ver = Versions.get(Versions.key == 'schema_version').val
        if db_ver < db_schema_version:
            if not database_migrate(db, db_ver):
                log.error('Error migrating database')
                sys.exit(1)
        elif db_ver > db_schema_version:
            log.error('Your database version (%i) appears to be newer than '
                      'the code supports (%i).', db_ver, db_schema_version)
            log.error('Please upgrade your code base or drop all tables in '
                      'your database.')
            sys.exit(1)
    db.close()
def database_migrate(db, old_ver):
    """Migrate the database schema from `old_ver` up to `db_schema_version`.

    Each ``if old_ver < N`` section applies the changes that produced
    schema version N, in order. MySQL-specific DDL throughout.

    Returns:
        True once all applicable migrations have run.
    """
    # Update database schema version.
    Versions.update(val=db_schema_version).where(
        Versions.key == 'schema_version').execute()
    log.info('Detected database version %i, updating to %i...',
             old_ver, db_schema_version)
    # Perform migrations here.
    migrator = MySQLMigrator(db)
    if old_ver < 20:
        migrate(
            migrator.drop_column('gym', 'gym_points'),
            migrator.add_column('gym', 'slots_available',
                                SmallIntegerField(null=False, default=0)),
            migrator.add_column('gymmember', 'cp_decayed',
                                SmallIntegerField(null=False, default=0)),
            # NOTE(review): datetime.utcnow() is evaluated once, at
            # migration time, so the column default is a fixed timestamp —
            # presumably acceptable for a one-off backfill; confirm.
            migrator.add_column('gymmember', 'deployment_time',
                                DateTimeField(
                                    null=False, default=datetime.utcnow())),
            migrator.add_column('gym', 'total_cp',
                                SmallIntegerField(null=False, default=0))
        )
    if old_ver < 21:
        # Schema 21 changes key column types (base64/hex ids to numeric).
        # MySQL can't ALTER these in place, so each affected table is
        # renamed aside, recreated with the new definition, repopulated
        # with converted data, and the old copy dropped.
        # First rename all tables being modified.
        db.execute_sql('RENAME TABLE `pokemon` TO `pokemon_old`;')
        db.execute_sql(
            'RENAME TABLE `locationaltitude` TO `locationaltitude_old`;')
        db.execute_sql(
            'RENAME TABLE `scannedlocation` TO `scannedlocation_old`;')
        db.execute_sql('RENAME TABLE `spawnpoint` TO `spawnpoint_old`;')
        db.execute_sql('RENAME TABLE `spawnpointdetectiondata` TO ' +
                       '`spawnpointdetectiondata_old`;')
        db.execute_sql('RENAME TABLE `gymmember` TO `gymmember_old`;')
        db.execute_sql('RENAME TABLE `gympokemon` TO `gympokemon_old`;')
        db.execute_sql(
            'RENAME TABLE `scanspawnpoint` TO `scanspawnpoint_old`;')
        # Then create all tables that we renamed with the proper fields.
        create_tables(db)
        # Insert data back with the correct format
        db.execute_sql(
            'INSERT INTO `pokemon` SELECT ' +
            'FROM_BASE64(encounter_id) as encounter_id, ' +
            'CONV(spawnpoint_id, 16,10) as spawnpoint_id, ' +
            'pokemon_id, latitude, longitude, disappear_time, ' +
            'individual_attack, individual_defense, individual_stamina, ' +
            'move_1, move_2, cp, cp_multiplier, weight, height, gender, ' +
            'form, last_modified ' +
            'FROM `pokemon_old`;')
        db.execute_sql(
            'INSERT INTO `locationaltitude` SELECT ' +
            'CONV(cellid, 16,10) as cellid, ' +
            'latitude, longitude, last_modified, altitude ' +
            'FROM `locationaltitude_old`;')
        db.execute_sql(
            'INSERT INTO `scannedlocation` SELECT ' +
            'CONV(cellid, 16,10) as cellid, ' +
            'latitude, longitude, last_modified, done, band1, band2, band3, ' +
            'band4, band5, midpoint, width ' +
            'FROM `scannedlocation_old`;')
        db.execute_sql(
            'INSERT INTO `spawnpoint` SELECT ' +
            'CONV(id, 16,10) as id, ' +
            'latitude, longitude, last_scanned, kind, links, missed_count, ' +
            'latest_seen, earliest_unseen ' +
            'FROM `spawnpoint_old`;')
        db.execute_sql(
            'INSERT INTO `spawnpointdetectiondata` ' +
            '(encounter_id, spawnpoint_id, scan_time, tth_secs) SELECT ' +
            'FROM_BASE64(encounter_id) as encounter_id, ' +
            'CONV(spawnpoint_id, 16,10) as spawnpoint_id, ' +
            'scan_time, tth_secs ' +
            'FROM `spawnpointdetectiondata_old`;')
        # A simple alter table does not work ¯\_(ツ)_/¯
        db.execute_sql(
            'INSERT INTO `gymmember` SELECT * FROM `gymmember_old`;')
        db.execute_sql(
            'INSERT INTO `gympokemon` SELECT * FROM `gympokemon_old`;')
        db.execute_sql(
            'INSERT INTO `scanspawnpoint` SELECT ' +
            'CONV(scannedlocation_id, 16,10) as scannedlocation_id, ' +
            'CONV(spawnpoint_id, 16,10) as spawnpoint_id ' +
            'FROM `scanspawnpoint_old`;')
        db.execute_sql(
            'ALTER TABLE `pokestop` MODIFY active_fort_modifier SMALLINT(6);')
        # Drop all _old tables
        db.execute_sql('DROP TABLE `scanspawnpoint_old`;')
        db.execute_sql('DROP TABLE `pokemon_old`;')
        db.execute_sql('DROP TABLE `locationaltitude_old`;')
        db.execute_sql('DROP TABLE `spawnpointdetectiondata_old`;')
        db.execute_sql('DROP TABLE `scannedlocation_old`;')
        db.execute_sql('DROP TABLE `spawnpoint_old`;')
        db.execute_sql('DROP TABLE `gymmember_old`;')
        db.execute_sql('DROP TABLE `gympokemon_old`;')
    if old_ver < 22:
        # Drop and add CONSTRAINT_2 with the <= fix.
        db.execute_sql('ALTER TABLE `spawnpoint` '
                       'DROP CONSTRAINT CONSTRAINT_2;')
        db.execute_sql('ALTER TABLE `spawnpoint` '
                       'ADD CONSTRAINT CONSTRAINT_2 ' +
                       'CHECK (`earliest_unseen` <= 3600);')
        # Drop and add CONSTRAINT_4 with the <= fix.
        db.execute_sql('ALTER TABLE `spawnpoint` '
                       'DROP CONSTRAINT CONSTRAINT_4;')
        db.execute_sql('ALTER TABLE `spawnpoint` '
                       'ADD CONSTRAINT CONSTRAINT_4 CHECK ' +
                       '(`latest_seen` <= 3600);')
    if old_ver < 23:
        # Worker status tables are rebuilt from scratch on next run.
        db.drop_tables([WorkerStatus])
        db.drop_tables([MainWorker])
    if old_ver < 24:
        migrate(
            migrator.drop_index('pokemon', 'pokemon_disappear_time'),
            migrator.add_index('pokemon',
                               ('disappear_time', 'pokemon_id'), False)
        )
    if old_ver < 25:
        migrate(
            # Add `costume` column to `pokemon`
            migrator.add_column('pokemon', 'costume',
                                SmallIntegerField(null=True)),
            # Add `form` column to `gympokemon`
            migrator.add_column('gympokemon', 'form',
                                SmallIntegerField(null=True)),
            # Add `costume` column to `gympokemon`
            migrator.add_column('gympokemon', 'costume',
                                SmallIntegerField(null=True))
        )
    if old_ver < 26:
        migrate(
            # Add `park` column to `gym`
            migrator.add_column('gym', 'park', BooleanField(default=False))
        )
    if old_ver < 27:
        migrate(
            # Add `shiny` column to `gympokemon`
            migrator.add_column('gympokemon', 'shiny',
                                SmallIntegerField(null=True))
        )
    if old_ver < 28:
        migrate(
            migrator.add_column('pokemon', 'weather_boosted_condition',
                                SmallIntegerField(null=True))
        )
    if old_ver < 29:
        db.execute_sql('DROP TABLE `trainer`;')
        migrate(
            # drop trainer from gympokemon
            migrator.drop_column('gympokemon', 'trainer_name')
        )
    if old_ver < 30:
        db.execute_sql(
            'ALTER TABLE `hashkeys` '
            'MODIFY COLUMN `maximum` INTEGER,'
            'MODIFY COLUMN `remaining` INTEGER,'
            'MODIFY COLUMN `peak` INTEGER;'
        )
    # Always log that we're done.
    log.info('Schema upgrade complete.')
    return True
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
import uuid
import six
from django.test import TestCase
from django_extensions.db.fields import PostgreSQLUUIDField
from .testapp.models import (
UUIDTestAgregateModel, UUIDTestManyToManyModel, UUIDTestModel_field,
UUIDTestModel_pk,
)
class UUIDFieldTest(TestCase):
    """Exercise UUIDField on plain-field, primary-key, aggregate and
    many-to-many test models."""

    def test_UUID_field_create(self):
        value = six.u('550e8400-e29b-41d4-a716-446655440000')
        obj = UUIDTestModel_field.objects.create(a=6, uuid_field=value)
        self.assertEqual(obj.uuid_field, value)

    def test_UUID_field_pk_create(self):
        value = six.u('550e8400-e29b-41d4-a716-446655440000')
        obj = UUIDTestModel_pk.objects.create(uuid_field=value)
        self.assertEqual(obj.uuid_field, value)
        self.assertEqual(obj.pk, value)

    def test_UUID_field_pk_agregate_create(self):
        value = six.u('550e8400-e29b-41d4-a716-446655440001')
        obj = UUIDTestAgregateModel.objects.create(a=6, uuid_field=value)
        self.assertEqual(obj.a, 6)
        # The inherited pk is a 36-character canonical uuid string.
        self.assertIsInstance(obj.pk, six.string_types)
        self.assertEqual(len(obj.pk), 36)

    def test_UUID_field_manytomany_create(self):
        value = six.u('550e8400-e29b-41d4-a716-446655440010')
        obj = UUIDTestManyToManyModel.objects.create(uuid_field=value)
        self.assertEqual(obj.uuid_field, value)
        self.assertEqual(obj.pk, value)
class PostgreSQLUUIDFieldTest(TestCase):
    """Check that PostgreSQLUUIDField casts every accepted uuid input form
    to the canonical lower-case hyphenated text representation."""

    def test_uuid_casting(self):
        # As explain by postgres documentation
        # http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
        # an uuid needs to be a sequence of lower-case hexadecimal digits, in
        # several groups separated by hyphens, specifically a group of 8 digits
        # followed by three groups of 4 digits followed by a group of 12 digits
        # Use raw strings: '\d' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
        matcher = re.compile(r'^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}'
                             r'-[\da-f]{12}$')
        field = PostgreSQLUUIDField()
        # Cover str, urn, hex, int and bytes input forms.
        for value in (str(uuid.uuid4()), uuid.uuid4().urn, uuid.uuid4().hex,
                      uuid.uuid4().int, uuid.uuid4().bytes):
            prepared_value = field.get_db_prep_value(value, None)
            self.assertTrue(matcher.match(prepared_value) is not None,
                            prepared_value)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from yosai.core.realm.realm import AccountStoreRealm
from yosai.core.authz.authz import DefaultPermission
from yosai.core.subject.identifier import SimpleIdentifierCollection
from yosai.core.exceptions import IncorrectCredentialsException
from anchore_engine.db import AccountTypes
from anchore_engine.plugins.authorization.client import AuthzPluginHttpClient, Action
from anchore_engine.apis.authentication import IdentityContext
from anchore_engine.subsys import logger
from anchore_engine.subsys.auth.stores.verifier import JwtToken
class CaseSensitivePermission(DefaultPermission):
    """DefaultPermission variant whose parts can be kept case-sensitive."""

    def __init__(self, wildcard_string=None, parts=None, case_sensitive=True):
        # Replace constructor with code from the WildcardPermission constructor directly, but with parts init from DefaultPermission
        # This is necessary to get the case-sensitivity to init properly since the Default->Wildcard path messes it up
        self.case_sensitive = case_sensitive
        # Start from the match-everything permission.
        self.parts = {"domain": {"*"}, "action": {"*"}, "target": {"*"}}
        if wildcard_string:
            # Parse e.g. "domain:action:target" honoring case sensitivity.
            self.setparts(wildcard_string, case_sensitive)
        else:
            # NOTE(review): "domain" is wrapped in a one-element list before
            # set(), but "action"/"target" are passed to set() directly — a
            # bare string there would be split into characters. Presumably
            # callers always pass iterables of strings for those keys;
            # confirm against call sites.
            self.parts = {
                "domain": set([parts.get("domain", "*")]),
                "action": set(parts.get("action", "*")),
                "target": set(parts.get("target", "*")),
            }
class UsernamePasswordRealm(AccountStoreRealm):
    """
    Anchore customized version of the default AccountStoreRealm from yosai.
    Uses a username/password db store.
    """

    # Optional delegate used for authz checks on non-service, non-admin
    # accounts; set externally when an authz plugin is configured.
    __external_authorizer__ = None

    # --------------------------------------------------------------------------
    # Authentication
    # --------------------------------------------------------------------------

    def get_authentication_info(self, identifier):
        """
        The default authentication caching policy is to cache an account's
        credentials that are queried from an account store, for a specific
        user, so to facilitate any subsequent authentication attempts for
        that user. Naturally, in order to cache one must have a CacheHandler.
        If a user were to fail to authenticate, perhaps due to an
        incorrectly entered password, during the the next authentication
        attempt (of that user id) the cached account will be readily
        available from cache and used to match credentials, boosting
        performance.

        :returns: an Account object (dict), or None when no credentials
            were found for `identifier`
        """
        account_info = None
        ch = self.cache_handler

        def query_authc_info(self):
            # Inner fallback used both as the cache creator_func and, when
            # no cache handler is configured, called directly. The `self`
            # parameter is unused in the body; the closure reads
            # `identifier` from the enclosing scope.
            msg = (
                "Could not obtain cached credentials for [{0}]. "
                "Will try to acquire credentials from account store.".format(identifier)
            )
            logger.debug(msg)
            # account_info is a dict
            account_info = self.account_store.get_authc_info(identifier)
            if account_info is None:
                msg = "Could not get stored credentials for {0}".format(identifier)
                raise ValueError(msg)
            return account_info

        try:
            msg2 = "Attempting to get cached credentials for [{0}]".format(identifier)
            logger.debug(msg2)
            # account_info is a dict
            account_info = ch.get_or_create(
                domain="authentication:" + self.name,
                identifier=identifier,
                creator_func=query_authc_info,
                creator=self,
            )
        except AttributeError:
            # this means the cache_handler isn't configured
            account_info = query_authc_info(self)
        except ValueError:
            # Unknown user: fall through with account_info still None.
            msg3 = (
                "No account credentials found for identifiers [{0}]. "
                "Returning None.".format(identifier)
            )
            logger.warn(msg3)
        if account_info:
            # Expect anchore to add the account_id already
            accnt_id = account_info.get("anchore_identity", identifier)
            account_info["account_id"] = SimpleIdentifierCollection(
                source_name=self.name, identifier=accnt_id
            )
        return account_info

    @staticmethod
    def _should_use_external(identity: IdentityContext):
        # # If a service account or admin account user, use the default handler, not external calls
        return identity.user_account_type not in [
            AccountTypes.service,
            AccountTypes.admin,
        ]

    def is_permitted(self, identifiers, permission_s):
        """
        If the authorization info cannot be obtained from the accountstore,
        permission check tuple yields False.

        :type identifiers: subject_abcs.IdentifierCollection

        :param permission_s: a collection of one or more permissions, represented
                             as string-based permissions or Permission objects
                             and NEVER comingled types
        :type permission_s: list of string(s)

        :yields: tuple(Permission, Boolean)
        """
        logger.debug("Identifiers for is_permitted: {}".format(identifiers.__dict__))
        identifier = identifiers.primary_identifier
        # Delegate to the external authz plugin when one is configured and
        # the account type qualifies; otherwise check internally.
        if self.__external_authorizer__ and self._should_use_external(identifier):
            return self.__external_authorizer__.is_permitted(identifiers, permission_s)
        else:
            return self._check_internal_permitted(identifier, permission_s)

    def _check_internal_permitted(self, identifier, permission_s):
        """
        Do an internal perm check

        Generator yielding one (permission_string, bool) per requested
        permission.

        :param identifier:
        :param permission_s:
        :return:
        """
        for required_perm in permission_s:
            required_permission = CaseSensitivePermission(wildcard_string=required_perm)
            # get_authzd_permissions returns a list of DefaultPermission instances,
            # requesting from cache using '*' and permission.domain as hash keys:
            domain = next(iter(required_permission.domain))
            assigned_permission_s = self.get_authzd_permissions(identifier, domain)
            # Permitted if any assigned permission implies the required one.
            is_permitted = False
            for authorized_permission in assigned_permission_s:
                if authorized_permission.implies(required_permission):
                    is_permitted = True
                    break
            yield (required_perm, is_permitted)
class ExternalAuthorizer(object):
    """
    A realm for doing external authz and internal authc

    __client__ is the initialized http client for requesting authorization
    __account_type_provider__ is a callable that takes a single parameter: username and returns the account type
    """

    def __init__(self, config, enabled=False):
        # config: dict providing 'endpoint' and 'verify_ssl' for the
        # authorization plugin's HTTP client.
        logger.debug("Configuring realm with config: {}".format(config))
        self.enabled = enabled
        self.client = AuthzPluginHttpClient(
            url=config.get("endpoint"), verify_ssl=config.get("verify_ssl")
        )

    def is_permitted(self, identifiers, permission_s):
        """
        Ask the external authz plugin whether each permission is granted.

        Returns a list of (permission_string, bool) tuples; everything is
        denied when the authorizer is disabled or has no client.

        :type identifiers: SimpleRealmCollection
        """
        # Fail all if not configured
        if not self.enabled or not self.client:
            return [(p, False) for p in permission_s]
        result_list = []  # List of tuples (required_perm, is_permitted)
        identifier = identifiers.primary_identifier
        # The primary identifier may be a rich IdentityContext or a plain
        # username string.
        if isinstance(identifier, IdentityContext):
            username = identifier.username
        else:
            username = identifier
        # Map each Action sent to the plugin back to the original
        # permission string so the response can be translated to tuples.
        actions = {}
        for required_perm in permission_s:
            required_permission = CaseSensitivePermission(wildcard_string=required_perm)
            actions[
                Action(
                    domain=",".join(required_permission.domain),
                    action=",".join(required_permission.action),
                    target=",".join(required_permission.target),
                )
            ] = required_perm
        if actions:
            try:
                resp = self.client.authorize(
                    principal=username, action_s=list(actions.keys())
                )
                for i in resp.allowed:
                    result_list.append((actions[i], True))
                for i in resp.denied:
                    result_list.append((actions[i], False))
            except Exception as e:
                # Plugin failure is surfaced to the caller, not masked as
                # a denial.
                logger.exception(
                    "Unexpected error invoking authorization plugin via client: {}".format(
                        e
                    )
                )
                logger.error(
                    "Authorization plugin invocation error. Could not perform a proper authz check. Please check configuration and/or authz service status: {}".format(
                        self.client.url
                    )
                )
                raise e
        return result_list
class JwtRealm(UsernamePasswordRealm):
    """
    Customized version of the UsernamePassword realm but for interacting with a TokenStore
    """

    def authenticate_account(self, authc_token: JwtToken):
        """Verify the user identified by the JWT still exists and return
        its authc info.

        :param authc_token: verified JwtToken carrying the user identifier
        :returns: the account's authc info dict
        :raises IncorrectCredentialsException: on any failure (missing
            identifier, unknown user, lookup error)
        """
        try:
            # Explicit check instead of `assert`, which is stripped under
            # python -O and would silently skip the validation.
            if authc_token.identifier is None:
                raise ValueError("JwtToken has no identifier")
            # Lookup the account info to verify the user identified by the token is still valid
            authc_info = self.get_authentication_info(authc_token.identifier)
            # Overwrite any creds found in db. Cleanup of token vs password is outside the scope of this handler.
            if not authc_info or not authc_info["authc_info"]:
                # No user exists for the identifier
                raise IncorrectCredentialsException
            else:
                return authc_info
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; any other failure still surfaces uniformly
            # as an authentication error.
            logger.debug_exception("Could not authenticate token")
            raise IncorrectCredentialsException()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operations list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.deployment_manager import dm_v2_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.deployment_manager import dm_base
class List(base.ListCommand):
    """List operations in a project.

    Prints a table with summary information on all operations in the project.
    """

    # NOTE(review): calliope uses the class docstring and detailed_help as
    # user-facing command help text — keep them in sync with behavior.
    detailed_help = {
        'EXAMPLES': """\
          To print out a list of operations with some summary information about each, run:

            $ {command}

          To print only the name of each operation, run:

            $ {command} --simple-list
          """,
    }

    @staticmethod
    def Args(parser):
        """Args is called by calliope to gather arguments for this command.

        Args:
          parser: An argparse parser that you can use to add arguments that go
              on the command line after this command. Positional arguments are
              allowed.
        """
        # Only command-specific flag; --simple-list limits output to names.
        dm_v2_util.SIMPLE_LIST_FLAG.AddToParser(parser)

    def Collection(self):
        # Resource collection name used for default output formatting.
        return 'deploymentmanager.operations'

    def Run(self, args):
        """Run 'operations list'.

        Args:
          args: argparse.Namespace, The arguments that this command was invoked
              with.

        Returns:
          The list of operations for this project.

        Raises:
          HttpException: An http error response was received while executing api
              request.
        """
        request = dm_base.GetMessages().DeploymentmanagerOperationsListRequest(
            project=dm_base.GetProject(),
        )
        # Paged fetch; HTTP errors are translated to HttpException lazily
        # as the generator is consumed.
        return dm_v2_util.YieldWithHttpExceptions(list_pager.YieldFromList(
            dm_base.GetClient().operations, request, field='operations',
            limit=args.limit, batch_size=args.page_size))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import mock
from apache.thermos.monitoring.detector import FixedPathDetector
from apache.thermos.observer.http.file_browser import TaskObserverFileBrowser
from apache.thermos.observer.task_observer import TaskObserver
# Fixture constants shared by the mocks and assertions below.
MOCK_TASK_ID = 'abcd'
MOCK_BASE_PATH = '/a/b/c'
MOCK_FILENAME = 'd'
class PatchingTaskObserverFileBrowser(TaskObserverFileBrowser):
    """TaskObserverFileBrowser wired to a fresh TaskObserver rooted at the
    fixed mock path, for use in these tests."""

    def __init__(self, *args, **kw):
        # NOTE(review): *args/**kw are accepted but never forwarded, and
        # self.__tasks is not read anywhere in this class — confirm both
        # are intentional leftovers.
        self.__tasks = []
        super(PatchingTaskObserverFileBrowser, self).__init__()
        self._observer = self.make_observer()

    @classmethod
    def make_observer(cls):
        # Observer backed by a detector fixed to the mock base path.
        return TaskObserver(FixedPathDetector(MOCK_BASE_PATH))
def make_mocks(base_path=MOCK_BASE_PATH, filename=MOCK_FILENAME, process=None,
               logtype='stdout'):
    """Build the (logs, file, observer-logs) fixture dicts used in tests."""
    log_path = os.path.join(base_path, filename)
    expected_logs = {
        'task_id': MOCK_TASK_ID,
        'filename': filename,
        'process': process,
        'run': 1,
        'logtype': logtype,
    }
    expected_file = {
        'task_id': MOCK_TASK_ID,
        'filename': filename,
    }
    # What TaskObserver.logs is mocked to return: path + logtype per stream.
    observer_logs = {
        'stdout': [log_path, 'stdout'],
        'stderr': [log_path, 'stderr'],
    }
    return expected_logs, expected_file, observer_logs
class TestFileBrowser(object):
def test_handle_logs_with_stdout(self):
""" test observer handle_logs with logtype stdout """
process = mock.Mock()
mock_logs, _, mock_ologs = make_mocks(base_path=MOCK_BASE_PATH,
filename=MOCK_FILENAME,
process=process)
with contextlib.nested(
mock.patch('apache.thermos.observer.task_observer.TaskObserver.logs',
return_value=mock_ologs),
mock.patch('apache.thermos.observer.task_observer.TaskObserver.valid_path',
return_value=(MOCK_BASE_PATH, MOCK_FILENAME))):
ptfb = PatchingTaskObserverFileBrowser()
actual_logs = ptfb.handle_logs(MOCK_TASK_ID, process, 1, 'stdout')
assert mock_logs == actual_logs
assert actual_logs['task_id'] == MOCK_TASK_ID
assert actual_logs['filename'] == MOCK_FILENAME
assert actual_logs['process'] == process
assert actual_logs['run'] == 1
assert actual_logs['logtype'] == 'stdout'
def test_handle_logs_with_stderr(self):
""" test observer handle_logs with logtype stderr """
process = mock.Mock()
mock_logs, _, mock_ologs = make_mocks(base_path=MOCK_BASE_PATH,
filename=MOCK_FILENAME,
process=process,
logtype='stderr')
with contextlib.nested(
mock.patch('apache.thermos.observer.task_observer.TaskObserver.logs',
return_value=mock_ologs),
mock.patch('apache.thermos.observer.task_observer.TaskObserver.valid_path',
return_value=(MOCK_BASE_PATH, MOCK_FILENAME))):
ptfb = PatchingTaskObserverFileBrowser()
actual_logs = ptfb.handle_logs(MOCK_TASK_ID, process, 1, 'stderr')
assert mock_logs == actual_logs
assert actual_logs['task_id'] == MOCK_TASK_ID
assert actual_logs['filename'] == MOCK_FILENAME
assert actual_logs['process'] == process
assert actual_logs['run'] == 1
assert actual_logs['logtype'] == 'stderr'
def test_handle_file_with_stdout(self):
""" test observer handle_file with logtype stdout """
_, mock_file, mock_ologs = make_mocks(base_path=MOCK_BASE_PATH,
filename=MOCK_FILENAME)
ptfb = PatchingTaskObserverFileBrowser()
actual_file = ptfb.handle_file(MOCK_TASK_ID, MOCK_FILENAME)
assert mock_file == actual_file
assert actual_file['task_id'] == MOCK_TASK_ID
assert actual_file['filename'] == MOCK_FILENAME
def test_handle_file_with_stderr(self):
""" test observer handle_file with logtype stderr """
# Same as the stdout case but the fixture is built for stderr.
_, mock_file, mock_ologs = make_mocks(base_path=MOCK_BASE_PATH,
                                        filename=MOCK_FILENAME,
                                        logtype='stderr')
ptfb = PatchingTaskObserverFileBrowser()
actual_file = ptfb.handle_file(MOCK_TASK_ID, MOCK_FILENAME)
assert mock_file == actual_file
assert actual_file['task_id'] == MOCK_TASK_ID
assert actual_file['filename'] == MOCK_FILENAME
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_chart(osv.osv_memory):
    """Wizard that opens the analytic-account chart filtered by a date range."""
    _name = 'account.analytic.chart'
    _description = 'Account Analytic Chart'
    _columns = {
        'from_date': fields.date('From'),
        'to_date': fields.date('To'),
    }

    def analytic_account_chart_open_window(self, cr, uid, ids, context=None):
        """Return the analytic-account tree action with the wizard's
        from/to dates injected into the action context (only the dates
        that were actually filled in)."""
        if context is None:
            context = {}
        data_obj = self.pool.get('ir.model.data')
        action_obj = self.pool.get('ir.actions.act_window')
        # Resolve the XML id of the analytic-account tree action.
        ref = data_obj.get_object_reference(cr, uid, 'account', 'action_account_analytic_account_tree2')
        action_id = ref and ref[1] or False
        action = action_obj.read(cr, uid, [action_id], context=context)[0]
        wizard = self.read(cr, uid, ids, [])[0]
        date_ctx = {}
        if wizard['from_date']:
            date_ctx['from_date'] = wizard['from_date']
        if wizard['to_date']:
            date_ctx['to_date'] = wizard['to_date']
        # Action contexts are stored as their string representation.
        action['context'] = str(date_ctx)
        return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef Py_OPCODE_H
#define Py_OPCODE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "opcode_ids.h"
/* Operand values (oparg) for the BINARY_OP instruction.  Each NB_* constant
   selects one binary operation; NB_INPLACE_* variants are the augmented
   (`+=`-style) forms.  NB_OPARG_LAST is the highest valid operand value. */
#define NB_ADD 0
#define NB_AND 1
#define NB_FLOOR_DIVIDE 2
#define NB_LSHIFT 3
#define NB_MATRIX_MULTIPLY 4
#define NB_MULTIPLY 5
#define NB_REMAINDER 6
#define NB_OR 7
#define NB_POWER 8
#define NB_RSHIFT 9
#define NB_SUBTRACT 10
#define NB_TRUE_DIVIDE 11
#define NB_XOR 12
#define NB_INPLACE_ADD 13
#define NB_INPLACE_AND 14
#define NB_INPLACE_FLOOR_DIVIDE 15
#define NB_INPLACE_LSHIFT 16
#define NB_INPLACE_MATRIX_MULTIPLY 17
#define NB_INPLACE_MULTIPLY 18
#define NB_INPLACE_REMAINDER 19
#define NB_INPLACE_OR 20
#define NB_INPLACE_POWER 21
#define NB_INPLACE_RSHIFT 22
#define NB_INPLACE_SUBTRACT 23
#define NB_INPLACE_TRUE_DIVIDE 24
#define NB_INPLACE_XOR 25
#define NB_SUBSCR 26
#define NB_OPARG_LAST 26
#ifdef __cplusplus
}
#endif
#endif /* !Py_OPCODE_H */
|
c
|
github
|
https://github.com/python/cpython
|
Include/opcode.h
|
from __future__ import print_function
import os
import sys
import re
import datetime
import string
# Regexes used while scanning the headers being merged.
includesParser = re.compile( r'\s*#include\s*"(.*)"' )  # local #include "..." directives
commentParser1 = re.compile( r'^\s*/\*')  # start of a C block comment
commentParser2 = re.compile( r'^ \*')  # continuation line of a block comment
blankParser = re.compile( r'^\s*$')  # blank line
# Headers already inlined, so each file is emitted at most once.
seenHeaders = set([])
# The script lives one directory below the project root.
path = os.path.dirname(os.path.realpath( os.path.dirname(sys.argv[0])))
rootPath = os.path.join( path, 'include/' )
outputPath = os.path.join( path, 'single_include/ecs.h' )
# NOTE(review): includeImpl appears to be unused in this script -- confirm.
includeImpl = True
# Output handle opened at import time and shared via write() below.
out = open( outputPath, 'w' )
def write( line ):
# Append `line` verbatim to the shared single-include output file.
out.write( line )
def parseFile( path, filename ):
    """Recursively copy ``path + filename`` into the output file.

    Each local ``#include "..."`` not seen before is replaced inline with the
    referenced file's contents (prefixed with a ``// #included from:`` banner);
    repeated includes are dropped entirely.  Runs of blank lines are collapsed
    so that at most two consecutive blank lines survive in the output.

    The include is first resolved relative to the including file's directory,
    then relative to the project-wide ``rootPath``.
    """
    # `with` guarantees the handle is closed even on exceptions -- the
    # original opened the file and never closed it, leaking one handle
    # per recursive call.
    with open( path + filename, 'r' ) as f:
        blanks = 0
        for line in f:
            m = includesParser.match( line )
            if m:
                header = m.group(1)
                headerPath, sep, headerFile = header.rpartition( "/" )
                if not headerFile in seenHeaders:
                    seenHeaders.add( headerFile )
                    write( "// #included from: {0}\n".format( header ) )
                    if os.path.exists( path + headerPath + sep + headerFile ):
                        parseFile( path + headerPath + sep, headerFile )
                    else:
                        parseFile( rootPath + headerPath + sep, headerFile )
            else:
                #if (not guardParser.match( line ) or defineParser.match( line ) ) and not commentParser1.match( line )and not commentParser2.match( line ):
                if blankParser.match( line ):
                    blanks = blanks + 1
                else:
                    blanks = 0
                if blanks < 2:
                    write( line.rstrip() + "\n" )
# Emit the banner, then the merged header wrapped in an include guard.
out.write( "///\n" )
# NOTE(review): the version string is hard-coded here rather than read
# from the project -- confirm it is bumped together with releases.
out.write( "/// OpenEcs v{0}\n".format( "0.1.101" ) )
out.write( "/// Generated: {0}\n".format( datetime.datetime.now() ) )
out.write( "/// ----------------------------------------------------------\n" )
out.write( "/// This file has been generated from multiple files. Do not modify\n" )
out.write( "/// ----------------------------------------------------------\n" )
out.write( "///\n" )
out.write( "#ifndef ECS_SINGLE_INCLUDE_H\n" )
out.write( "#define ECS_SINGLE_INCLUDE_H\n" )
parseFile( rootPath, 'ecs.h' )
out.write( "#endif // ECS_SINGLE_INCLUDE_H\n\n" )
# `out` is never explicitly closed; the interpreter flushes it at exit.
print ("Generated single include for OpenEcs\n" )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
# Adds Belgian "BBA" structured-communication support to invoices: a
# 12-digit payment reference rendered as +++xxx/xxxx/xxxxx+++ whose last
# two digits are a mod-97 checksum of the first ten (0 is encoded as 97).
_inherit = 'account.invoice'
@api.cr_uid_context
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
# Relabel the existing 'none' entry and append the BBA choice.
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = \
('none', _('Free Communication'))
res.append(('bba', _('BBA Structured Communication')))
#l_logger.warning('reference_type = %s' %res )
return res
def check_bbacomm(self, val):
# Validate a BBA communication: only digits plus the separators
# '+ * / ' are allowed, and the digits (once separators are stripped)
# must be exactly 12 long with the last two equal to base % 97
# (a remainder of 0 is mapped to 97 per the BBA convention).
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
def _check_communication(self, cr, uid, ids):
# Constraint hook: validates the reference only for 'bba' invoices.
# NOTE(review): the loop returns on the first record whose type is
# 'bba', so later ids in the batch are not checked -- confirm intent.
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
@api.onchange('partner_id')
def _onchange_partner_id(self):
result = super(account_invoice, self)._onchange_partner_id()
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if self.partner_id:
if (self.type == 'out_invoice'):
# Default the communication type from the partner's configuration.
reference_type = self.partner_id.out_inv_comm_type
if reference_type:
# Old-API method invoked in new-API style; cr/uid/ids are
# injected by the api compatibility layer -- presumably.
reference = self.generate_bbacomm(self.type, reference_type, self.partner_id.id, '')['value']['reference']
self.reference_type = reference_type or 'none'
self.reference = reference
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
# Build a BBA communication according to the partner's algorithm:
#   'date'        -> day-of-year/year/daily-sequence + checksum
#   'partner_ref' -> digits of the partner reference + sequence + checksum
#   'random'      -> random 10-digit base + checksum
# A `reference` that already passes check_bbacomm is kept unchanged.
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
# Find today's existing BBA references to continue the sequence;
# reference[12:15] is the 3-digit sequence in '+++DOY/YEAR/SEQmm+++'.
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise UserError(_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
# Right-pad the partner digits to 7 so the base is always 10 digits.
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
# Note: random (not secrets) is fine here; the value is a
# payment reference, not a security token.
base = random.randint(1, 9999999999)
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise UserError(_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your Odoo support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
# Normalise/generate the BBA reference before creation and enforce
# its uniqueness among outgoing invoices.
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise UserError(_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
# Re-render the digits in canonical +++xxx/xxxx/xxxxx+++ form.
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise UserError(_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
# Same canonicalisation/uniqueness enforcement as create(), applied
# when the reference of an existing 'bba' invoice is changed.
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba' and 'reference' in vals:
if self.check_bbacomm(vals['reference']):
reference = re.sub('\D', '', vals['reference'])
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise UserError(_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
# Duplicating a 'bba' customer invoice must get a fresh reference,
# otherwise the uniqueness check in create() would reject the copy.
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['reference', 'reference_type']),
]
account_invoice()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes import msgprint, _
class DocType:
    """Controller for the Budget Distribution doctype: fills one child row
    per calendar month and validates that the allocations total 100%."""

    def __init__(self, doc, doclist=None):
        # The original used a mutable default (doclist=[]), which is created
        # once and shared by every instance constructed without an explicit
        # doclist; a None sentinel gives each instance its own list.
        self.doc, self.doclist = doc, doclist if doclist is not None else []

    def get_months(self):
        """Append one 'Budget Distribution Detail' child row per month,
        numbered 1..12 in calendar order."""
        month_list = ['January','February','March','April','May','June','July','August','September',
            'October','November','December']
        for idx, m in enumerate(month_list, start=1):
            mnth = addchild(self.doc, 'budget_distribution_details',
                'Budget Distribution Detail', self.doclist)
            mnth.month = m or ''
            mnth.idx = idx

    def validate(self):
        """Raise (via msgprint) unless the monthly percentage allocations,
        each rounded to 2 decimals, sum to exactly 100."""
        total = sum(flt(d.percentage_allocation, 2) for d in self.doclist.get(
            {"parentfield": "budget_distribution_details"}))
        if total != 100.0:
            msgprint(_("Percentage Allocation should be equal to ") + "100%", raise_exception=1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
A macro listed for import was not found.
Erroneous code example:
```compile_fail,E0469
#[macro_use(drink, be_merry)] // error: imported macro not found
extern crate alloc;
fn main() {
// ...
}
```
Either the listed macro is not contained in the imported crate, or it is not
exported from the given crate.
This could be caused by a typo. Did you misspell the macro's name?
Double-check the names of the macros listed for import, and that the crate
in question exports them.
A working version would be:
```ignore (cannot-doctest-multicrate-project)
// In some_crate crate:
#[macro_export]
macro_rules! eat {
...
}
#[macro_export]
macro_rules! drink {
...
}
// In your crate:
#[macro_use(eat, drink)]
extern crate some_crate; //ok!
```
|
unknown
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_error_codes/src/error_codes/E0469.md
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.websocket.stomp.websocketstompinterceptors;
import org.springframework.context.annotation.Configuration;
import org.springframework.messaging.simp.config.ChannelRegistration;
import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker;
import org.springframework.web.socket.config.annotation.WebSocketMessageBrokerConfigurer;
// tag::snippet[]
/**
 * STOMP WebSocket configuration that registers {@link MyChannelInterceptor}
 * on the client inbound channel, so every message received from WebSocket
 * clients passes through the interceptor before reaching message handlers.
 */
@Configuration
@EnableWebSocketMessageBroker
public class WebSocketConfiguration implements WebSocketMessageBrokerConfigurer {
@Override
public void configureClientInboundChannel(ChannelRegistration registration) {
// Intercept the channel carrying messages received from clients.
registration.interceptors(new MyChannelInterceptor());
}
}
// end::snippet[]
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
framework-docs/src/main/java/org/springframework/docs/web/websocket/stomp/websocketstompinterceptors/WebSocketConfiguration.java
|
from django.contrib.gis.gdal import SpatialReference
from django.db import DEFAULT_DB_ALIAS, connections
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
                  database=None):
    """
    Insert the given GDAL SpatialReference into the spatial backend's
    ``spatial_ref_sys`` table, enabling database-level spatial
    transformations for that SRID.  Useful for reference systems that the
    backend does not ship with by default:

    >>> from django.contrib.gis.utils import add_srs_entry
    >>> add_srs_entry(3857)

    Keyword Arguments:
     auth_name:
       Value stored in the ``auth_name`` column (default ``'EPSG'``).

     auth_srid:
       Value stored in the ``auth_srid`` column; defaults to the SRID
       determined by GDAL.

     ref_sys_name:
       SpatiaLite only -- value for the ``ref_sys_name`` column; defaults
       to the name determined by GDAL.

     database:
       Database connection alias to use; defaults to
       ``django.db.DEFAULT_DB_ALIAS`` (normally ``'default'``).
    """
    alias = database or DEFAULT_DB_ALIAS
    connection = connections[alias]

    # Only spatial backends expose these attributes/features.
    if not hasattr(connection.ops, 'spatial_version'):
        raise Exception('The `add_srs_entry` utility only works '
                        'with spatial backends.')
    if not connection.features.supports_add_srs_entry:
        raise Exception('This utility does not support your database backend.')
    SpatialRefSys = connection.ops.spatial_ref_sys()

    # Coerce anything that isn't already a SpatialReference (an SRID int,
    # WKT string, etc.) through the SpatialReference constructor.
    if not isinstance(srs, SpatialReference):
        srs = SpatialReference(srs)
    if srs.srid is None:
        raise Exception('Spatial reference requires an SRID to be '
                        'compatible with the spatial backend.')

    # Columns shared by the PostGIS and SpatiaLite spatial_ref_sys tables.
    kwargs = {
        'srid': srs.srid,
        'auth_name': auth_name,
        'auth_srid': auth_srid or srs.srid,
        'proj4text': srs.proj4,
    }

    # Backend-specific columns.
    field_names = {f.name for f in SpatialRefSys._meta.get_fields()}
    if 'srtext' in field_names:
        kwargs['srtext'] = srs.wkt
    if 'ref_sys_name' in field_names:
        # SpatiaLite-specific column.
        kwargs['ref_sys_name'] = ref_sys_name or srs.name

    # Look up by SRID alone -- the stored wkt/proj text may differ from
    # what GDAL produces -- and create the row only if it is missing.
    try:
        SpatialRefSys.objects.using(alias).get(srid=srs.srid)
    except SpatialRefSys.DoesNotExist:
        SpatialRefSys.objects.using(alias).create(**kwargs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from .jsregexp import Exec
import re
# Characters recognised as digits in "$n" replacement references.
DIGS = set('0123456789')
# JavaScript WhiteSpace + LineTerminator characters (used by String trim()).
WHITE = u"\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF"


def replacement_template(rep, source, span, npar):
    """Expand a JavaScript String.replace() replacement template.

    Supports ``$$`` (literal dollar), ``$``` (text before the match),
    ``$'`` (text after the match) and ``$1``..``$99`` (capture groups).
    ``span`` is the (start, end) of the match within ``source`` and
    ``npar`` is the tuple of captured groups; out-of-range or ``$0``
    references are kept verbatim, undefined (None) groups become ''.
    """
    pieces = []
    i = 0
    stop = len(rep) - 1
    while i < stop:
        ch = rep[i]
        if ch == '$':
            nxt = rep[i + 1]
            if nxt == '$':
                pieces.append('$')
                i += 2
                continue
            if nxt == '`':
                # Portion of the subject string before the match.
                pieces.append(source[:span[0]])
                i += 2
                continue
            if nxt == "'":
                # Portion of the subject string after the match.
                pieces.append(source[span[1]:])
                i += 2
                continue
            if nxt in DIGS:
                dig = nxt
                if i + 2 < len(rep) and rep[i + 2] in DIGS:
                    dig += rep[i + 2]
                num = int(dig)
                if not num or num > len(npar):
                    # $0 or a group we don't have: keep the text as-is.
                    pieces.append('$' + dig)
                else:
                    # None (undefined group) is replaced with ''.
                    pieces.append(npar[num - 1] if npar[num - 1] else '')
                i += 1 + len(dig)
                continue
        pieces.append(ch)
        i += 1
    # The scan deliberately stops one character early (a trailing '$'
    # cannot start a reference); flush that final character here.
    if i < len(rep):
        pieces.append(rep[-1])
    return ''.join(pieces)
###################################################
class StringPrototype:
# Implementations of the JavaScript String.prototype methods.
# These are NOT ordinary Python methods: `this`, `arguments` and helpers
# such as `this.Js` / `this.cok` are injected into each function's scope
# by the surrounding interpreter machinery -- presumably via namespace
# rewriting; do not call them directly as plain Python functions.
def toString():
if this.Class != 'String':
raise this.MakeError('TypeError',
'String.prototype.toString is not generic')
return this.value
def valueOf():
if this.Class != 'String':
raise this.MakeError('TypeError',
'String.prototype.valueOf is not generic')
return this.value
def charAt(pos):
this.cok()
pos = pos.to_int()
s = this.to_string()
if 0 <= pos < len(s.value):
char = s.value[pos]
if char not in s.CHAR_BANK:
s.Js(char) # add char to char bank
return s.CHAR_BANK[char]
# Out-of-range index yields the empty string, per the JS spec.
return s.CHAR_BANK['']
def charCodeAt(pos):
this.cok()
pos = pos.to_int()
s = this.to_string()
if 0 <= pos < len(s.value):
return s.Js(ord(s.value[pos]))
return s.NaN
def concat():
this.cok()
s = this.to_string()
res = s.value
for e in arguments.to_list():
res += e.to_string().value
return res
def indexOf(searchString, position):
this.cok()
s = this.to_string().value
search = searchString.to_string().value
pos = position.to_int()
return this.Js(s.find(search, min(max(pos, 0), len(s))))
def lastIndexOf(searchString, position):
this.cok()
s = this.to_string().value
search = searchString.to_string().value
pos = position.to_number()
# Undefined/NaN position means "search the whole string".
pos = 10**15 if pos.is_nan() else pos.to_int()
# NOTE(review): returns a raw Python int, unlike indexOf which wraps
# with this.Js -- confirm the caller converts it.
return s.rfind(search, 0, min(max(pos, 0) + 1, len(s)))
def localeCompare(that):
this.cok()
s = this.to_string()
that = that.to_string()
if s < that:
return this.Js(-1)
elif s > that:
return this.Js(1)
return this.Js(0)
def match(regexp):
this.cok()
s = this.to_string()
r = this.RegExp(regexp) if regexp.Class != 'RegExp' else regexp
if not r.glob:
return Exec(r, s)
# Global regex: collect every match, bumping lastIndex by one after a
# zero-width match so the loop cannot get stuck.
r.put('lastIndex', this.Js(0))
found = []
previous_last_index = 0
last_match = True
while last_match:
result = Exec(r, s)
if result.is_null():
last_match = False
else:
this_index = r.get('lastIndex').value
if this_index == previous_last_index:
r.put('lastIndex', this.Js(this_index + 1))
previous_last_index += 1
else:
previous_last_index = this_index
matchStr = result.get('0')
found.append(matchStr)
if not found:
return this.null
return found
def replace(searchValue, replaceValue):
# VERY COMPLICATED. to check again.
this.cok()
string = this.to_string()
s = string.value
res = ''
if not replaceValue.is_callable():
replaceValue = replaceValue.to_string().value
func = False
else:
func = True
# Replace all ( global )
if searchValue.Class == 'RegExp' and searchValue.glob:
last = 0
for e in re.finditer(searchValue.pat, s):
res += s[last:e.span()[0]]
if func:
# prepare arguments for custom func (replaceValue)
args = (e.group(), ) + e.groups() + (e.span()[1], string)
# convert all types to JS
args = map(this.Js, args)
res += replaceValue(*args).to_string().value
else:
res += replacement_template(replaceValue, s, e.span(),
e.groups())
last = e.span()[1]
res += s[last:]
return this.Js(res)
elif searchValue.Class == 'RegExp':
# Non-global regex: only the first occurrence is replaced.
e = re.search(searchValue.pat, s)
if e is None:
return string
span = e.span()
pars = e.groups()
match = e.group()
else:
# Plain-string search: literal first occurrence.
match = searchValue.to_string().value
ind = s.find(match)
if ind == -1:
return string
span = ind, ind + len(match)
pars = ()
res = s[:span[0]]
if func:
args = (match, ) + pars + (span[1], string)
# convert all types to JS
this_ = this
args = tuple([this_.Js(x) for x in args])
res += replaceValue(*args).to_string().value
else:
res += replacement_template(replaceValue, s, span, pars)
res += s[span[1]:]
return res
def search(regexp):
this.cok()
string = this.to_string()
if regexp.Class == 'RegExp':
rx = regexp
else:
rx = this.RegExp(regexp)
res = re.search(rx.pat, string.value)
if res is not None:
return this.Js(res.span()[0])
return -1
def slice(start, end):
this.cok()
s = this.to_string()
start = start.to_int()
length = len(s.value)
end = length if end.is_undefined() else end.to_int()
#From = max(length+start, 0) if start<0 else min(length, start)
#To = max(length+end, 0) if end<0 else min(length, end)
return s.value[start:end]
def split(separator, limit):
# its a bit different that re.split!
this.cok()
S = this.to_string()
s = S.value
lim = 2**32 - 1 if limit.is_undefined() else limit.to_uint32()
if not lim:
return []
if separator.is_undefined():
return [s]
len_s = len(s)
res = []
R = separator if separator.Class == 'RegExp' else separator.to_string()
if not len_s:
# NOTE(review): SplitMatch always returns a tuple, never None, so
# this `is None` branch can never be taken and the empty string
# always splits to [] -- confirm against the intended JS semantics.
if SplitMatch(s, 0, R) is None:
return [S]
return []
p = q = 0
while q != len_s:
e, cap = SplitMatch(s, q, R)
if e is None or e == p:
q += 1
continue
res.append(s[p:q])
p = q = e
if len(res) == lim:
return res
for element in cap:
res.append(this.Js(element))
if len(res) == lim:
return res
res.append(s[p:])
return res
def substring(start, end):
this.cok()
s = this.to_string().value
start = start.to_int()
length = len(s)
end = length if end.is_undefined() else end.to_int()
# Clamp both ends to [0, length]; the smaller becomes the start.
fstart = min(max(start, 0), length)
fend = min(max(end, 0), length)
return this.Js(s[min(fstart, fend):max(fstart, fend)])
def substr(start, length):
#I hate this function and its description in specification
r1 = this.to_string().value
r2 = start.to_int()
# Undefined length means "to the end of the string".
r3 = 10**20 if length.is_undefined() else length.to_int()
r4 = len(r1)
# Negative start counts from the end, clamped at 0.
r5 = r2 if r2 >= 0 else max(0, r2 + r4)
r6 = min(max(r3, 0), r4 - r5)
if r6 <= 0:
return ''
return r1[r5:r5 + r6]
def toLowerCase():
this.cok()
return this.Js(this.to_string().value.lower())
def toLocaleLowerCase():
this.cok()
return this.Js(this.to_string().value.lower())
def toUpperCase():
this.cok()
return this.Js(this.to_string().value.upper())
def toLocaleUpperCase():
this.cok()
return this.Js(this.to_string().value.upper())
def trim():
this.cok()
# WHITE is the JS WhiteSpace/LineTerminator set defined at module level.
return this.Js(this.to_string().value.strip(WHITE))
def SplitMatch(s, q, R):
    """Try to match the split separator R (a JS RegExp or JS String)
    against the Python string `s` at position `q`.

    Returns (end_index, captured_groups) on success and (None, ()) when
    there is no match at that position.
    """
    if R.Class == 'RegExp':
        found = R.match(s, q)
        if found is None:
            return None, ()
        return found.span()[1], found.groups()
    # R is a plain JS string: a literal prefix match at position q.
    needle = R.value
    if s[q:].startswith(needle):
        return q + len(needle), ()
    return None, ()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
CMS Video
"""
import time
import os
import requests
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
from ....tests.helpers import YouTubeStubConfig
from ...lms.video.video import VideoPage
from selenium.webdriver.common.keys import Keys
from ..utils import wait_for_notification
# CSS selectors for passive page elements (containers, dialogs, inputs).
CLASS_SELECTORS = {
'video_container': 'div.video',
'video_init': '.is-initialized',
'video_xmodule': '.xmodule_VideoModule',
'video_spinner': '.video-wrapper .spinner',
'video_controls': 'section.video-controls',
'attach_asset': '.upload-dialog > input[type="file"]',
'upload_dialog': '.wrapper-modal-window-assetupload',
'xblock': '.add-xblock-component',
'slider_range': '.slider-range',
'error': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_bar': '.videolist-extra-videos',
'status': '.transcripts-message-status',
'attach_transcript': '.file-chooser > input[type="file"]',
}
# CSS selectors for clickable controls, keyed by the names passed to
# click_button() below.
BUTTON_SELECTORS = {
'create_video': 'a[data-category="video"]',
'handout_download': '.video-handout.video-download-button a',
'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
'upload_asset': '.upload-action',
'asset_submit': '.action-upload',
'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
'translations_clear': '.metadata-video-translations .setting-clear',
'translation_add': '.wrapper-translations-settings > a',
'import': '.setting-import',
'download_to_edit': '.setting-download',
'disabled_download_to_edit': '.setting-download.is-disabled',
'upload_new_timed_transcripts': '.setting-upload',
'replace': '.setting-replace',
'choose': '.setting-choose',
'use_existing': '.setting-use-existing',
'collapse_link': '.collapse-action.collapse-setting',
}
DISPLAY_NAME = "Component Display Name"
# Expected [name, value, is_modified] triples for a fresh video component's
# settings editor (basic tab first, then advanced).
DEFAULT_SETTINGS = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'http://youtu.be/3_yD_cEKoCk, , ', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Default Timed Transcript', '', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Available on Web Only', 'False', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video ID', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', '3_yD_cEKoCk', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery', 'window.XModule', 'window.XBlock',
'window.MathJax', 'window.MathJax.isReady')
class VideoComponentPage(VideoPage):
"""
CMS Video Component Page
"""
url = None
@wait_for_js
def is_browser_on_page(self):
# The page is "loaded" once either a rendered video module or the
# add-component placeholder is present in the DOM.
return self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or self.q(
css='div{0}'.format(CLASS_SELECTORS['xblock'])).present
def get_element_selector(self, class_name, vertical=False):
# NOTE(review): pass-through override that only forwards to the parent
# with identical arguments -- it could be removed with no behavior change.
return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)
def _wait_for(self, check_func, desc, result=False, timeout=30):
"""
Calls the method provided as an argument until the Promise satisfied or BrokenPromise

Arguments:
    check_func (callable): Promise function to be fulfilled.
    desc (str): Description of the Promise, used in log messages.
    result (bool): Indicates whether we need result from Promise or not
    timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.

Returns:
    The fulfilled Promise's value when `result` is True, otherwise the
    EmptyPromise fulfillment (no meaningful value).
"""
# Promise returns check_func's value; EmptyPromise only waits for truthiness.
if result:
return Promise(check_func, desc, timeout=timeout).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout).fulfill()
def wait_for_video_component_render(self):
"""
Wait until video component rendered completely
"""
# When the YouTube API is blocked by the stub config, the player never
# reaches the initialized state, so skip the wait entirely.
if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'):
self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized')
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible,
'Video Buffering Completed')
self._wait_for(self.is_controls_visible, 'Player Controls are Visible')
@wait_for_js
def is_controls_visible(self):
"""
Get current visibility state of all video controls.

Returns:
    bool: True means video controls are visible for all videos, False means video controls are not visible
    for one or more videos
"""
return self.q(css=CLASS_SELECTORS['video_controls']).visible
def click_button(self, button_name, index=0, require_notification=False):
"""
Click on a button as specified by `button_name`

Arguments:
    button_name (str): button name (a key of BUTTON_SELECTORS)
    index (int): query index
    require_notification (bool): wait for the CMS "saving" notification
        to appear and disappear after the click
"""
self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click()
if require_notification:
wait_for_notification(self)
# Always settle outstanding AJAX requests before returning.
self.wait_for_ajax()
@staticmethod
def file_path(filename):
"""
Construct file path to be uploaded to assets.

Arguments:
    filename (str): asset filename

Returns:
    str: absolute path under the repo's data/uploads directory
    (derived by stripping 5 path components off this module's path).
"""
return os.sep.join(__file__.split(os.sep)[:-5]) + '/data/uploads/' + filename
def upload_handout(self, handout_filename):
"""
Upload a handout file to assets

Arguments:
    handout_filename (str): handout file name
"""
# Thin wrapper: handouts use the default asset_type='handout' path.
self.upload_asset(handout_filename)
    def upload_asset(self, asset_filename, asset_type='handout', index=0):
        """
        Upload an asset file via the upload dialog.

        Arguments:
            asset_filename (str): asset file name
            asset_type (str): one of `handout`, `transcript`
            index (int): query index
        """
        asset_file_path = self.file_path(asset_filename)
        # Open the dialog, attach the file to its hidden input, then submit.
        self.click_button('upload_asset', index)
        self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path)
        self.click_button('asset_submit')
        # Only srt format transcript files can be uploaded, If an error
        # occurs due to incorrect transcript file we will return from here
        if asset_type == 'transcript' and self.q(css='#upload_error').present:
            return
        # confirm upload completion
        self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed')
    def clear_handout(self):
        """
        Clear the handout from the component settings.
        """
        self.click_button('handout_clear')
def _get_handout(self, url):
"""
Download handout at `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers
    def download_handout(self, mime_type, is_editor=False):
        """
        Download the handout and check its mime type.

        Arguments:
            mime_type (str): expected mime type of handout file
            is_editor (bool): use the editor's download link instead of the
                unit page link

        Returns:
            tuple: (bool download succeeded, bool content-type matches `mime_type`)
        """
        selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download']
        handout_url = self.q(css=selector).attrs('href')[0]
        result, headers = self._get_handout(handout_url)
        return result, headers['content-type'] == mime_type
    @property
    def is_handout_button_visible(self):
        """
        Check if the handout download button is visible.
        """
        return self.q(css=BUTTON_SELECTORS['handout_download']).visible
    def create_video(self):
        """
        Create a Video Component by clicking on Video button and wait for rendering completion.
        """
        # Create video
        self.click_button('create_video', require_notification=True)
        self.wait_for_video_component_render()
def xblocks(self):
"""
Tells the total number of video xblocks present on current unit page.
Returns:
(int): total video xblocks
"""
return len(self.q(css='.xblock-header').filter(
lambda el: 'xblock-header-video' in el.get_attribute('class')).results)
def focus_caption_line(self, line_number):
"""
Focus a caption line as specified by `line_number`
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER)
def is_caption_line_focused(self, line_number):
"""
Check if a caption line focused
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
attributes = self.q(css=caption_line_selector).attrs('class')
return 'focused' in attributes
    @property
    def is_slider_range_visible(self):
        """
        Return True if slider range is visible.
        """
        return self.q(css=CLASS_SELECTORS['slider_range']).visible
def verify_settings(self):
"""
Verify that video component has correct default settings.
"""
query = '.wrapper-comp-setting'
settings = self.q(css=query).results
if len(DEFAULT_SETTINGS) != len(settings):
return False
for counter, setting in enumerate(settings):
is_verified = self._verify_setting_entry(setting,
DEFAULT_SETTINGS[counter][0],
DEFAULT_SETTINGS[counter][1])
if not is_verified:
return is_verified
return True
    @staticmethod
    def _verify_setting_entry(setting, field_name, field_value):
        """
        Verify a `setting` entry.

        Arguments:
            setting (WebElement): Selenium WebElement
            field_name (str): Name of field
            field_value (str): Value of field

        Returns:
            bool: Does `setting` have correct value.
        """
        if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'):
            return False
        # Get class attribute values
        classes = setting.get_attribute('class').split()
        list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations']
        is_list_type = any(list_type in classes for list_type in list_type_classes)
        # List-type settings join item values; videolist rows read the nested
        # input of each row; everything else is a single setting input.
        if is_list_type:
            current_value = ', '.join(
                ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item'))
        elif 'metadata-videolist-enum' in setting.get_attribute('class'):
            current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in
                                      setting.find_elements_by_class_name('videolist-settings-item'))
        else:
            current_value = setting.find_element_by_class_name('setting-input').get_attribute('value')
        if field_value != current_value:
            return False
        # Clear button should be visible(active class is present) for
        # every setting that don't have 'metadata-videolist-enum' class
        if 'metadata-videolist-enum' not in setting.get_attribute('class'):
            setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0]
            if 'active' not in setting_clear_button.get_attribute('class'):
                return False
        return True
    def set_field_value(self, field_name, field_value, field_type='input'):
        """
        Set settings input `field` with `value`.

        Arguments:
            field_name (str): Name of field
            field_value (str): Value to set
            field_type (str): `input`, `select` etc(more to be added later)
        """
        query = '.wrapper-comp-setting > label:nth-child(1)'
        field_id = ''
        if field_type == 'input':
            # Find the label whose text contains `field_name`; its `for`
            # attribute identifies the associated input element.
            for index, _ in enumerate(self.q(css=query)):
                if field_name in self.q(css=query).nth(index).text[0]:
                    field_id = self.q(css=query).nth(index).attrs('for')[0]
                    break
            # NOTE(review): if no label matches, field_id stays '' and the
            # '#' selector below is invalid -- confirm callers always pass a
            # field name that exists.
            self.q(css='#{}'.format(field_id)).fill(field_value)
        elif field_type == 'select':
            self.q(css='select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click()
    def verify_field_value(self, field_name, field_value):
        """
        Check the settings value of `field_name` against `field_value`.

        Arguments:
            field_name (str): Name of field
            field_value (str): Expected value

        Returns:
            bool: If `field_name` has `field_value`
        """
        _, setting = self._get_setting_entry(field_name)
        return self._verify_setting_entry(setting, field_name, field_value)
    def _get_setting_entry(self, field_name):
        """
        Get setting entry of `field_name`.

        Arguments:
            field_name (str): Name of field

        Returns:
            tuple: (index, WebElement) of the first entry whose label equals
            `field_name`.
            NOTE(review): implicitly returns None when nothing matches, which
            makes callers' tuple unpacking raise TypeError -- confirm intended.
        """
        for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results):
            if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name:
                return index, setting
    def translations_count(self):
        """
        Get the number of translation rows in the settings.
        """
        return len(self.q(css='.wrapper-translations-settings .list-settings-item').results)
    def select_translation_language(self, language_code, index=0):
        """
        Select translation language as specified by `language_code`.

        Arguments:
            language_code (str): language code to pick in the row's dropdown
            index (int): query index (which translation row)
        """
        translations_items = '.wrapper-translations-settings .list-settings-item'
        language_selector = translations_items + ' select option[value="{}"]'.format(language_code)
        self.q(css=language_selector).nth(index).click()
    def upload_translation(self, transcript_name, language_code):
        """
        Upload a translation file.

        Arguments:
            transcript_name (str): name of the transcript file to upload
            language_code (str): language code for the new translation row
        """
        # Add a new row, set its language, then upload into that same row
        # (the new row is always the last one).
        self.click_button('translation_add')
        translations_count = self.translations_count()
        self.select_translation_language(language_code, translations_count - 1)
        self.upload_asset(transcript_name, asset_type='transcript', index=translations_count - 1)
    def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
        """
        Replace an existing translation row's language and file.

        Arguments:
            old_lang_code (str): language code of the row to replace
            new_lang_code (str): language code to switch the row to
            transcript_name (str): transcript file to upload into the row
        """
        language_codes = self.translations()
        index = language_codes.index(old_lang_code)
        self.select_translation_language(new_lang_code, index)
        self.upload_asset(transcript_name, asset_type='transcript', index=index)
    def translations(self):
        """
        Extract translations.

        Returns:
            list: list of translation language codes
        """
        translations_selector = '.metadata-video-translations .remove-setting'
        return self.q(css=translations_selector).attrs('data-lang')
    def download_translation(self, language_code, text_to_search):
        """
        Download a translation having `language_code` and containing `text_to_search`.

        Arguments:
            language_code (str): language code
            text_to_search (str): text to search in translation

        Returns:
            bool: whether download was successful
        """
        mime_type = 'application/x-subrip'
        lang_code = '/{}?'.format(language_code)
        # Pick the download link whose url embeds the language code.
        link = [link for link in self.q(css='.download-action').attrs('href') if lang_code in link]
        # NOTE(review): _get_transcript is defined outside this view;
        # presumably returns (ok, headers, body bytes) -- confirm.
        result, headers, content = self._get_transcript(link[0])
        return result is True and mime_type in headers['content-type'] and text_to_search in content.decode('utf-8')
    def remove_translation(self, language_code):
        """
        Remove the translation row whose language is `language_code`.

        Arguments:
            language_code (str): language code
        """
        self.q(css='.remove-action').filter(lambda el: language_code == el.get_attribute('data-lang')).click()
    @property
    def upload_status_message(self):
        """
        Get the asset upload status/error message text.
        """
        return self.q(css='#upload_error').text[0]
def captions_lines(self):
"""
Extract partial caption lines.
As all the captions lines are exactly same so only getting partial lines will work.
"""
self.wait_for_captions()
selector = '.subtitles > li:nth-child({})'
return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])
    def set_url_field(self, url, field_number):
        """
        Set video url field in basic settings tab.

        Arguments:
            url (str): video url
            field_number (int): 1-based video url field number
        """
        # Expand the collapsed url fields section before typing.
        if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
            self.click_button('collapse_link')
        self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
        # Give client-side validation a moment to fire before waiting on ajax.
        time.sleep(DELAY)
        self.wait_for_ajax()
    def message(self, message_type):
        """
        Get video url field status/error message.

        Arguments:
            message_type(str): type(status, error) of message

        Returns:
            str: status/error message
        """
        # Status messages appear asynchronously, so wait for visibility first;
        # error messages are read immediately.
        if message_type == 'status':
            self.wait_for_element_visibility(CLASS_SELECTORS[message_type],
                                             '{} message is Visible'.format(message_type.title()))
        return self.q(css=CLASS_SELECTORS[message_type]).text[0]
def url_field_status(self, *field_numbers):
"""
Get video url field status(enable/disable).
Arguments:
url (str): video url
field_numbers (tuple or None): field numbers to check status for, None means get status for all.
tuple items will be integers and must start from 1
Returns:
dict: field numbers as keys and field status(bool) as values, False means a field is disabled
"""
if field_numbers:
index_list = [number - 1 for number in field_numbers]
else:
index_list = range(3) # maximum three fields
statuses = {}
for index in index_list:
status = 'is-disabled' not in self.q(css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
statuses[index + 1] = status
return statuses
def clear_field(self, index):
"""
Clear a video url field at index specified by `index`.
"""
self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')
# Trigger an 'input' event after filling the field with an empty value.
self.browser.execute_script(
"$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
time.sleep(DELAY)
self.wait_for_ajax()
    def clear_fields(self):
        """
        Clear all video url fields via javascript.

        Re-enables disabled fields first so their values can be reset, then
        fires 'input' so client-side handlers notice the change.
        """
        script = """
        $('{selector}')
            .prop('disabled', false)
            .removeClass('is-disabled')
            .val('')
            .trigger('input');
        """.format(selector=CLASS_SELECTORS['url_inputs'])
        self.browser.execute_script(script)
        time.sleep(DELAY)
        self.wait_for_ajax()
    def revert_field(self, field_name):
        """
        Revert the setting named `field_name` to its default by clicking its
        clear button.
        """
        _, setting = self._get_setting_entry(field_name)
        setting.find_element_by_class_name('setting-clear').click()
def is_transcript_button_visible(self, button_name, index=0, button_text=None):
"""
Check if a transcript related button is visible.
Arguments:
button_name (str): name of button
index (int): query index
button_text (str or None): text to match with text on a button, if None then don't match texts
Returns:
bool: is button visible
"""
is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
is_text_matched = True
if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
is_text_matched = False
return is_visible and is_text_matched
    def upload_transcript(self, transcript_filename):
        """
        Upload a Transcript.

        Arguments:
            transcript_filename (str): name of transcript file
        """
        # Show the Browse Button
        self.browser.execute_script("$('form.file-chooser').show()")
        asset_file_path = self.file_path(transcript_filename)
        self.q(css=CLASS_SELECTORS['attach_transcript']).results[0].send_keys(asset_file_path)
        # confirm upload completion
        self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['attach_transcript']).visible, 'Upload Completed')
|
unknown
|
codeparrot/codeparrot-clean
| ||
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/warp/default_mma_tensor_op.h>
#include <cutlass/gemm/warp/mma_tensor_op.h>
#include <ATen/native/cuda/cutlass_extensions/arch/mma.h>
#include <ATen/native/cuda/cutlass_extensions/gemm/warp/mma_tensorop_compute_B_with_f16.h>
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for m-by-n-by-kgroup.
/// Selects a warp-level MMA for the mixed-input case: the narrow (quantized)
/// B operand is dequantized to the A element type (f16/bf16) before the
/// tensor-core multiply-accumulate, as indicated by the
/// arch::OpMultiplyAddDequantizeInterleavedBToA operator tag.
template<
    /// Shape of one matrix production operation (concept: GemmShape)
    typename WarpShape_,
    /// Shape of one matrix production operation (concept: GemmShape)
    typename InstructionShape_,
    /// Data type of A elements,
    typename ElementA,
    /// Layout of A matrix (concept: MatrixLayout)
    typename LayoutA,
    /// Data type of B elements
    typename ElementB,
    /// Layout of B matrix (concept: MatrixLayout)
    typename LayoutB,
    /// Element type of C matrix
    typename ElementC,
    /// Layout of C matrix (concept: MatrixLayout)
    typename LayoutC,
    /// Number of partitions along K dimension
    int PartitionsK,
    /// Store the accumulators in row major or column major. Row major is used
    /// when output layout is interleaved.
    bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<WarpShape_,
                          InstructionShape_,
                          ElementA,
                          LayoutA,
                          ElementB,
                          LayoutB,
                          ElementC,
                          LayoutC,
                          arch::OpMultiplyAddDequantizeInterleavedBToA,
                          PartitionsK,
                          AccumulatorsInRowMajor> {

private:
    // Shape for computing the FP16s
    using ComputeInstructionShape = InstructionShape_;

    // Chosen so we get K=16 for int8 and K=32 for int4.
    // (8 * bits(ElementA) / bits(ElementB): the narrower B is, the more
    // elements one load instruction covers along K.)
    static constexpr int LoadInstructionK = 8 * sizeof_bits<ElementA>::value / sizeof_bits<ElementB>::value;

    // Shape for loading the narrow data type from shared memory
    using LoadInstructionShape = GemmShape<InstructionShape_::kM, InstructionShape_::kN, LoadInstructionK>;

public:
    // The underlying hardware MMA computes entirely in ElementA (both
    // operands), since B is dequantized to ElementA before the MMA.
    using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<cutlass::arch::Mma<InstructionShape_,
                                                                             32,
                                                                             ElementA,
                                                                             cutlass::layout::RowMajor,
                                                                             ElementA,
                                                                             cutlass::layout::ColumnMajor,
                                                                             ElementC,
                                                                             cutlass::layout::RowMajor,
                                                                             arch::OpMultiplyAdd>,
                                                          cutlass::MatrixShape<1, 1>>;

    // Define the warp-level tensor op
    using Type = cutlass::gemm::warp::MmaTensorOpComputeBWithF16<WarpShape_,
                                                                 ElementA,
                                                                 LayoutA,
                                                                 ElementB,
                                                                 LayoutB,
                                                                 ElementC,
                                                                 LayoutC,
                                                                 Policy,
                                                                 LoadInstructionShape,
                                                                 PartitionsK,
                                                                 AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
c
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/cuda/cutlass_extensions/gemm/warp/default_mma_tensor_op.h
|
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Classes to handle advanced configuration in simple to complex applications.
Allows to load the configuration from a file or from command line
options, to generate a sample configuration file or to display
program's usage. Fills the gap between optik/optparse and ConfigParser
by adding data types (which are also available as a standalone optik
extension in the `optik_ext` module).
Quick start: simplest usage
---------------------------
.. python ::
>>> import sys
>>> from logilab.common.configuration import Configuration
>>> options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
... ('value', {'type': 'string', 'metavar': '<string>'}),
... ('multiple', {'type': 'csv', 'default': ('yop',),
... 'metavar': '<comma separated values>',
... 'help': 'you can also document the option'}),
... ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
... ]
>>> config = Configuration(options=options, name='My config')
>>> print config['dothis']
True
>>> print config['value']
None
>>> print config['multiple']
('yop',)
>>> print config['number']
2
>>> print config.help()
Usage: [options]
Options:
-h, --help show this help message and exit
--dothis=<y or n>
--value=<string>
--multiple=<comma separated values>
you can also document the option [current: none]
--number=<int>
>>> f = open('myconfig.ini', 'w')
>>> f.write('''[MY CONFIG]
... number = 3
... dothis = no
... multiple = 1,2,3
... ''')
>>> f.close()
>>> config.load_file_configuration('myconfig.ini')
>>> print config['dothis']
False
>>> print config['value']
None
>>> print config['multiple']
['1', '2', '3']
>>> print config['number']
3
>>> sys.argv = ['mon prog', '--value', 'bacon', '--multiple', '4,5,6',
... 'nonoptionargument']
>>> print config.load_command_line_configuration()
['nonoptionargument']
>>> print config['value']
bacon
>>> config.generate_config()
# class for simple configurations which don't need the
# manager / providers model and prefer delegation to inheritance
#
# configuration values are accessible through a dict like interface
#
[MY CONFIG]
dothis=no
value=bacon
# you can also document the option
multiple=4,5,6
number=3
>>>
"""
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
import os
import sys
import re
from os.path import exists, expanduser
from copy import copy
from ConfigParser import ConfigParser, NoOptionError, NoSectionError, \
DuplicateSectionError
from warnings import warn
from logilab.common.compat import callable, raw_input, str_encode as _encode
from logilab.common.deprecation import deprecated
from logilab.common.textutils import normalize_text, unquote
from logilab.common import optik_ext
OptionError = optik_ext.OptionError
# Sentinel default value marking an option as mandatory (identity-compared).
REQUIRED = []
class UnsupportedAction(Exception):
    """raised by set_option when it doesn't know what to do for an action"""
def _get_encoding(encoding, stream):
encoding = encoding or getattr(stream, 'encoding', None)
if not encoding:
import locale
encoding = locale.getpreferredencoding()
return encoding
# validation functions ########################################################
# validators will return the validated value or raise optparse.OptionValueError
# XXX add to documentation
def choice_validator(optdict, name, value):
    """validate and return a converted value for option of type 'choice'
    """
    choices = optdict['choices']
    if value in choices:
        return value
    msg = "option %s: invalid value: %r, should be in %s"
    raise optik_ext.OptionValueError(msg % (name, value, choices))
def multiple_choice_validator(optdict, name, value):
    """validate and return a converted value for option of type 'choice'
    with multiple, comma-separated values
    """
    allowed = optdict['choices']
    values = optik_ext.check_csv(None, name, value)
    for item in values:
        if item not in allowed:
            msg = "option %s: invalid value: %r, should be in %s"
            raise optik_ext.OptionValueError(msg % (name, item, allowed))
    return values
# Thin wrappers delegating each option type to the corresponding optik_ext
# checker. The checkers take (option, opt, value); the option object is not
# needed here, hence the leading None.
def csv_validator(optdict, name, value):
    """validate and return a converted value for option of type 'csv'
    """
    return optik_ext.check_csv(None, name, value)

def yn_validator(optdict, name, value):
    """validate and return a converted value for option of type 'yn'
    """
    return optik_ext.check_yn(None, name, value)

def named_validator(optdict, name, value):
    """validate and return a converted value for option of type 'named'
    """
    return optik_ext.check_named(None, name, value)

def file_validator(optdict, name, value):
    """validate and return a filepath for option of type 'file'"""
    return optik_ext.check_file(None, name, value)

def color_validator(optdict, name, value):
    """validate and return a valid color for option of type 'color'"""
    return optik_ext.check_color(None, name, value)

def password_validator(optdict, name, value):
    """validate and return a string for option of type 'password'"""
    return optik_ext.check_password(None, name, value)

def date_validator(optdict, name, value):
    """validate and return a mx DateTime object for option of type 'date'"""
    return optik_ext.check_date(None, name, value)

def time_validator(optdict, name, value):
    """validate and return a time object for option of type 'time'"""
    return optik_ext.check_time(None, name, value)

def bytes_validator(optdict, name, value):
    """validate and return an integer for option of type 'bytes'"""
    return optik_ext.check_bytes(None, name, value)
# Maps option type name -> validator. Entries are either 3-argument
# validators (optdict, name, value) or plain 1-argument converters
# (int, float, unquote, re.compile); _call_validator handles both
# calling conventions.
VALIDATORS = {'string': unquote,
              'int': int,
              'float': float,
              'file': file_validator,
              'font': unquote,
              'color': color_validator,
              'regexp': re.compile,
              'csv': csv_validator,
              'yn': yn_validator,
              'bool': yn_validator,
              'named': named_validator,
              'password': password_validator,
              'date': date_validator,
              'time': time_validator,
              'bytes': bytes_validator,
              'choice': choice_validator,
              'multiple_choice': multiple_choice_validator,
              }
def _call_validator(opttype, optdict, option, value):
    """Dispatch `value` to the VALIDATORS entry registered for `opttype`.

    Tries the 3-argument validator signature first, then falls back to the
    plain 1-argument converter signature (e.g. int, float, unquote).

    Raises optik_ext.OptionValueError when the value is rejected, and a
    plain Exception for an unknown option type.
    """
    if opttype not in VALIDATORS:
        raise Exception('Unsupported type "%s"' % opttype)
    try:
        return VALIDATORS[opttype](optdict, option, value)
    except TypeError:
        # validator may be a plain one-argument converter
        try:
            return VALIDATORS[opttype](value)
        except optik_ext.OptionValueError:
            raise
        # BUGFIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; narrow it to Exception.
        except Exception:
            raise optik_ext.OptionValueError('%s value (%r) should be of type %s' %
                                   (option, value, opttype))
# user input functions ########################################################
# user input functions will ask the user for input on stdin then validate
# the result and return the validated value or raise optparse.OptionValueError
# XXX add to documentation
def input_password(optdict, question='password:'):
    """Prompt for a password twice on stdin and loop until both entries match.

    `optdict` is unused but kept for the common input-function signature.
    """
    from getpass import getpass
    while True:
        value = getpass(question)
        value2 = getpass('confirm: ')
        if value == value2:
            return value
        print 'password mismatch, try again'
def input_string(optdict, question):
    """Prompt the user with `question`; return the stripped input, or None
    when the input is empty."""
    value = raw_input(question).strip()
    return value or None
def _make_input_function(opttype):
    """Build a prompt-and-validate input function for option type `opttype`.

    The returned function loops until the user enters a value accepted by
    the type's validator, or returns None on blank input.
    """
    def input_validator(optdict, question):
        while True:
            value = raw_input(question)
            if not value.strip():
                return None
            try:
                return _call_validator(opttype, optdict, None, value)
            except optik_ext.OptionValueError, ex:
                # strip the "option <name>:" prefix from the error message
                msg = str(ex).split(':', 1)[-1].strip()
                print 'bad value: %s' % msg
    return input_validator
# Maps option type -> interactive input function; types without a dedicated
# function get a generic prompt-and-validate loop built from their validator.
INPUT_FUNCTIONS = {
    'string': input_string,
    'password': input_password,
    }

for opttype in VALIDATORS.keys():
    INPUT_FUNCTIONS.setdefault(opttype, _make_input_function(opttype))
# utility functions ############################################################
def expand_default(self, option):
    """monkey patch OptionParser.expand_default since we have a particular
    way to handle defaults to avoid overriding values in the configuration
    file
    """
    if self.parser is None or not self.default_tag:
        return option.help
    # strip the leading '--' from the first long option name
    optname = option._long_opts[0][2:]
    try:
        provider = self.parser.options_manager._all_options[optname]
    except KeyError:
        value = None
    else:
        optdict = provider.get_option_def(optname)
        optname = provider.option_attrname(optname, optdict)
        # current configuration value if set, else fall back to the option
        # definition dict itself (a truthy placeholder)
        value = getattr(provider.config, optname, optdict)
        value = format_option_value(optdict, value)
    if value is optik_ext.NO_DEFAULT or not value:
        value = self.NO_DEFAULT_VALUE
    return option.help.replace(self.default_tag, str(value))
def _validate(value, optdict, name=''):
    """return a validated value for an option according to its type

    optional argument name is only used for error message formatting
    """
    try:
        _type = optdict['type']
    except KeyError:
        # FIXME: an option without a declared type is returned unvalidated
        return value
    return _call_validator(_type, optdict, name, value)

# backward-compatibility alias, deprecated since 0.60
convert = deprecated('[0.60] convert() was renamed _validate()')(_validate)
# format and output functions ##################################################
def comment(string):
    """Return `string` rendered as a '# '-prefixed comment, one comment line
    per input line, joined with the platform line separator."""
    stripped_lines = (line.strip() for line in string.splitlines())
    return '# ' + ('%s# ' % os.linesep).join(stripped_lines)
def format_time(value):
    """Render a duration in seconds as a compact human-readable string.

    Non-integral values keep two decimals ('1.50s'); integral values are
    promoted to the largest unit that divides them exactly (s, min, h, d).
    """
    if not value:
        return '0'
    if value != int(value):
        return '%.2fs' % value
    seconds = int(value)
    minutes, leftover_secs = divmod(seconds, 60)
    if leftover_secs:
        return '%ss' % seconds
    hours, leftover_mins = divmod(minutes, 60)
    if leftover_mins:
        return '%smin' % minutes
    days, leftover_hours = divmod(hours, 24)
    if leftover_hours:
        return '%sh' % hours
    return '%sd' % days
def format_bytes(value):
    """Render a byte count as a compact human-readable string.

    Non-integral values keep two decimals ('2.50B'); integral values are
    promoted only through units that divide them by 1024 exactly.
    """
    if not value:
        return '0'
    if value != int(value):
        return '%.2fB' % value
    amount = int(value)
    unit = 'B'
    for bigger_unit in ('KB', 'MB', 'GB', 'TB'):
        quotient, remainder = divmod(amount, 1024)
        if remainder:
            return '%s%s' % (amount, unit)
        unit = bigger_unit
        amount = quotient
    return '%s%s' % (amount, unit)
def format_option_value(optdict, value):
    """return the user input's value from a 'compiled' value

    Reverse of validation: sequences/dicts become comma-separated strings,
    compiled regexps their pattern, booleans yes/no, durations and byte
    counts human-readable strings.
    """
    if isinstance(value, (list, tuple)):
        value = ','.join(value)
    elif isinstance(value, dict):
        value = ','.join(['%s:%s' % (k, v) for k, v in value.items()])
    elif hasattr(value, 'match'): # optdict.get('type') == 'regexp'
        # compiled regexp
        value = value.pattern
    elif optdict.get('type') == 'yn':
        value = value and 'yes' or 'no'
    elif isinstance(value, (str, unicode)) and value.isspace():
        # quote whitespace-only strings so they survive a config round-trip
        value = "'%s'" % value
    elif optdict.get('type') == 'time' and isinstance(value, (float, int, long)):
        value = format_time(value)
    elif optdict.get('type') == 'bytes' and hasattr(value, '__int__'):
        value = format_bytes(value)
    return value
def ini_format_section(stream, section, options, encoding=None, doc=None):
    """format an options section using the INI format

    `doc`, when given, is emitted as a comment above the [section] header.
    """
    encoding = _get_encoding(encoding, stream)
    if doc:
        print >> stream, _encode(comment(doc), encoding)
    print >> stream, '[%s]' % section
    ini_format(stream, options, encoding)
def ini_format(stream, options, encoding):
    """format options using the INI format

    `options` is an iterable of (name, optdict, value) triples. Options with
    a None value are written commented-out ('#name=') so the sample file
    documents them without setting them.
    """
    for optname, optdict, value in options:
        value = format_option_value(optdict, value)
        help = optdict.get('help')
        if help:
            help = normalize_text(help, line_len=79, indent='# ')
            print >> stream
            print >> stream, _encode(help, encoding)
        else:
            print >> stream
        if value is None:
            print >> stream, '#%s=' % optname
        else:
            value = _encode(value, encoding).strip()
            print >> stream, '%s=%s' % (optname, value)

# default section formatter used by the configuration writers
format_section = ini_format_section
def rest_format_section(stream, section, options, encoding=None, doc=None):
    """format an options section using the reStructuredText format (the
    section name becomes a quoted-underline title, each option a field)
    """
    encoding = _get_encoding(encoding, stream)
    if section:
        print >> stream, '%s\n%s' % (section, "'"*len(section))
    if doc:
        print >> stream, _encode(normalize_text(doc, line_len=79, indent=''),
                                 encoding)
        print >> stream
    for optname, optdict, value in options:
        help = optdict.get('help')
        print >> stream, ':%s:' % optname
        if help:
            help = normalize_text(help, line_len=79, indent='  ')
            print >> stream, _encode(help, encoding)
        if value:
            value = _encode(format_option_value(optdict, value), encoding)
            print >> stream, ''
            # escape double-backquote sequences inside the default value
            print >> stream, '  Default: ``%s``' % value.replace("`` ", "```` ``")
# Options Manager ##############################################################
class OptionsManagerMixIn(object):
"""MixIn to handle a configuration from both a configuration file and
command line options
"""
    def __init__(self, usage, config_file=None, version=None, quiet=0):
        """Initialize parsers and provider bookkeeping.

        :param usage: usage string passed to the command line parser
        :param config_file: optional path of the configuration file
        :param version: optional program version for --version
        :param quiet: verbosity flag (non-zero silences informational output)
        """
        self.config_file = config_file
        self.reset_parsers(usage, version=version)
        # list of registered options providers
        self.options_providers = []
        # dictionary associating option name to checker
        self._all_options = {}
        self._short_options = {}
        self._nocallback_options = {}
        self._mygroups = dict()
        # verbosity
        self.quiet = quiet
        # highest option 'level' seen; used to filter help output
        self._maxlevel = 0
    def reset_parsers(self, usage='', version=None):
        """(Re)create the configuration file and command line parsers."""
        # configuration file parser
        self.cfgfile_parser = ConfigParser()
        # command line parser
        self.cmdline_parser = optik_ext.OptionParser(usage=usage, version=version)
        # back-reference used by the expand_default monkey patch
        self.cmdline_parser.options_manager = self
        self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)
    def register_options_provider(self, provider, own_group=True):
        """register an options provider"""
        # NOTE(review): the message contradicts the check -- priority must
        # be <= 0; confirm intended wording upstream.
        assert provider.priority <= 0, "provider's priority can't be >= 0"
        # keep providers sorted by decreasing priority
        for i in range(len(self.options_providers)):
            if provider.priority > self.options_providers[i].priority:
                self.options_providers.insert(i, provider)
                break
        else:
            self.options_providers.append(provider)
        # options without an explicit 'group' key
        non_group_spec_options = [option for option in provider.options
                                  if 'group' not in option[1]]
        groups = getattr(provider, 'option_groups', ())
        if own_group and non_group_spec_options:
            # ungrouped options go into a group named after the provider
            self.add_option_group(provider.name.upper(), provider.__doc__,
                                  non_group_spec_options, provider)
        else:
            for opt, optdict in non_group_spec_options:
                self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
        for gname, gdoc in groups:
            gname = gname.upper()
            goptions = [option for option in provider.options
                        if option[1].get('group', '').upper() == gname]
            self.add_option_group(gname, gdoc, goptions, provider)
def add_option_group(self, group_name, doc, options, provider):
"""add an option group including the listed options
"""
assert options
# add option group to the command line parser
if group_name in self._mygroups:
group = self._mygroups[group_name]
else:
group = optik_ext.OptionGroup(self.cmdline_parser,
title=group_name.capitalize())
self.cmdline_parser.add_option_group(group)
group.level = provider.level
self._mygroups[group_name] = group
# add section to the config file
if group_name != "DEFAULT":
self.cfgfile_parser.add_section(group_name)
# add provider's specific options
for opt, optdict in options:
self.add_optik_option(provider, group, opt, optdict)
def add_optik_option(self, provider, optikcontainer, opt, optdict):
if 'inputlevel' in optdict:
warn('[0.50] "inputlevel" in option dictionary for %s is deprecated,'
' use "level"' % opt, DeprecationWarning)
optdict['level'] = optdict.pop('inputlevel')
args, optdict = self.optik_option(provider, opt, optdict)
option = optikcontainer.add_option(*args, **optdict)
self._all_options[opt] = provider
self._maxlevel = max(self._maxlevel, option.level or 0)
def optik_option(self, provider, opt, optdict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
optdict = copy(optdict)
others = {}
if 'action' in optdict:
self._nocallback_options[provider] = opt
else:
optdict['action'] = 'callback'
optdict['callback'] = self.cb_set_provider_option
# default is handled here and *must not* be given to optik if you
# want the whole machinery to work
if 'default' in optdict:
if ('help' in optdict
and optdict.get('default') is not None
and not optdict['action'] in ('store_true', 'store_false')):
optdict['help'] += ' [current: %default]'
del optdict['default']
args = ['--' + str(opt)]
if 'short' in optdict:
self._short_options[optdict['short']] = opt
args.append('-' + optdict['short'])
del optdict['short']
# cleanup option definition dict before giving it to optik
for key in optdict.keys():
if not key in self._optik_option_attrs:
optdict.pop(key)
return args, optdict
def cb_set_provider_option(self, option, opt, value, parser):
"""optik callback for option setting"""
if opt.startswith('--'):
# remove -- on long option
opt = opt[2:]
else:
# short option, get its long equivalent
opt = self._short_options[opt[1:]]
# trick since we can't set action='store_true' on options
if value is None:
value = 1
self.global_set_option(opt, value)
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value)
def generate_config(self, stream=None, skipsections=(), encoding=None):
"""write a configuration file according to the current configuration
into the given stream or stdout
"""
options_by_section = {}
sections = []
for provider in self.options_providers:
for section, options in provider.options_by_section():
if section is None:
section = provider.name
if section in skipsections:
continue
options = [(n, d, v) for (n, d, v) in options
if d.get('type') is not None]
if not options:
continue
if not section in sections:
sections.append(section)
alloptions = options_by_section.setdefault(section, [])
alloptions += options
stream = stream or sys.stdout
encoding = _get_encoding(encoding, stream)
printed = False
for section in sections:
if printed:
print >> stream, '\n'
format_section(stream, section.upper(), options_by_section[section],
encoding)
printed = True
def generate_manpage(self, pkginfo, section=1, stream=None):
"""write a man page for the current configuration into the given
stream or stdout
"""
self._monkeypatch_expand_default()
try:
optik_ext.generate_manpage(self.cmdline_parser, pkginfo,
section, stream=stream or sys.stdout,
level=self._maxlevel)
finally:
self._unmonkeypatch_expand_default()
# initialization methods ##################################################
def load_provider_defaults(self):
"""initialize configuration using default values"""
for provider in self.options_providers:
provider.load_defaults()
def load_file_configuration(self, config_file=None):
"""load the configuration from file"""
self.read_config_file(config_file)
self.load_config_file()
def read_config_file(self, config_file=None):
"""read the configuration file but do not load it (i.e. dispatching
values to each options provider)
"""
helplevel = 1
while helplevel <= self._maxlevel:
opt = '-'.join(['long'] * helplevel) + '-help'
if opt in self._all_options:
break # already processed
def helpfunc(option, opt, val, p, level=helplevel):
print self.help(level)
sys.exit(0)
helpmsg = '%s verbose help.' % ' '.join(['more'] * helplevel)
optdict = {'action' : 'callback', 'callback' : helpfunc,
'help' : helpmsg}
provider = self.options_providers[0]
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
provider.options += ( (opt, optdict), )
helplevel += 1
if config_file is None:
config_file = self.config_file
if config_file is not None:
config_file = expanduser(config_file)
if config_file and exists(config_file):
parser = self.cfgfile_parser
parser.read([config_file])
# normalize sections'title
for sect, values in parser._sections.items():
if not sect.isupper() and values:
parser._sections[sect.upper()] = values
elif not self.quiet:
msg = 'No config file found, using default configuration'
print >> sys.stderr, msg
return
def input_config(self, onlysection=None, inputlevel=0, stream=None):
"""interactively get configuration values by asking to the user and generate
a configuration file
"""
if onlysection is not None:
onlysection = onlysection.upper()
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
if onlysection is not None and section != onlysection:
continue
if not 'type' in optdict:
# ignore action without type (callback, store_true...)
continue
provider.input_option(option, optdict, inputlevel)
# now we can generate the configuration file
if stream is not None:
self.generate_config(stream)
def load_config_file(self):
"""dispatch values previously read from a configuration file to each
options provider)
"""
parser = self.cfgfile_parser
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
try:
value = parser.get(section, option)
provider.set_option(option, value, optdict=optdict)
except (NoSectionError, NoOptionError), ex:
continue
def load_configuration(self, **kwargs):
"""override configuration according to given parameters
"""
for opt, opt_value in kwargs.items():
opt = opt.replace('_', '-')
provider = self._all_options[opt]
provider.set_option(opt, opt_value)
def load_command_line_configuration(self, args=None):
"""override configuration according to command line parameters
return additional arguments
"""
self._monkeypatch_expand_default()
try:
if args is None:
args = sys.argv[1:]
else:
args = list(args)
(options, args) = self.cmdline_parser.parse_args(args=args)
for provider in self._nocallback_options.keys():
config = provider.config
for attr in config.__dict__.keys():
value = getattr(options, attr, None)
if value is None:
continue
setattr(config, attr, value)
return args
finally:
self._unmonkeypatch_expand_default()
# help methods ############################################################
def add_help_section(self, title, description, level=0):
"""add a dummy option section for help purpose """
group = optik_ext.OptionGroup(self.cmdline_parser,
title=title.capitalize(),
description=description)
group.level = level
self._maxlevel = max(self._maxlevel, level)
self.cmdline_parser.add_option_group(group)
def _monkeypatch_expand_default(self):
# monkey patch optik_ext to deal with our default values
try:
self.__expand_default_backup = optik_ext.HelpFormatter.expand_default
optik_ext.HelpFormatter.expand_default = expand_default
except AttributeError:
# python < 2.4: nothing to be done
pass
def _unmonkeypatch_expand_default(self):
# remove monkey patch
if hasattr(optik_ext.HelpFormatter, 'expand_default'):
# unpatch optik_ext to avoid side effects
optik_ext.HelpFormatter.expand_default = self.__expand_default_backup
def help(self, level=0):
"""return the usage string for available options """
self.cmdline_parser.formatter.output_level = level
self._monkeypatch_expand_default()
try:
return self.cmdline_parser.format_help()
finally:
self._unmonkeypatch_expand_default()
class Method(object):
    """Late-binding callable wrapper around a method name.

    Lets option definitions reference a method of the configuration
    instance before that instance exists: ``bind()`` attaches the
    instance later, and calling the wrapper dispatches to it.
    """
    def __init__(self, methname):
        self.method = methname   # name of the method to dispatch to
        self._inst = None        # bound instance, set by bind()
    def bind(self, instance):
        """bind the method to its instance"""
        if self._inst is not None:
            return  # first binding wins; later rebinds are ignored
        self._inst = instance
    def __call__(self, *args, **kwargs):
        assert self._inst, 'unbound method'
        target = getattr(self._inst, self.method)
        return target(*args, **kwargs)
# Options Provider #############################################################
class OptionsProviderMixIn(object):
    """Mixin to provide options to an OptionsManager"""
    # those attributes should be overridden
    priority = -1
    name = 'default'
    options = ()
    level = 0
    def __init__(self):
        """create the config holder and late-bind Method defaults/callbacks"""
        self.config = optik_ext.Values()
        for option in self.options:
            try:
                option, optdict = option
            except ValueError:
                raise Exception('Bad option: %r' % option)
            # Method instances are bound to this provider instance so they
            # can be called as defaults/callbacks later on
            if isinstance(optdict.get('default'), Method):
                optdict['default'].bind(self)
            elif isinstance(optdict.get('callback'), Method):
                optdict['callback'].bind(self)
        self.load_defaults()
    def load_defaults(self):
        """initialize the provider using default values"""
        for opt, optdict in self.options:
            action = optdict.get('action')
            if action != 'callback':
                # callback action have no default
                default = self.option_default(opt, optdict)
                if default is REQUIRED:
                    continue
                self.set_option(opt, default, action, optdict)
    def option_default(self, opt, optdict=None):
        """return the default value for an option"""
        if optdict is None:
            optdict = self.get_option_def(opt)
        default = optdict.get('default')
        # a callable default (e.g. a bound Method) is evaluated lazily
        if callable(default):
            default = default()
        return default
    def option_attrname(self, opt, optdict=None):
        """get the config attribute corresponding to opt
        """
        if optdict is None:
            optdict = self.get_option_def(opt)
        return optdict.get('dest', opt.replace('-', '_'))
    option_name = deprecated('[0.60] OptionsProviderMixIn.option_name() was renamed to option_attrname()')(option_attrname)
    def option_value(self, opt):
        """get the current value for the given option"""
        return getattr(self.config, self.option_attrname(opt), None)
    def set_option(self, opt, value, action=None, optdict=None):
        """method called to set an option (registered in the options list)
        """
        if optdict is None:
            optdict = self.get_option_def(opt)
        if value is not None:
            value = _validate(value, optdict, opt)
        if action is None:
            action = optdict.get('action', 'store')
        if optdict.get('type') == 'named': # XXX need specific handling
            # 'named' options are dictionaries: merge into existing value
            optname = self.option_attrname(opt, optdict)
            currentvalue = getattr(self.config, optname, None)
            if currentvalue:
                currentvalue.update(value)
                value = currentvalue
        if action == 'store':
            setattr(self.config, self.option_attrname(opt, optdict), value)
        elif action in ('store_true', 'count'):
            # initialize flag/counter options to their zero value
            setattr(self.config, self.option_attrname(opt, optdict), 0)
        elif action == 'store_false':
            setattr(self.config, self.option_attrname(opt, optdict), 1)
        elif action == 'append':
            opt = self.option_attrname(opt, optdict)
            _list = getattr(self.config, opt, None)
            if _list is None:
                if isinstance(value, (list, tuple)):
                    _list = value
                elif value is not None:
                    _list = []
                    _list.append(value)
                setattr(self.config, opt, _list)
            elif isinstance(_list, tuple):
                setattr(self.config, opt, _list + (value,))
            else:
                _list.append(value)
        elif action == 'callback':
            optdict['callback'](None, opt, value, None)
        else:
            raise UnsupportedAction(action)
    def input_option(self, option, optdict, inputlevel=99):
        """interactively ask the user for one option's value (python 2 I/O)"""
        default = self.option_default(option, optdict)
        if default is REQUIRED:
            defaultstr = '(required): '
        elif optdict.get('level', 0) > inputlevel:
            # option is beyond the requested verbosity level: skip it
            return
        elif optdict['type'] == 'password' or default is None:
            defaultstr = ': '
        else:
            defaultstr = '(default: %s): ' % format_option_value(optdict, default)
        print ':%s:' % option
        print optdict.get('help') or option
        inputfunc = INPUT_FUNCTIONS[optdict['type']]
        value = inputfunc(optdict, defaultstr)
        while default is REQUIRED and not value:
            print 'please specify a value'
            value = inputfunc(optdict, '%s: ' % option)
        if value is None and default is not None:
            value = default
        self.set_option(option, value, optdict=optdict)
    def get_option_def(self, opt):
        """return the dictionary defining an option given it's name"""
        assert self.options
        for option in self.options:
            if option[0] == opt:
                return option[1]
        raise OptionError('no such option %s in section %r'
                          % (opt, self.name), opt)
    def all_options(self):
        """return an iterator on available options for this provider
        option are actually described by a 3-uple:
        (section, option name, option dictionary)
        """
        for section, options in self.options_by_section():
            if section is None:
                if self.name is None:
                    continue
                section = self.name.upper()
            for option, optiondict, value in options:
                yield section, option, optiondict
    def options_by_section(self):
        """return an iterator on options grouped by section
        (section, [list of (optname, optdict, optvalue)])
        """
        sections = {}
        for optname, optdict in self.options:
            sections.setdefault(optdict.get('group'), []).append(
                (optname, optdict, self.option_value(optname)))
        # group-less options (key None) are yielded first
        if None in sections:
            yield None, sections.pop(None)
        for section, options in sections.items():
            yield section.upper(), options
    def options_and_values(self, options=None):
        """yield (name, optdict, current value) for each given option"""
        if options is None:
            options = self.options
        for optname, optdict in options:
            yield (optname, optdict, self.option_value(optname))
# configuration ################################################################
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
    """basic mixin for simple configurations which don't need the
    manager / providers model
    """
    def __init__(self, *args, **kwargs):
        if not args:
            kwargs.setdefault('usage', '')
        kwargs.setdefault('quiet', 1)
        OptionsManagerMixIn.__init__(self, *args, **kwargs)
        OptionsProviderMixIn.__init__(self)
        # collect option groups declared in the options themselves, unless
        # the class already defines them explicitly
        if not getattr(self, 'option_groups', None):
            self.option_groups = []
            for option, optdict in self.options:
                try:
                    gdef = (optdict['group'].upper(), '')
                except KeyError:
                    continue
                if not gdef in self.option_groups:
                    self.option_groups.append(gdef)
        self.register_options_provider(self, own_group=False)
    def register_options(self, options):
        """add some options to the configuration"""
        options_by_group = {}
        for optname, optdict in options:
            options_by_group.setdefault(
                optdict.get('group', self.name.upper()), []).append(
                    (optname, optdict))
        # BUG FIX: the previous code reused the name `options` as the loop
        # variable below, so `self.options += tuple(options)` only appended
        # the options of whichever group happened to be iterated last.
        for group, group_options in options_by_group.items():
            self.add_option_group(group, None, group_options, self)
        self.options += tuple(options)
    def load_defaults(self):
        """initialize options to their default values"""
        OptionsProviderMixIn.load_defaults(self)
    def __iter__(self):
        """iterate over (attribute name, value) pairs of the configuration"""
        return iter(self.config.__dict__.iteritems())
    def __getitem__(self, key):
        """dict-like read access to option values; raises KeyError"""
        try:
            return getattr(self.config, self.option_attrname(key))
        except (optik_ext.OptionValueError, AttributeError):
            raise KeyError(key)
    def __setitem__(self, key, value):
        """dict-like write access to option values"""
        self.set_option(key, value)
    def get(self, key, default=None):
        """dict-like read access with a default instead of KeyError"""
        try:
            return getattr(self.config, self.option_attrname(key))
        except (OptionError, AttributeError):
            return default
class Configuration(ConfigurationMixIn):
    """class for simple configurations which don't need the
    manager / providers model and prefer delegation to inheritance
    configuration values are accessible through a dict like interface
    """
    def __init__(self, config_file=None, options=None, name=None,
                 usage=None, doc=None, version=None):
        # override class-level attributes only when explicitly provided, so
        # subclasses may still declare them at the class level
        for attr, value in (('options', options), ('name', name),
                            ('__doc__', doc)):
            if value is not None:
                setattr(self, attr, value)
        super(Configuration, self).__init__(config_file=config_file,
                                            usage=usage, version=version)
class OptionsManager2ConfigurationAdapter(object):
    """Wrap an options manager so it can be used through the dict-like
    `logilab.common.configuration.Configuration` interface.
    """
    def __init__(self, provider):
        self.config = provider
    def __getattr__(self, key):
        # delegate every unknown attribute to the wrapped manager
        return getattr(self.config, key)
    def __getitem__(self, key):
        """dict-like read access; raises KeyError for unset options"""
        provider = self.config._all_options[key]
        attrname = provider.option_attrname(key)
        try:
            return getattr(provider.config, attrname)
        except AttributeError:
            raise KeyError(key)
    def __setitem__(self, key, value):
        """dict-like write access, routed through the manager"""
        self.config.global_set_option(self.config.option_attrname(key), value)
    def get(self, key, default=None):
        """dict-like read access with a default instead of KeyError"""
        provider = self.config._all_options[key]
        attrname = provider.option_attrname(key)
        try:
            return getattr(provider.config, attrname)
        except AttributeError:
            return default
# other functions ##############################################################
def read_old_config(newconfig, changes, configfile):
    """initialize newconfig from a deprecated configuration file
    possible changes:
    * ('renamed', oldname, newname)
    * ('moved', option, oldgroup, newgroup)
    * ('typechanged', option, oldtype, newvalue)
    """
    # build an index of changes
    changesindex = {}
    for action in changes:
        if action[0] == 'moved':
            option, oldgroup, newgroup = action[1:]
            changesindex.setdefault(option, []).append((action[0], oldgroup, newgroup))
            continue
        if action[0] == 'renamed':
            oldname, newname = action[1:]
            changesindex.setdefault(newname, []).append((action[0], oldname))
            continue
        if action[0] == 'typechanged':
            option, oldtype, newvalue = action[1:]
            changesindex.setdefault(option, []).append((action[0], oldtype, newvalue))
            continue
        # NOTE(review): this branch tests action[1], unlike every other
        # branch which tests action[0] -- confirm the expected tuple shape
        # for 'added'/'removed' changes before relying on it
        if action[1] in ('added', 'removed'):
            continue # nothing to do here
        raise Exception('unknown change %s' % action[0])
    # build a config object able to read the old config: apply the recorded
    # changes *backwards* to each of newconfig's option definitions
    options = []
    for optname, optdef in newconfig.options:
        for action in changesindex.pop(optname, ()):
            if action[0] == 'moved':
                oldgroup, newgroup = action[1:]
                optdef = optdef.copy()
                optdef['group'] = oldgroup
            elif action[0] == 'renamed':
                optname = action[1]
            elif action[0] == 'typechanged':
                oldtype = action[1]
                optdef = optdef.copy()
                optdef['type'] = oldtype
        options.append((optname, optdef))
    if changesindex:
        raise Exception('unapplied changes: %s' % changesindex)
    oldconfig = Configuration(options=options, name=newconfig.name)
    # read the old config
    oldconfig.load_file_configuration(configfile)
    # apply values reverting changes
    # (note: reverses the caller's `changes` list in place)
    changes.reverse()
    done = set()
    for action in changes:
        if action[0] == 'renamed':
            oldname, newname = action[1:]
            newconfig[newname] = oldconfig[oldname]
            done.add(newname)
        elif action[0] == 'typechanged':
            optname, oldtype, newvalue = action[1:]
            newconfig[optname] = newvalue
            done.add(optname)
    # finally copy over every remaining typed option untouched above
    for optname, optdef in newconfig.options:
        if optdef.get('type') and not optname in done:
            newconfig.set_option(optname, oldconfig[optname], optdict=optdef)
def merge_options(options, optgroup=None):
    """Deduplicate a sequence of (name, optdict) options.

    Return a new tuple of options in which each name appears once, at the
    position of its *last* occurrence; duplicated definitions are merged,
    earlier definitions taking precedence over later ones.  Option
    dictionaries are copied to avoid later side effects.  If `optgroup` is
    given, every kept option is placed in that group.
    """
    merged = {}
    result = list(options)
    # walk backwards so the last occurrence of each name is the one kept
    for index in reversed(range(len(result))):
        optname, optdict = result[index]
        if optname in merged:
            # earlier duplicate: drop it and merge its keys on top
            result.pop(index)
            merged[optname].update(optdict)
        else:
            fresh = optdict.copy()
            result[index] = (optname, fresh)
            merged[optname] = fresh
        if optgroup is not None:
            merged[optname]['group'] = optgroup
    return tuple(result)
|
unknown
|
codeparrot/codeparrot-clean
| ||
@import url('https://fonts.googleapis.com/icon?family=Material+Symbols+Outlined');
/* Component root: centers the combobox and defines its color tokens. */
:host {
  display: flex;
  justify-content: center;
  font-family: var(--inter-font);
  --primary: var(--hot-pink);
  --on-primary: var(--page-background);
}
.docs-light-mode {
  --on-primary: #fff;
}
/* Pill-shaped trigger wrapping the combobox input. */
.select {
  display: flex;
  position: relative;
  align-items: center;
  border-radius: 3rem;
  color: var(--on-primary);
  background-color: var(--primary);
  border: 1px solid color-mix(in srgb, var(--primary) 80%, transparent);
}
.select:hover {
  background-color: color-mix(in srgb, var(--primary) 90%, transparent);
}
/* Dim the trigger when the input is aria-disabled. */
.select:has([ngComboboxInput][aria-disabled='true']) {
  opacity: 0.6;
  cursor: default;
}
/* The real input is invisible; the visible label sits on top of it. */
[ngComboboxInput] {
  opacity: 0;
  border: none;
  cursor: pointer;
  height: 3rem;
  padding: 0 1.5rem;
}
[ngCombobox]:focus-within .select {
  outline: 2px solid var(--primary);
  outline-offset: 2px;
}
/* Visible label overlaid on the hidden input; clicks pass through. */
.combobox-label {
  gap: 1rem;
  left: 1.5rem;
  display: flex;
  position: absolute;
  align-items: center;
  pointer-events: none;
}
/* Chevron that flips while the popup is expanded. */
.example-arrow {
  right: 1rem;
  position: absolute;
  pointer-events: none;
  transition: transform 150ms ease-in-out;
}
[ngComboboxInput][aria-expanded='true'] ~ .example-arrow {
  transform: rotate(180deg);
}
/* Popup panel: open state (animated via max-height/opacity). */
.example-popup-container {
  width: 100%;
  padding: 0.5rem;
  margin-top: 8px;
  border-radius: 2rem;
  background-color: var(--septenary-contrast);
  font-size: 0.9rem;
  max-height: 13rem;
  opacity: 1;
  visibility: visible;
  transition:
    max-height 150ms ease-out,
    visibility 0s,
    opacity 25ms ease-out;
}
[ngListbox] {
  gap: 2px;
  padding: 2px;
  height: 100%;
  display: flex;
  overflow: auto;
  flex-direction: column;
}
/* Popup panel: collapsed state while the input is not expanded. */
[ngCombobox]:has([ngComboboxInput][aria-expanded='false']) .example-popup-container {
  max-height: 0;
  opacity: 0;
  visibility: hidden;
  transition:
    max-height 150ms ease-in,
    visibility 0s 150ms,
    opacity 150ms ease-in;
}
[ngCombobox]:has([ngComboboxInput][aria-expanded='true']) [ngListbox] {
  display: flex;
}
/* Individual option rows. */
[ngOption] {
  display: flex;
  cursor: pointer;
  align-items: center;
  padding: 0 1rem;
  min-height: 3rem;
  border-radius: 3rem;
}
[ngOption]:hover,
[ngOption][data-active='true'] {
  background-color: color-mix(in srgb, var(--primary-contrast) 5%, transparent);
}
/* Keyboard-active option gets a visible focus ring. */
[ngOption][data-active='true'] {
  outline-offset: -2px;
  outline: 2px solid var(--primary);
}
[ngOption][aria-selected='true'] {
  color: var(--primary);
  background-color: color-mix(in srgb, var(--primary) 10%, transparent);
}
/* Check mark is only shown on the selected option. */
[ngOption]:not([aria-selected='true']) .example-option-check {
  display: none;
}
.example-option-check {
  font-size: 0.9rem;
}
.example-option-text {
  flex: 1;
}
|
css
|
github
|
https://github.com/angular/angular
|
adev/src/content/examples/aria/select/src/basic/material/app/app.css
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import platform
from tensorflow.core.util import test_log_pb2
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
def _gather_gpu_devices_proc():
  """Try to gather NVidia GPU device information via /proc/driver."""
  gpus = []
  for info_path in gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
    # The bus id is the directory name:
    # /proc/driver/nvidia/gpus/<bus_id>/information
    bus_id = info_path.split("/")[5]
    raw = {}
    for line in gfile.GFile(info_path, "r"):
      key, value = line.rstrip().replace("\t", "").split(":", 1)
      raw[key.lower()] = value.strip(" ").rstrip(" ")
    gpu = test_log_pb2.GPUInfo()
    gpu.model = raw.get("model", "Unknown")
    gpu.uuid = raw.get("gpu uuid", "Unknown")
    gpu.bus_id = bus_id
    gpus.append(gpu)
  return gpus
class CUDADeviceProperties(ct.Structure):
  """ctypes mirror of CUDA's `cudaDeviceProp` struct.

  Field names, order and types must match the C definition exactly so
  that `cudaGetDeviceProperties` can fill the structure in place.
  """
  # See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
  # the cudaDeviceProp struct.
  _fields_ = [
      ("name", ct.c_char * 256),
      ("totalGlobalMem", ct.c_size_t),
      ("sharedMemPerBlock", ct.c_size_t),
      ("regsPerBlock", ct.c_int),
      ("warpSize", ct.c_int),
      ("memPitch", ct.c_size_t),
      ("maxThreadsPerBlock", ct.c_int),
      ("maxThreadsDim", ct.c_int * 3),
      ("maxGridSize", ct.c_int * 3),
      ("clockRate", ct.c_int),
      ("totalConstMem", ct.c_size_t),
      ("major", ct.c_int),
      ("minor", ct.c_int),
      ("textureAlignment", ct.c_size_t),
      ("texturePitchAlignment", ct.c_size_t),
      ("deviceOverlap", ct.c_int),
      ("multiProcessorCount", ct.c_int),
      ("kernelExecTimeoutEnabled", ct.c_int),
      ("integrated", ct.c_int),
      ("canMapHostMemory", ct.c_int),
      ("computeMode", ct.c_int),
      ("maxTexture1D", ct.c_int),
      ("maxTexture1DMipmap", ct.c_int),
      ("maxTexture1DLinear", ct.c_int),
      ("maxTexture2D", ct.c_int * 2),
      ("maxTexture2DMipmap", ct.c_int * 2),
      ("maxTexture2DLinear", ct.c_int * 3),
      ("maxTexture2DGather", ct.c_int * 2),
      ("maxTexture3D", ct.c_int * 3),
      ("maxTexture3DAlt", ct.c_int * 3),
      ("maxTextureCubemap", ct.c_int),
      ("maxTexture1DLayered", ct.c_int * 2),
      ("maxTexture2DLayered", ct.c_int * 3),
      ("maxTextureCubemapLayered", ct.c_int * 2),
      ("maxSurface1D", ct.c_int),
      ("maxSurface2D", ct.c_int * 2),
      ("maxSurface3D", ct.c_int * 3),
      ("maxSurface1DLayered", ct.c_int * 2),
      ("maxSurface2DLayered", ct.c_int * 3),
      ("maxSurfaceCubemap", ct.c_int),
      ("maxSurfaceCubemapLayered", ct.c_int * 2),
      ("surfaceAlignment", ct.c_size_t),
      ("concurrentKernels", ct.c_int),
      ("ECCEnabled", ct.c_int),
      ("pciBusID", ct.c_int),
      ("pciDeviceID", ct.c_int),
      ("pciDomainID", ct.c_int),
      ("tccDriver", ct.c_int),
      ("asyncEngineCount", ct.c_int),
      ("unifiedAddressing", ct.c_int),
      ("memoryClockRate", ct.c_int),
      ("memoryBusWidth", ct.c_int),
      ("l2CacheSize", ct.c_int),
      ("maxThreadsPerMultiProcessor", ct.c_int),
      ("streamPrioritiesSupported", ct.c_int),
      ("globalL1CacheSupported", ct.c_int),
      ("localL1CacheSupported", ct.c_int),
      ("sharedMemPerMultiprocessor", ct.c_size_t),
      ("regsPerMultiprocessor", ct.c_int),
      ("managedMemSupported", ct.c_int),
      ("isMultiGpuBoard", ct.c_int),
      ("multiGpuBoardGroupID", ct.c_int),
      # Pad with extra space to avoid dereference crashes if future
      # versions of CUDA extend the size of this struct.
      ("__future_buffer", ct.c_char * 4096)
  ]
def _gather_gpu_devices_cudart():
  """Try to gather NVidia GPU device information via libcudart.

  Returns:
    A list of test_log_pb2.GPUInfo messages (no UUID is available here).

  Raises:
    NotImplementedError: on an unsupported OS or a CUDA runtime < 6.5.
    OSError: if the CUDA runtime library cannot be loaded.
    ValueError: if a CUDA runtime call fails.
  """
  dev_info = []
  system = platform.system()
  if system == "Linux":
    libcudart = ct.cdll.LoadLibrary("libcudart.so")
  elif system == "Darwin":
    libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
  elif system == "Windows":
    libcudart = ct.windll.LoadLibrary("libcudart.dll")
  else:
    raise NotImplementedError("Cannot identify system.")
  version = ct.c_int()
  rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
  if rc != 0:
    raise ValueError("Could not get version")
  if version.value < 6050:
    raise NotImplementedError("CUDA version must be between >= 6.5")
  device_count = ct.c_int()
  libcudart.cudaGetDeviceCount(ct.byref(device_count))
  for i in range(device_count.value):
    properties = CUDADeviceProperties()
    rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
    if rc != 0:
      raise ValueError("Could not get device properties")
    # BUG FIX: the previous code passed an immutable Python string through
    # ct.c_char_p for the runtime to write into; the ctypes docs forbid
    # this (it mutates an immutable object). Use a real writable buffer.
    pci_bus_id = ct.create_string_buffer(13)
    rc = libcudart.cudaDeviceGetPCIBusId(pci_bus_id, 13, i)
    if rc != 0:
      raise ValueError("Could not get device PCI bus id")
    info = test_log_pb2.GPUInfo()  # No UUID available
    info.model = properties.name
    info.bus_id = pci_bus_id.value  # NUL-terminated string from the runtime
    dev_info.append(info)
    del properties
  return dev_info
def gather_gpu_devices():
  """Gather gpu device info.

  Returns:
    A list of test_log_pb2.GPUInfo messages.
  """
  # /proc is preferred because it also exposes the GPU UUID.
  try:
    proc_devices = _gather_gpu_devices_proc()
    if proc_devices:
      return proc_devices
  except (IOError, ValueError, errors.OpError):
    pass
  # Fall back on querying the CUDA runtime directly.
  try:
    return _gather_gpu_devices_cudart()
  except (OSError, ValueError, NotImplementedError, errors.OpError):
    return []
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""blink_tool.py is a tool with multiple sub-commands with different purposes.
It has commands for printing expectations, fetching new test baselines, etc.
These commands don't necessarily have anything to do with each other.
"""
import logging
import optparse
import sys
from blinkpy.common.host import Host
from blinkpy.tool.commands.analyze_baselines import AnalyzeBaselines
from blinkpy.tool.commands.command import HelpPrintingOptionParser
from blinkpy.tool.commands.copy_existing_baselines import CopyExistingBaselines
from blinkpy.tool.commands.flaky_tests import FlakyTests
from blinkpy.tool.commands.help_command import HelpCommand
from blinkpy.tool.commands.optimize_baselines import OptimizeBaselines
from blinkpy.tool.commands.pretty_diff import PrettyDiff
from blinkpy.tool.commands.queries import CrashLog
from blinkpy.tool.commands.queries import PrintBaselines
from blinkpy.tool.commands.queries import PrintExpectations
from blinkpy.tool.commands.rebaseline import Rebaseline
from blinkpy.tool.commands.rebaseline_cl import RebaselineCL
from blinkpy.tool.commands.rebaseline_test import RebaselineTest
_log = logging.getLogger(__name__)
class BlinkTool(Host):
    # FIXME: It might make more sense if this class had a Host attribute
    # instead of being a Host subclass.

    # Options shared by every sub-command.
    global_options = [
        optparse.make_option(
            '-v',
            '--verbose',
            action='store_true',
            dest='verbose',
            default=False,
            help='enable all logging'),
        optparse.make_option(
            '-d',
            '--directory',
            action='append',
            default=[],
            help='Directory to look at for changed files'),
    ]

    def __init__(self, path):
        super(BlinkTool, self).__init__()
        self._path = path
        self.commands = [
            AnalyzeBaselines(),
            CopyExistingBaselines(),
            CrashLog(),
            FlakyTests(),
            OptimizeBaselines(),
            PrettyDiff(),
            PrintBaselines(),
            PrintExpectations(),
            Rebaseline(),
            RebaselineCL(),
            RebaselineTest(),
        ]
        self.help_command = HelpCommand(tool=self)
        self.commands.append(self.help_command)

    def main(self, argv=None):
        """Parse argv, dispatch to the named sub-command and return its result."""
        argv = argv or sys.argv
        (command_name, args) = self._split_command_name_from_args(argv[1:])
        option_parser = self._create_option_parser()
        self._add_global_options(option_parser)
        command = self.command_by_name(command_name) or self.help_command
        if not command:
            # Defensive: only reachable if help_command were ever falsy.
            # BUG FIX: OptionParser.error() takes a single message string;
            # the old printf-style call would have raised TypeError.
            option_parser.error('%s is not a recognized command' % command_name)
        command.set_option_parser(option_parser)
        (options, args) = command.parse_args(args)
        result = command.check_arguments_and_execute(options, args, self)
        return result

    def path(self):
        """Return the path this tool was constructed with."""
        return self._path

    @staticmethod
    def _split_command_name_from_args(args):
        # Assume the first argument which doesn't start with "-" is the command name.
        command_index = 0
        for arg in args:
            if arg[0] != '-':
                break
            command_index += 1
        else:
            # Only global options were given; there is no command.
            return (None, args[:])
        command = args[command_index]
        return (command, args[:command_index] + args[command_index + 1:])

    def _create_option_parser(self):
        """Build the shared option parser with help epilog support."""
        usage = 'Usage: %prog [options] COMMAND [ARGS]'
        name = optparse.OptionParser().get_prog_name()
        return HelpPrintingOptionParser(
            epilog_method=self.help_command.help_epilog,
            prog=name,
            usage=usage)

    def _add_global_options(self, option_parser):
        global_options = self.global_options or []
        for option in global_options:
            option_parser.add_option(option)

    def name(self):
        """Return the program name as derived by optparse."""
        return optparse.OptionParser().get_prog_name()

    def should_show_in_main_help(self, command):
        return command.show_in_main_help

    def command_by_name(self, command_name):
        """Return the registered command matching `command_name`, or None."""
        for command in self.commands:
            if command_name == command.name:
                return command
        return None
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
namespace Illuminate\Database\Events;
use Illuminate\Contracts\Database\Events\MigrationEvent;
class NoPendingMigrations implements MigrationEvent
{
    /**
     * The migration method that was called.
     *
     * @var string
     */
    public $method;

    /**
     * Create a new event instance.
     *
     * @param  string  $method
     * @return void
     */
    public function __construct($method)
    {
        $this->method = $method;
    }
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Database/Events/NoPendingMigrations.php
|
# -*- coding: utf-8 -*-
"""
Inline Gettext
~~~~~~~~~~~~~~
An example extension for Jinja2 that supports inline gettext calls.
Requires the i18n extension to be loaded.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2.exceptions import TemplateSyntaxError
# Matches a gettext opener in raw template data: "gettext(" or "_(",
# optionally escaped with a leading backslash (which disables expansion).
_outside_re = re.compile(r'\\?(gettext|_)\(')
# Inside an open gettext span we only care about (possibly escaped) parens.
_inside_re = re.compile(r'\\?[()]')
class InlineGettext(Extension):
    """This extension implements support for inline gettext blocks::

        <h1>_(Welcome)</h1>
        <p>_(This is a paragraph)</p>

    Requires the i18n extension to be loaded and configured.
    """

    def filter_stream(self, stream):
        """Rewrite the token stream, wrapping ``_(...)`` spans in trans blocks.

        Raises TemplateSyntaxError when a gettext expression is still open
        at the end of the stream.
        """
        paren_stack = 0
        for token in stream:
            # Bug fix: the original compared strings with "is not", which
            # only works because CPython interns these literals; use != .
            if token.type != 'data':
                yield token
                continue
            pos = 0
            lineno = token.lineno
            while 1:
                if not paren_stack:
                    match = _outside_re.search(token.value, pos)
                else:
                    match = _inside_re.search(token.value, pos)
                if match is None:
                    break
                new_pos = match.start()
                if new_pos > pos:
                    # Emit the data between the previous match and this one.
                    preval = token.value[pos:new_pos]
                    yield Token(lineno, 'data', preval)
                    lineno += count_newlines(preval)
                gtok = match.group()
                if gtok[0] == '\\':
                    # Escaped marker: emit literally without the backslash.
                    yield Token(lineno, 'data', gtok[1:])
                elif not paren_stack:
                    # Open a {% trans %} block.
                    yield Token(lineno, 'block_begin', None)
                    yield Token(lineno, 'name', 'trans')
                    yield Token(lineno, 'block_end', None)
                    paren_stack = 1
                else:
                    if gtok == '(' or paren_stack > 1:
                        yield Token(lineno, 'data', gtok)
                    paren_stack += -1 if gtok == ')' else 1
                    if not paren_stack:
                        # Close with {% endtrans %}.
                        yield Token(lineno, 'block_begin', None)
                        yield Token(lineno, 'name', 'endtrans')
                        yield Token(lineno, 'block_end', None)
                pos = match.end()
            if pos < len(token.value):
                yield Token(lineno, 'data', token.value[pos:])
        if paren_stack:
            raise TemplateSyntaxError('unclosed gettext expression',
                                      token.lineno, stream.name,
                                      stream.filename)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestTristimulus(TestCase):
    """Unit tests for the Tristimulus algorithm."""

    def testZeroMag(self):
        # All-zero magnitudes must produce an all-zero tristimulus.
        self.assertEqualVector(
            Tristimulus()([23, 500, 3200, 9000, 10000], [0, 0, 0, 0, 0]),
            [0, 0, 0])

    def test3Freqs(self):
        self.assertAlmostEqualVector(
            Tristimulus()([100, 200, 300], [1, 2, 3]),
            [0.1666666667, 0, 0])

    def test4Freqs(self):
        self.assertAlmostEqualVector(
            Tristimulus()([100, 435, 6547, 24324], [1, 2, 3, 4]),
            [.1, .9, 0])

    def test5Freqs(self):
        self.assertAlmostEqualVector(
            Tristimulus()([100, 324, 5678, 5899, 60000], [1, 2, 3, 4, 5]),
            [0.0666666667, .6, 0.33333333333])

    def testFrequencyOrder(self):
        # Frequencies must be given in ascending order.
        self.assertComputeFails(Tristimulus(), [1, 2, 1.1], [0, 0, 0])

    def testFreqMagDiffSize(self):
        # Frequency and magnitude vectors must have matching lengths.
        self.assertComputeFails(Tristimulus(), [1], [])

    def testEmpty(self):
        self.assertEqualVector(Tristimulus()([], []), [0, 0, 0])
# Collect the tests above into a suite; run verbosely when invoked directly.
suite = allTests(TestTristimulus)
if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module documentation/examples consumed by Ansible's doc tooling (YAML).
DOCUMENTATION = '''
---
module: zabbix_hostmacro
short_description: Zabbix host macro creates/updates/deletes
description:
   - manages Zabbix host macros, it can create, update or delete them.
version_added: "2.0"
author:
    - "(@cave)"
    - Dean Hailin Song
requirements:
    - "python >= 2.6"
    - zabbix-api
options:
    server_url:
        description:
            - Url of Zabbix server, with protocol (http or https).
        required: true
        aliases: [ "url" ]
    login_user:
        description:
            - Zabbix user name.
        required: true
    login_password:
        description:
            - Zabbix user password.
        required: true
    host_name:
        description:
            - Name of the host.
        required: true
    macro_name:
        description:
            - Name of the host macro.
        required: true
    macro_value:
        description:
            - Value of the host macro.
        required: true
    state:
        description:
            - State of the macro.
            - On C(present), it will create if macro does not exist or update the macro if the associated data is different.
            - On C(absent) will remove a macro if it exists.
        required: false
        choices: ['present', 'absent']
        default: "present"
    timeout:
        description:
            - The timeout of API request (seconds).
        default: 10
'''

# Bug fix: the example YAML was invalid -- "macro_name:Example macro" and
# "macro_value:Example value" were missing the space after the colon.
EXAMPLES = '''
- name: Create a new host macro or update an existing macro's value
  local_action:
    module: zabbix_hostmacro
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    host_name: ExampleHost
    macro_name: Example macro
    macro_value: Example value
    state: present
'''
import logging
import copy
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, no higher version so far).
class ZabbixAPIExtends(ZabbixAPI):
    # Thin wrapper whose only purpose is to forward the request timeout to
    # the underlying ZabbixAPI constructor; extra kwargs are ignored.
    def __init__(self, server, timeout, **kwargs):
        ZabbixAPI.__init__(self, server, timeout=timeout)
class HostMacro(object):
    """CRUD helper for Zabbix host-level user macros.

    All methods report results through the AnsibleModule passed to the
    constructor (exit_json/fail_json), so they do not return on success.
    Note: 'except Exception as e' (valid on Python 2.6+) replaces the
    Python-2-only 'except Exception, e' syntax throughout.
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    def is_host_exist(self, host_name):
        """Return whether a host with the given name exists."""
        result = self._zapi.host.exists({'host': host_name})
        return result

    def get_host_id(self, host_name):
        """Look up the host id for |host_name|; fail the module if absent."""
        try:
            host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
            if len(host_list) < 1:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                host_id = host_list[0]['hostid']
                return host_id
        except Exception as e:
            self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))

    def get_host_macro(self, macro_name, host_id):
        """Return the macro object named {$macro_name} on host_id, or None."""
        try:
            host_macro_list = self._zapi.usermacro.get(
                {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
            if len(host_macro_list) > 0:
                return host_macro_list[0]
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))

    def create_host_macro(self, macro_name, macro_value, host_id):
        """Create {$macro_name}=macro_value on host_id and exit the module."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))

    def update_host_macro(self, host_macro_obj, macro_name, macro_value):
        """Update an existing macro's value and exit the module."""
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
        # Message fix: "Failed to updated" -> "Failed to update".
        except Exception as e:
            self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))

    def delete_host_macro(self, host_macro_obj, macro_name):
        """Delete the given macro object and exit the module."""
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.delete([host_macro_id])
            self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
def main():
    """Entry point: parse parameters, connect to Zabbix, apply macro state."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True, aliases=['url']),
            login_user=dict(required=True),
            login_password=dict(required=True, no_log=True),
            host_name=dict(required=True),
            macro_name=dict(required=True),
            macro_value=dict(required=True),
            state=dict(default="present", choices=['present', 'absent']),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        # Typo fix in user-facing message: "requried" -> "required".
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    host_name = module.params['host_name']
    # Zabbix stores user macro names upper-cased.
    macro_name = (module.params['macro_name']).upper()
    macro_value = module.params['macro_value']
    state = module.params['state']
    timeout = module.params['timeout']
    zbx = None
    # login to zabbix ('except ... as' is valid on Python 2.6+ and 3.x)
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    host_macro_class_obj = HostMacro(module, zbx)
    if host_name:
        host_id = host_macro_class_obj.get_host_id(host_name)
        host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)
        if state == 'absent':
            if not host_macro_obj:
                module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
            else:
                # delete a macro
                host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
        else:
            if not host_macro_obj:
                # create host macro
                host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
            else:
                # update host macro
                host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
# Ansible module boilerplate: pull in AnsibleModule helpers, then run.
from ansible.module_utils.basic import *
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
import errno
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
def GetCommandOutput(command):
  """Runs the command list and returns its output (stdout) as a string.

  |command| is a list of one or more strings. stderr is discarded.
  From chromium_utils.
  """
  # Bug fix: the devnull file object was never closed (resource leak);
  # 'with' guarantees closure once the process has finished.
  with open(os.devnull, 'w') as devnull:
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
                            bufsize=1)
    output = proc.communicate()[0]
  return output
def GetDumpSymsBinary(build_dir=None):
"""Returns the path to the dump_syms binary."""
DUMP_SYMS = 'dump_syms'
dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
if not os.access(dump_syms_bin, os.X_OK):
print 'Cannot find %s.' % DUMP_SYMS
sys.exit(1)
return dump_syms_bin
def FindBundlePart(full_path):
  """Walk up |full_path| looking for a Mac bundle component.

  Returns the basename of the nearest ancestor (or the path itself) that
  ends in .dylib, .framework or .app, or '' when there is none.
  """
  path = full_path
  while path not in ('', '/'):
    if path.endswith(('.dylib', '.framework', '.app')):
      return os.path.basename(path)
    path = os.path.dirname(path)
  return ''
def GetDSYMBundle(options, binary_path):
  """Return the .dSYM bundle matching |binary_path| when one exists.

  Looks next to the binary first, then (for bundle-like names) inside
  the build and libchromiumcontent directories. Falls back to returning
  |binary_path| unchanged.
  """
  if os.path.isabs(binary_path):
    sibling = binary_path + '.dSYM'
    if os.path.exists(sibling):
      return sibling
  bundle_name = FindBundlePart(binary_path)
  if bundle_name.endswith(('.dylib', '.framework', '.app')):
    for search_dir in [options.build_dir, options.libchromiumcontent_dir]:
      candidate = os.path.join(search_dir, bundle_name) + '.dSYM'
      if os.path.exists(candidate):
        return candidate
  return binary_path
def GetSymbolPath(options, binary_path):
  """Return the .dbg companion of |binary_path| when present.

  The .dbg is searched in options.libchromiumcontent_dir; the binary
  path itself is returned when no companion exists.
  """
  candidate = os.path.join(options.libchromiumcontent_dir,
                           os.path.basename(binary_path)) + '.dbg'
  return candidate if os.path.exists(candidate) else binary_path
def Resolve(path, exe_path, loader_path, rpaths):
  """Resolve a dyld path.

  @executable_path is replaced with |exe_path|, @loader_path with
  |loader_path|, and @rpath with the first entry of |rpaths| under which
  the referenced file exists ('' when none matches).
  """
  resolved = path.replace('@loader_path', loader_path)
  resolved = resolved.replace('@executable_path', exe_path)
  if '@rpath' not in resolved:
    return resolved
  for rpath in rpaths:
    candidate = Resolve(resolved.replace('@rpath', rpath), exe_path,
                        loader_path, [])
    if os.access(candidate, os.F_OK):
      return candidate
  return ''
def GetSharedLibraryDependenciesLinux(binary):
  """Return absolute paths to all shared library dependencies of |binary|.

  Parses `ldd` output; assumes the script runs on a Linux system.
  """
  dep_re = re.compile('\t.* => (.+) \(.*\)$')
  deps = []
  for line in GetCommandOutput(['ldd', binary]).splitlines():
    match = dep_re.match(line)
    if match:
      deps.append(match.group(1))
  return deps
def GetSharedLibraryDependenciesMac(binary, exe_path):
  """Return absolute paths to all shared library dependencies of |binary|.

  Parses `otool -l` output for LC_RPATH load commands, then resolves each
  dependency reported by `otool -L` against those rpaths. Mac only.
  """
  loader_path = os.path.dirname(binary)
  otool = GetCommandOutput(['otool', '-l', binary]).splitlines()
  rpaths = []
  for idx, line in enumerate(otool):
    if line.find('cmd LC_RPATH') != -1:
      # The rpath value is printed two lines below the LC_RPATH line.
      m = re.match(' *path (.*) \(offset .*\)$', otool[idx+2])
      rpaths.append(m.group(1))
  otool = GetCommandOutput(['otool', '-L', binary]).splitlines()
  lib_re = re.compile('\t(.*) \(compatibility .*\)$')
  deps = []
  for line in otool:
    m = lib_re.match(line)
    if m:
      # Resolve dyld placeholders (@rpath etc.); skip unresolvable entries.
      dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
      if dep:
        deps.append(os.path.normpath(dep))
  return deps
def GetSharedLibraryDependencies(options, binary, exe_path):
"""Return absolute paths to all shared library dependecies of the binary."""
deps = []
if sys.platform.startswith('linux'):
deps = GetSharedLibraryDependenciesLinux(binary)
elif sys.platform == 'darwin':
deps = GetSharedLibraryDependenciesMac(binary, exe_path)
else:
print "Platform not supported."
sys.exit(1)
result = []
build_dir = os.path.abspath(options.build_dir)
for dep in deps:
if (os.access(dep, os.F_OK)):
result.append(dep)
return result
def mkdir_p(path):
  """Create |path| and any missing parents; no error if it already exists."""
  try:
    os.makedirs(path)
  except OSError as e:
    # Swallow only the "already exists as a directory" case.
    if not (e.errno == errno.EEXIST and os.path.isdir(path)):
      raise
def GenerateSymbols(options, binaries):
  """Dump breakpad symbols for each binary into options.symbols_dir.

  Work is fanned out to options.jobs daemon threads through a Queue; the
  call returns once every queued binary has been processed.
  """
  queue = Queue.Queue()
  print_lock = threading.Lock()
  def _Worker():
    # Daemon worker loop: runs until the main thread exits.
    while True:
      binary = queue.get()
      if options.verbose:
        with print_lock:
          print "Generating symbols for %s" % binary
      # On Mac prefer the .dSYM bundle; on Linux the split .dbg file.
      if sys.platform == 'darwin':
        binary = GetDSYMBundle(options, binary)
      elif sys.platform == 'linux2':
        binary = GetSymbolPath(options, binary)
      syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r', '-c',
                                binary])
      # First output line looks like: MODULE <os> <arch> <id> <name>
      # NOTE(review): if dump_syms fails, module_line is None and the next
      # statement raises AttributeError -- confirm fail-fast is intended.
      module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
      # Breakpad layout: <symbols_dir>/<name>/<id>/<name>.sym
      output_path = os.path.join(options.symbols_dir, module_line.group(2),
                                 module_line.group(1))
      mkdir_p(output_path)
      symbol_file = "%s.sym" % module_line.group(2)
      f = open(os.path.join(output_path, symbol_file), 'w')
      f.write(syms)
      f.close()
      queue.task_done()
  for binary in binaries:
    queue.put(binary)
  for _ in range(options.jobs):
    t = threading.Thread(target=_Worker)
    t.daemon = True
    t.start()
  queue.join()
def main():
  """Command-line entry point.

  Validates options, optionally clears the symbol directory, computes the
  transitive shared-library closure of --binary, and dumps breakpad
  symbols for every binary found. Returns a process exit code.
  """
  parser = optparse.OptionParser()
  parser.add_option('', '--build-dir', default='',
                    help='The build output directory.')
  parser.add_option('', '--symbols-dir', default='',
                    help='The directory where to write the symbols file.')
  parser.add_option('', '--libchromiumcontent-dir', default='',
                    help='The directory where libchromiumcontent is downloaded.')
  parser.add_option('', '--binary', default='',
                    help='The path of the binary to generate symbols for.')
  parser.add_option('', '--clear', default=False, action='store_true',
                    help='Clear the symbols directory before writing new '
                         'symbols.')
  parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
                    type='int', help='Number of parallel tasks to run.')
  parser.add_option('-v', '--verbose', action='store_true',
                    help='Print verbose status output.')
  (options, _) = parser.parse_args()
  # All directory options and --binary are mandatory.
  if not options.symbols_dir:
    print "Required option --symbols-dir missing."
    return 1
  if not options.build_dir:
    print "Required option --build-dir missing."
    return 1
  if not options.libchromiumcontent_dir:
    print "Required option --libchromiumcontent-dir missing."
    return 1
  if not options.binary:
    print "Required option --binary missing."
    return 1
  if not os.access(options.binary, os.X_OK):
    print "Cannot find %s." % options.binary
    return 1
  if options.clear:
    try:
      shutil.rmtree(options.symbols_dir)
    except:
      # Best effort: the directory may simply not exist yet.
      pass
  # Build the transitive closure of all dependencies.
  binaries = set([options.binary])
  queue = [options.binary]
  exe_path = os.path.dirname(options.binary)
  while queue:
    # Breadth-first: newly found dependencies get scanned themselves.
    deps = GetSharedLibraryDependencies(options, queue.pop(0), exe_path)
    new_deps = set(deps) - binaries
    binaries |= new_deps
    queue.extend(list(new_deps))
  GenerateSymbols(options, binaries)
  return 0
if __name__ == '__main__':
  sys.exit(main())
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
    """Create a valid, unique WiX Id for string |s|.

    Some parts of .wxs need an Id attribute (for example the File and
    Directory directives). The charset is limited to A-Z, a-z, digits,
    underscores, periods. Each Id must begin with a letter or with a
    underscore. Google for "CNDL0015" for information about this.

    Requirements:
    * the string created must only contain chars from the target charset.
    * the string created must have a minimal editing distance from the
      original string.
    * the string created must be unique for the whole .wxs file.

    Idea:
    * filter out forbidden characters, check for a collision with the help
      of |id_set| and append the collision count when needed; make sure
      the id starts with a legal character.
    """
    # Bug fix: 'Z' was missing from the allowed charset.
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    # Bug fix: ids must not start with a digit or period; prefix with '_'.
    # (The old code did s += '_'+s, leaving the bad leading character.)
    if s[0] in '0123456789.':
        s = '_' + s
    # Bug fix: build a string, not a list -- a list is unhashable and
    # cannot serve as the id_set key (nor be concatenated with str).
    id = ''.join(c for c in s if c in charset)
    # did we already generate an id for this string?
    try:
        return id_set[id][s]
    except KeyError:
        # no we did not so initialize with the id
        if id not in id_set: id_set[id] = { s : id }
        # there is a collision, generate an id which is unique by appending
        # the collision number
        else: id_set[id][s] = id + str(len(id_set[id]))
        return id_set[id][s]
def is_dos_short_file_name(file):
    """Return True when |file| already is a valid DOS 8.3 short name."""
    stem, extension = os.path.splitext(file)
    # the extension includes the dot, so valid lengths are 0 or 2..4
    if len(extension) not in (0, 2, 3, 4):
        return False
    return file.isupper() and len(stem) <= 8
def gen_dos_short_file_name(file, filename_set):
    """Generate an 8.3-style short name for |file|, unique in |filename_set|.

    see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982

    These are no complete 8.3 dos short names. The ~ char is missing and
    replaced with one character from the filename. WiX warns about such
    filenames, since a collision might occur. Google for "CNDL1014" for
    more information.
    """
    # guard this to not confuse the generation
    if is_dos_short_file_name(file):
        return file
    fname, ext = os.path.splitext(file) # ext contains the dot
    # first try if it suffices to convert to upper
    file = file.upper()
    if is_dos_short_file_name(file):
        return file
    # strip forbidden characters.
    # Bug fix: join back into a string -- the old list comprehension left
    # |fname| as a list, which has no .upper() (AttributeError below).
    forbidden = '."/[]:;=, '
    fname = ''.join(c for c in fname if c not in forbidden)
    # check if we already generated a filename with the same number:
    # thisis1.txt, thisis2.txt etc.
    duplicate, num = not None, 1
    while duplicate:
        shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
                              str(num))
        if len(ext) >= 2:
            shortname = "%s%s" % (shortname, ext[:4].upper())
        duplicate, num = shortname in filename_set, num+1
    assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
    filename_set.append(shortname)
    return shortname
def create_feature_dict(files):
    """Group |files| into a feature hierarchy.

    Grouping keys come from each file's X_MSI_FEATURE or DOC FileTag;
    untagged files are collected under 'default'.
    """
    feature_map = {}
    def register(feature, node):
        # X_MSI_FEATURE may be a single value or a list of values.
        keys = feature if SCons.Util.is_List(feature) else [feature]
        for key in keys:
            feature_map.setdefault(key, []).append(node)
    for node in files:
        if hasattr(node, 'PACKAGING_X_MSI_FEATURE'):
            register(node.PACKAGING_X_MSI_FEATURE, node)
        elif hasattr(node, 'PACKAGING_DOC'):
            register('PACKAGING_DOC', node)
        else:
            register('default', node)
    return feature_map
def generate_guids(root):
    """Generate globally unique identifiers for the XML nodes needing them.

    Component tags have a special requirement: their UUID is only allowed
    to change if the list of their contained resources has changed. This
    allows for clean removal and proper updates. To handle this
    requirement, the uuid is generated by md5-hashing the whole subtree
    of the xml node.
    """
    from hashlib import md5
    # specify which tags need a guid and in which attribute this should be stored.
    needs_id = { 'Product'   : 'Id',
                 'Package'   : 'Id',
                 'Component' : 'Guid',
               }
    # find all XML nodes matching the key, hash their subtree, convert the
    # hash to a guid-shaped string and store it as an attribute.
    for (key,value) in needs_id.items():
        node_list = root.getElementsByTagName(key)
        attribute = value
        for node in node_list:
            # Bug fix: md5() requires bytes on Python 3; encoding is a
            # no-op for the ASCII output produced here on Python 2.
            hash = md5(node.toxml().encode('utf-8')).hexdigest()
            hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
            node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
    """Build-message callback: describe the WiX file being generated."""
    return "building WiX file %s" % (target[0].path)
def build_wxsfile(target, source, env):
    """Compile a .wxs file from the keywords given in env['msi_spec'] and
    by analyzing the tree of source nodes and their tags.

    Raises SCons.Errors.UserError when a mandatory package field is missing.
    """
    try:
        # Bug fix: use 'with' so the output file is always closed -- the
        # original opened the file and never closed it at all.
        with open(target[0].abspath, 'w') as wxs_file:
            # Create a document with the Wix root tag
            doc = Document()
            root = doc.createElement( 'Wix' )
            root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
            doc.appendChild( root )
            filename_set = [] # this is to circumvent duplicates in the shortnames
            id_set = {} # this is to circumvent duplicates in the ids
            # Create the content
            build_wxsfile_header_section(root, env)
            build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
            generate_guids(root)
            build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
            build_wxsfile_default_gui(root)
            build_license_file(target[0].get_dir(), env)
            # write the xml to a file
            wxs_file.write( doc.toprettyxml() )
            # call a user specified function
            if 'CHANGE_SPECFILE' in env:
                env['CHANGE_SPECFILE'](target, source)
    # 'except ... as' replaces the Python-2-only 'except KeyError, e'.
    except KeyError as e:
        raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
    """Create the default WiX target directory layout.

    Builds TARGETDIR/ProgramFilesFolder/<VENDOR>/<NAME-VERSION> under the
    Product tag (which must already exist in |root|) and returns the
    innermost Directory node, e.g. C:\\Programme\\Company\\Product-1.2\\.
    """
    doc = Document()
    target_dir = doc.createElement( 'Directory' )
    target_dir.attributes['Id'] = 'TARGETDIR'
    target_dir.attributes['Name'] = 'SourceDir'
    pfiles = doc.createElement( 'Directory' )
    pfiles.attributes['Id'] = 'ProgramFilesFolder'
    pfiles.attributes['Name'] = 'PFiles'
    vendor_dir = doc.createElement( 'Directory' )
    vendor_dir.attributes['Id'] = 'VENDOR_folder'
    vendor_dir.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
    vendor_dir.attributes['LongName'] = escape( VENDOR )
    project_folder = "%s-%s" % ( NAME, VERSION )
    project_dir = doc.createElement( 'Directory' )
    project_dir.attributes['Id'] = 'MY_DEFAULT_FOLDER'
    project_dir.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
    project_dir.attributes['LongName'] = escape( project_folder )
    # chain the directories together and hang them under the Product tag
    target_dir.childNodes.append( pfiles )
    pfiles.childNodes.append( vendor_dir )
    vendor_dir.childNodes.append( project_dir )
    root.getElementsByTagName('Product')[0].childNodes.append( target_dir )
    return project_dir
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
    """Build the Component sections of the wxs file with their included files.

    Files need to be specified in 8.3 format and in the long name format;
    long filenames are converted automatically. Features are specified
    with the 'X_MSI_FEATURE' or 'DOC' FileTag.
    """
    root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
    components = create_feature_dict( files )
    factory = Document()
    def get_directory( node, dir ):
        """Return the Directory node under |node| for path |dir|, creating
        missing intermediate nodes; returns |node| when dir is empty.
        """
        if dir == '' or not dir:
            return node
        Directory = node
        dir_parts = dir.split(os.path.sep)
        # to make sure that our directory ids are unique, the parent folders are
        # consecutively added to upper_dir
        upper_dir = ''
        # walk down the xml tree finding parts of the directory
        dir_parts = [d for d in dir_parts if d != '']
        for d in dir_parts[:]:
            already_created = [c for c in Directory.childNodes
                               if c.nodeName == 'Directory'
                               and c.attributes['LongName'].value == escape(d)]
            if already_created != []:
                Directory = already_created[0]
                dir_parts.remove(d)
                upper_dir += d
            else:
                break
        # create the remaining (not yet existing) directory nodes
        for d in dir_parts:
            nDirectory = factory.createElement( 'Directory' )
            nDirectory.attributes['LongName'] = escape( d )
            nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
            upper_dir += d
            nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
            Directory.childNodes.append( nDirectory )
            Directory = nDirectory
        return Directory
    for file in files:
        drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
        filename = os.path.basename( path )
        dirname = os.path.dirname( path )
        h = {
            # tagname : default value
            'PACKAGING_X_MSI_VITAL' : 'yes',
            'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
            'PACKAGING_X_MSI_LONGNAME' : filename,
            'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
            'PACKAGING_X_MSI_SOURCE' : file.get_path(),
            }
        # fill in the default tags given above.
        for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
            setattr( file, k, v )
        File = factory.createElement( 'File' )
        File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
        File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
        File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
        File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
        File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
        # create the <Component> Tag under which this file should appear
        Component = factory.createElement('Component')
        Component.attributes['DiskId'] = '1'
        Component.attributes['Id'] = convert_to_id( filename, id_set )
        # hang the component node under the root node and the file node
        # under the component node.
        Directory = get_directory( root, dirname )
        Directory.childNodes.append( Component )
        Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
    """Create the <Feature> tag hierarchy for the supplied xml tree.

    Finds all <Component>s and adds references to them under feature
    nodes; call after the tree has been built completely. Assumes a
    MY_DEFAULT_FOLDER Property is defined in the wxs file tree. A
    top-level feature carrying NAME and VERSION of the software is
    created. A PACKAGING_X_MSI_FEATURE can either be a string (used as
    both title and description) or a (title, description) tuple.
    """
    factory = Document()
    Feature = factory.createElement('Feature')
    Feature.attributes['Id'] = 'complete'
    Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
    Feature.attributes['Level'] = '1'
    Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
    Feature.attributes['Description'] = escape( SUMMARY )
    Feature.attributes['Display'] = 'expand'
    for (feature, files) in create_feature_dict(files).items():
        SubFeature = factory.createElement('Feature')
        SubFeature.attributes['Level'] = '1'
        if SCons.Util.is_Tuple(feature):
            # (title, description) tuple form.
            SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
            SubFeature.attributes['Title'] = escape(feature[0])
            SubFeature.attributes['Description'] = escape(feature[1])
        else:
            SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
            if feature=='default':
                SubFeature.attributes['Description'] = 'Main Part'
                SubFeature.attributes['Title'] = 'Main Part'
            elif feature=='PACKAGING_DOC':
                SubFeature.attributes['Description'] = 'Documentation'
                SubFeature.attributes['Title'] = 'Documentation'
            else:
                SubFeature.attributes['Description'] = escape(feature)
                SubFeature.attributes['Title'] = escape(feature)
        # build the componentrefs. As one of the design decision is that every
        # file is also a component we walk the list of files and create a
        # reference.
        for f in files:
            ComponentRef = factory.createElement('ComponentRef')
            ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
            SubFeature.childNodes.append(ComponentRef)
        Feature.childNodes.append(SubFeature)
    root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
    """Attach the stock WiX GUI to the .wxs document.

    Appends two <UIRef> children to the <Product> element: the
    'WixUI_Mondo' dialog set followed by 'WixUI_ErrorProgressText',
    the localized error/progress strings.
    """
    factory = Document()
    product_node = root.getElementsByTagName('Product')[0]
    # Order matters only for readability of the generated XML; WiX
    # resolves both references the same way.
    for ui_id in ('WixUI_Mondo', 'WixUI_ErrorProgressText'):
        ui_ref = factory.createElement('UIRef')
        ui_ref.attributes['Id'] = ui_id
        product_node.childNodes.append(ui_ref)
def build_license_file(directory, spec):
    """Write a License.rtf file into *directory* for the MSI license dialog.

    The RTF body is taken from spec['X_MSI_LICENSE_TEXT'] when present,
    otherwise the license name from spec['LICENSE'] is used as a stub.
    Nothing is written when neither tag yields any text.

    directory -- node object with a get_path() method naming the target dir
    spec      -- the packaging spec dictionary
    """
    name, text = '', ''
    try:
        name = spec['LICENSE']
        text = spec['X_MSI_LICENSE_TEXT']
    except KeyError:
        # Both tags are optional.  NOTE: when 'LICENSE' is missing, the
        # 'X_MSI_LICENSE_TEXT' lookup is never reached -- this mirrors the
        # historical behavior of the original implementation.
        pass
    if name != '' or text != '':
        # 'with' guarantees the handle is closed even if a write fails;
        # the original leaked the file object on error.  Also avoids
        # shadowing the 'file' builtin.
        with open(os.path.join(directory.get_path(), 'License.rtf'), 'w') as rtf:
            rtf.write('{\\rtf')
            if text != '':
                # RTF uses \par as the paragraph separator.
                rtf.write(text.replace('\n', '\\par '))
            else:
                rtf.write(name + '\\par\\par')
            rtf.write('}')
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
    """Populate the .wxs tree with the package meta-data header.

    Creates the <Product> and <Package> nodes, fills in the mandatory
    attributes (a missing required tag raises KeyError on purpose),
    copies the optional tags when present, and appends the hard-coded
    <Media> node.  Side effect: spec['X_MSI_LANGUAGE'] is defaulted to
    '1033' (English) when the caller did not set it.
    """
    factory = Document()
    product = factory.createElement('Product')
    package = factory.createElement('Package')
    root.childNodes.append(product)
    product.childNodes.append(package)
    # Default the language before the mandatory lookups below.
    spec.setdefault('X_MSI_LANGUAGE', '1033')  # English
    # Mandatory tags -- let KeyError propagate when one is missing.
    for attribute, tag in (('Name', 'NAME'),
                           ('Version', 'VERSION'),
                           ('Manufacturer', 'VENDOR'),
                           ('Language', 'X_MSI_LANGUAGE')):
        product.attributes[attribute] = escape(spec[tag])
    package.attributes['Description'] = escape(spec['SUMMARY'])
    # Optional tags -- silently skipped when absent.
    if 'DESCRIPTION' in spec:
        package.attributes['Comments'] = escape(spec['DESCRIPTION'])
    if 'X_MSI_UPGRADE_CODE' in spec:
        package.attributes['X_MSI_UPGRADE_CODE'] = escape(spec['X_MSI_UPGRADE_CODE'])
    # The media tag is hard-coded because our current model cannot express it.
    media = factory.createElement('Media')
    media.attributes['Id'] = '1'
    media.attributes['Cabinet'] = 'default.cab'
    media.attributes['EmbedCab'] = 'yes'
    root.getElementsByTagName('Product')[0].childNodes.append(media)
# This Builder is the entry point for the .wxs (WiX source) file compiler:
# build_wxsfile generates the specfile and string_wxsfile supplies the
# build message shown to the user; ensure_suffix forces a '.wxs' target.
wxs_builder = Builder(
    action = Action( build_wxsfile, string_wxsfile ),
    ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
            DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
    """Build an MSI package from *source*.

    Entry point called by the SCons packaging framework: generates a .wxs
    specfile with wxs_builder, then hands it to the WiX tool.  Returns the
    (msifile, source + [specfile]) tuple expected by the framework.
    """
    # make sure that the Wix Builder is in the environment
    SCons.Tool.Tool('wix').generate(env)
    # Collect the keywords for the specfile compiler: every named parameter
    # of this function plus everything in kw, minus source, target and env.
    # NOTE: locals() must be snapshotted before any new local variable is
    # created, so this block has to stay first.
    loc = locals()
    del loc['kw']
    kw.update(loc)
    del kw['source'], kw['target'], kw['env']
    # strip the install builder from the source files
    target, source = stripinstallbuilder(target, source, env)
    # put the arguments into the env and call the specfile builder.
    env['msi_spec'] = kw
    specfile = wxs_builder(* [env, target, source], **kw)
    # now call the WiX Tool with the built specfile added as a source.
    msifile = env.WiX(target, specfile)
    # return the target and source tuple.
    return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# Externalize PNG images from markdown files converted by nbconvert.
import argparse
import os
from base64 import b64decode


def externalize(input_path, output_path):
    """Rewrite the markdown file *input_path* into *output_path*.

    - Embedded base64 <img> tags are decoded into PNG files stored in
      '<input-stem>_files/' and replaced by markdown image references.
    - 4-space-indented code blocks are re-emitted as fenced ```python
      blocks.
    - Indented output lines that just print the repr of an object
      ('    <...>') are dropped.
    """
    figure_dir = os.path.splitext(input_path)[0] + '_files'
    with open(input_path) as fd:
        lesson = fd.readlines()
    # Make sure the figure directory exists before writing into it.
    os.makedirs(figure_dir, exist_ok=True)
    img_count = 0
    code_block = []
    with open(output_path, 'w') as fd:
        for line in lesson:
            if line.startswith('<img'):
                # Convert embedded image to external one.
                prefix = '<img src="data:image/png;base64,'
                suffix = '">'
                img_count += 1
                # Strip the newline first so the suffix slice removes
                # exactly '">' and leaves pure base64 payload.
                payload = line.rstrip('\n')[len(prefix):-len(suffix)]
                img = b64decode(payload)
                fig_path = os.path.join(figure_dir,
                                        'figure_%d.png' % (img_count, ))
                with open(fig_path, 'wb') as ifd:
                    ifd.write(img)
                # Bug fix: the original wrote '\n' % (...) which raises
                # TypeError; emit a proper markdown image reference.
                fd.write('![figure %d](%s)\n' % (img_count, fig_path))
            elif line.startswith('    <') and line.endswith('>\n'):
                # Ignore output lines that just print repr of an object.
                continue
            elif line.startswith('    '):
                # Group all lines that are part of a code block.
                code_block.append(line[4:])
            elif code_block:
                # Wrap code block in back ticks instead of indent.
                fd.write('```python\n')
                fd.writelines(code_block)
                fd.write('```\n')
                code_block = []
                fd.write(line)
            else:
                fd.write(line)
        if code_block:
            # Flush a code block that runs to the end of the file
            # (the original silently dropped it).
            fd.write('```python\n')
            fd.writelines(code_block)
            fd.write('```\n')


def main():
    """Parse CLI arguments and run the conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_args()
    externalize(args.input, args.output)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand

# Package metadata consumed by the setup() call at the bottom of this file.
name = 'djangorestframework-oauth'
package = 'rest_framework_oauth'
description = 'OAuth support for Django REST Framework'
url = 'https://github.com/jpadilla/django-rest-framework-oauth'
author = 'José Padilla'
author_email = 'hello@jpadilla.com'
license = 'BSD'
# Read the runtime dependencies; 'with' closes the handle promptly instead
# of leaking it until garbage collection (the original never closed it).
with open('requirements.txt') as requirements_file:
    install_requires = requirements_file.read().split('\n')
# Test command borrowed from
# https://github.com/getsentry/sentry/blob/master/setup.py
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to pytest."""

    def finalize_options(self):
        """Point the test runner at the 'tests' directory."""
        TestCommand.finalize_options(self)
        self.test_suite = True
        self.test_args = ['tests']

    def run_tests(self):
        """Run pytest and exit the process with its status code."""
        import pytest
        sys.exit(pytest.main(self.test_args))
def get_version(package):
    """
    Return the package version as listed in `__version__` in `__init__.py`.

    Raises AttributeError when no `__version__` assignment is found.
    """
    # 'with' closes the file promptly; the original leaked the handle.
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
def get_packages(package):
    """Return the root package and every sub-package beneath it.

    A directory counts as a package when it contains an __init__.py.
    """
    packages = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            packages.append(dirpath)
    return packages
def get_package_data(package):
    """Return all files under *package* that do not live in a sub-package.

    The result maps the package name to a list of file paths relative to
    the package root, suitable for setup(package_data=...).
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            # Real (sub-)packages are handled by get_packages() instead.
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        for filename in filenames:
            filepaths.append(os.path.join(base, filename))
    return {package: filepaths}
version = get_version(package)

# Release shortcut: 'python setup.py publish' builds and uploads the sdist
# and wheel, then reminds the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(" git push --tags")
    sys.exit()

# Standard distribution metadata; the 'test' command is wired to the
# pytest-based PyTest command class defined earlier in this file.
setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    cmdclass={'test': PyTest},
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals.metrics;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.metrics.stats.Meter;
import org.apache.kafka.common.metrics.stats.WindowedCount;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX;
/**
 * Registers and records the consumer coordinator's OffsetCommit metrics:
 * commit-latency-avg / commit-latency-max plus a meter exposing
 * commit-rate and commit-total, all fed by a single "commit-latency"
 * sensor.
 */
public class OffsetCommitMetricsManager {
    final MetricName commitLatencyAvg;
    final MetricName commitLatencyMax;
    final MetricName commitRate;
    final MetricName commitTotal;
    private final Sensor commitSensor;

    public OffsetCommitMetricsManager(Metrics metrics) {
        final String groupName = CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX;
        commitSensor = metrics.sensor("commit-latency");
        commitLatencyAvg = metrics.metricName(
            "commit-latency-avg", groupName, "The average time taken for a commit request");
        commitSensor.add(commitLatencyAvg, new Avg());
        commitLatencyMax = metrics.metricName(
            "commit-latency-max", groupName, "The max time taken for a commit request");
        commitSensor.add(commitLatencyMax, new Max());
        commitRate = metrics.metricName(
            "commit-rate", groupName, "The number of commit calls per second");
        commitTotal = metrics.metricName(
            "commit-total", groupName, "The total number of commit calls");
        // A single Meter derives both the rate and the cumulative total
        // from the same windowed count.
        commitSensor.add(new Meter(new WindowedCount(), commitRate, commitTotal));
    }

    /** Record the observed round-trip latency of one commit request, in ms. */
    public void recordRequestLatency(long responseLatencyMs) {
        commitSensor.record(responseLatencyMs);
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/OffsetCommitMetricsManager.java
|
# Copyright (C) 1998-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""-request robot command runner."""
__all__ = [
'CommandRunner',
'Results',
]
# See the delivery diagram in IncomingRunner.py. This module handles all
# email destined for mylist-request, -join, and -leave. It no longer handles
# bounce messages (i.e. -admin or -bounces), nor does it handle mail to
# -owner.
import re
import logging
from email.errors import HeaderParseError
from email.header import decode_header, make_header
from email.iterators import typed_subpart_iterator
from io import StringIO
from mailman.config import config
from mailman.core.i18n import _
from mailman.core.runner import Runner
from mailman.email.message import UserNotification
from mailman.interfaces.command import ContinueProcessing, IEmailResults
from mailman.interfaces.languages import ILanguageManager
from zope.component import getUtility
from zope.interface import implementer
NL = '\n'
log = logging.getLogger('mailman.vette')
class CommandFinder:
    """Generate commands from the content of a message.

    Collects candidate command lines from the sub-address, the Subject
    header and the first text/plain body part; iterating the instance
    yields each line split into arguments.
    """

    def __init__(self, msg, msgdata, results):
        # Lines still waiting to be processed as commands.
        self.command_lines = []
        # Body lines beyond the configured maximum, reported back to the user.
        self.ignored_lines = []
        # Lines already consumed by __iter__(), in order.
        self.processed_lines = []
        # Depending on where the message was destined to, add some implicit
        # commands.  For example, if this was sent to the -join or -leave
        # addresses, it's the same as if 'join' or 'leave' commands were sent
        # to the -request address.
        subaddress = msgdata.get('subaddress')
        if subaddress == 'join':
            self.command_lines.append('join')
        elif subaddress == 'leave':
            self.command_lines.append('leave')
        elif subaddress == 'confirm':
            mo = re.match(config.mta.verp_confirm_regexp, msg.get('to', ''))
            if mo:
                self.command_lines.append('confirm ' + mo.group('cookie'))
        # Extract the subject header and do RFC 2047 decoding.
        raw_subject = msg.get('subject', '')
        try:
            subject = str(make_header(decode_header(raw_subject)))
            # Mail commands must be ASCII.  NOTE: .encode() makes this
            # entry a bytes object; __iter__() decodes each part back to
            # str before yielding.
            self.command_lines.append(subject.encode('us-ascii'))
        except (HeaderParseError, UnicodeError, LookupError):
            # The Subject header was unparseable or not ASCII.  If the raw
            # subject is a unicode object, convert it to ASCII ignoring all
            # bogus characters.  Otherwise, there's nothing in the subject
            # that we can use.
            if isinstance(raw_subject, str):
                safe_subject = raw_subject.encode('us-ascii', 'ignore')
                self.command_lines.append(safe_subject)
        # Find the first text/plain part of the message.  The loop-and-break
        # idiom takes just the first part the iterator produces, if any.
        part = None
        for part in typed_subpart_iterator(msg, 'text', 'plain'):
            break
        if part is None or part is not msg:
            # Either there was no text/plain part or we ignored some
            # non-text/plain parts.
            print(_('Ignoring non-text/plain MIME parts'), file=results)
        if part is None:
            # There was no text/plain part to be found.
            return
        body = part.get_payload()
        # text/plain parts better have string payloads.
        assert isinstance(body, (bytes, str)), 'Non-string decoded payload'
        lines = body.splitlines()
        # Use no more lines than specified by the site configuration.
        max_lines = int(config.mailman.email_commands_max_lines)
        self.command_lines.extend(lines[:max_lines])
        self.ignored_lines.extend(lines[max_lines:])

    def __iter__(self):
        """Return each command line, split into space separated arguments."""
        while self.command_lines:
            line = self.command_lines.pop(0)
            self.processed_lines.append(line)
            parts = line.strip().split()
            if len(parts) == 0:
                continue
            # Ensure that all the parts are unicodes.  Since we only accept
            # ASCII commands and arguments, ignore anything else.
            parts = [(part
                      if isinstance(part, str)
                      else part.decode('ascii', 'ignore'))
                     for part in parts]
            yield parts
@implementer(IEmailResults)
class Results:
    """The email command results.

    A text accumulator for the reply body; commands print() into an
    instance and __str__() renders the collected text.
    """

    def __init__(self, charset='us-ascii'):
        # Accumulates the human-readable results text.
        self._output = StringIO()
        # Charset used by write() to decode any bytes handed to it.
        self.charset = charset
        # The template text is intentionally flush-left: it becomes the
        # opening of the reply body sent back to the user.
        print(_("""\
The results of your email command are provided below.
"""), file=self._output)

    def write(self, text):
        # Decode bytes with the message charset, silently dropping
        # undecodable characters, so mixed str/bytes writes never raise.
        if isinstance(text, bytes):
            text = text.decode(self.charset, 'ignore')
        self._output.write(text)

    def __str__(self):
        value = self._output.getvalue()
        assert isinstance(value, str), 'Not a string: %r' % value
        return value
class CommandRunner(Runner):
    """The email command runner."""

    def _dispose(self, mlist, msg, msgdata):
        """Process one -request/-join/-leave command message for *mlist*.

        Returns False (or falls off the end, returning None) in every
        path, meaning the message is never requeued.
        """
        message_id = msg.get('message-id', 'n/a')
        # The policy here is similar to the Replybot policy.  If a message has
        # "Precedence: bulk|junk|list" and no "X-Ack: yes" header, we discard
        # the command message.
        precedence = msg.get('precedence', '').lower()
        ack = msg.get('x-ack', '').lower()
        if ack != 'yes' and precedence in ('bulk', 'junk', 'list'):
            log.info('%s Precedence: %s message discarded by: %s',
                     message_id, precedence, mlist.request_address)
            return False
        # Do replybot for commands.
        replybot = config.handlers['replybot']
        replybot.process(mlist, msg, msgdata)
        if mlist.autorespond_requests == 1:
            # Respond and discard.
            log.info('%s -request message replied and discard', message_id)
            return False
        # Now craft the response and process the command lines.
        charset = msg.get_param('charset')
        if charset is None:
            charset = 'us-ascii'
        results = Results(charset)
        # Include just a few key pieces of information from the original: the
        # sender, date, and message id.
        print(_('- Original message details:'), file=results)
        subject = msg.get('subject', 'n/a')
        date = msg.get('date', 'n/a')
        from_ = msg.get('from', 'n/a')
        print(_(' From: $from_'), file=results)
        print(_(' Subject: $subject'), file=results)
        print(_(' Date: $date'), file=results)
        print(_(' Message-ID: $message_id'), file=results)
        print(_('\n- Results:'), file=results)
        finder = CommandFinder(msg, msgdata, results)
        for parts in finder:
            command = None
            # Try to find a command on this line.  There may be a Re: prefix
            # (possibly internationalized) so try with the first and second
            # words on the line.
            if len(parts) > 0:
                command_name = parts.pop(0)
                command = config.commands.get(command_name)
            if command is None and len(parts) > 0:
                command_name = parts.pop(0)
                command = config.commands.get(command_name)
            if command is None:
                print(_('No such command: $command_name'), file=results)
            else:
                status = command.process(
                    mlist, msg, msgdata, parts, results)
                assert status in ContinueProcessing, (
                    'Invalid status: %s' % status)
                if status == ContinueProcessing.no:
                    break
        # All done.  Strip blank lines and send the response.
        lines = [line.strip() for line in finder.command_lines if line]
        if len(lines) > 0:
            print(_('\n- Unprocessed:'), file=results)
            for line in lines:
                print(line, file=results)
        lines = [line.strip() for line in finder.ignored_lines if line]
        if len(lines) > 0:
            print(_('\n- Ignored:'), file=results)
            for line in lines:
                print(line, file=results)
        print(_('\n- Done.'), file=results)
        # Send a reply, but do not attach the original message.  This is a
        # compromise because the original message is often helpful in tracking
        # down problems, but it's also a vector for backscatter spam.
        language = getUtility(ILanguageManager)[msgdata['lang']]
        reply = UserNotification(msg.sender, mlist.bounces_address,
                                 _('The results of your email commands'),
                                 lang=language)
        cte = msg.get('content-transfer-encoding')
        if cte is not None:
            reply['Content-Transfer-Encoding'] = cte
        # Find a charset for the response body.  Try the original message's
        # charset first, then ascii, then latin-1 and finally falling back to
        # utf-8 (the for/else picks utf-8 only when every candidate failed).
        reply_body = str(results)
        for charset in (results.charset, 'us-ascii', 'latin-1'):
            try:
                reply_body.encode(charset)
                break
            except UnicodeError:
                pass
        else:
            charset = 'utf-8'
        reply.set_payload(reply_body, charset=charset)
        reply.send(mlist)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* contrib/ltree/ltree.h */
#ifndef __LTREE_H__
#define __LTREE_H__

#include "fmgr.h"
#include "tsearch/ts_locale.h"
#include "utils/memutils.h"

/* ltree */

/*
 * We want the maximum length of a label to be encoding-independent, so
 * set it somewhat arbitrarily at 1000 characters (not bytes), while using
 * uint16 fields to hold the byte length.
 */
#define LTREE_LABEL_MAX_CHARS 1000

/*
 * LOWER_NODE used to be defined in the Makefile via the compile flags.
 * However the MSVC build scripts neglected to do the same which resulted in
 * MSVC builds not using LOWER_NODE. Since then, the MSVC scripts have been
 * modified to look for -D compile flags in Makefiles, so here, in order to
 * get the historic behavior of LOWER_NODE not being defined on MSVC, we only
 * define it when not building in that environment. This is important as we
 * want to maintain the same LOWER_NODE behavior after a pg_upgrade.
 */
#ifndef _MSC_VER
#define LOWER_NODE
#endif

/* One label of an ltree path; labels are stored back to back, maxaligned. */
typedef struct
{
	uint16		len;			/* label string length in bytes */
	char		name[FLEXIBLE_ARRAY_MEMBER];
} ltree_level;

#define LEVEL_HDRSIZE	(offsetof(ltree_level,name))
/* Advance to the next maxaligned ltree_level in a packed label array. */
#define LEVEL_NEXT(x)	( (ltree_level*)( ((char*)(x)) + MAXALIGN(((ltree_level*)(x))->len + LEVEL_HDRSIZE) ) )

typedef struct
{
	int32		vl_len_;		/* varlena header (do not touch directly!) */
	uint16		numlevel;		/* number of labels */
	/* Array of maxalign'd ltree_level structs follows: */
	char		data[FLEXIBLE_ARRAY_MEMBER];
} ltree;

#define LTREE_HDRSIZE	MAXALIGN( offsetof(ltree, data) )
#define LTREE_FIRST(x)	( (ltree_level*)( ((char*)(x))+LTREE_HDRSIZE ) )
#define LTREE_MAX_LEVELS	PG_UINT16_MAX	/* ltree.numlevel is uint16 */

/* lquery */

/* lquery_variant: one branch of some OR'ed alternatives */
typedef struct
{
	int32		val;			/* CRC of label string */
	uint16		len;			/* label string length in bytes */
	uint8		flag;			/* see LVAR_xxx flags below */
	char		name[FLEXIBLE_ARRAY_MEMBER];
} lquery_variant;

/*
 * Note: these macros contain too many MAXALIGN calls and so will sometimes
 * overestimate the space needed for an lquery_variant.  However, we can't
 * change it without breaking on-disk compatibility for lquery.
 */
#define LVAR_HDRSIZE   MAXALIGN(offsetof(lquery_variant, name))
#define LVAR_NEXT(x)	( (lquery_variant*)( ((char*)(x)) + MAXALIGN(((lquery_variant*)(x))->len) + LVAR_HDRSIZE ) )

#define LVAR_ANYEND 0x01		/* '*' flag: prefix match */
#define LVAR_INCASE 0x02		/* '@' flag: case-insensitive match */
#define LVAR_SUBLEXEME	0x04	/* '%' flag: word-wise match */

/*
 * In an lquery_level, "flag" contains the union of the variants' flags
 * along with possible LQL_xxx flags; so those bit sets can't overlap.
 *
 * "low" and "high" are nominally the minimum and maximum number of matches.
 * However, for backwards compatibility with pre-v13 on-disk lqueries,
 * non-'*' levels (those with numvar > 0) only have valid low/high if the
 * LQL_COUNT flag is set; otherwise those fields are zero, but the behavior
 * is as if they were both 1.
 */
typedef struct
{
	uint16		totallen;		/* total length of this level, in bytes */
	uint16		flag;			/* see LQL_xxx and LVAR_xxx flags */
	uint16		numvar;			/* number of variants; 0 means '*' */
	uint16		low;			/* minimum repeat count */
	uint16		high;			/* maximum repeat count */
	/* Array of maxalign'd lquery_variant structs follows: */
	char		variants[FLEXIBLE_ARRAY_MEMBER];
} lquery_level;

#define LQL_HDRSIZE MAXALIGN( offsetof(lquery_level,variants) )
#define LQL_NEXT(x) ( (lquery_level*)( ((char*)(x)) + MAXALIGN(((lquery_level*)(x))->totallen) ) )
#define LQL_FIRST(x)	( (lquery_variant*)( ((char*)(x))+LQL_HDRSIZE ) )

#define LQL_NOT		0x10		/* level has '!' (NOT) prefix */
#define LQL_COUNT	0x20		/* level is non-'*' and has repeat counts */

/*
 * FLG_CANLOOKSIGN: whether a level's hashed signature can be consulted by
 * the GiST code.  With LOWER_NODE, labels are lowercased before hashing,
 * so case-insensitive variants can still use the signature.
 */
#ifdef LOWER_NODE
#define FLG_CANLOOKSIGN(x) ( ( (x) & ( LQL_NOT | LVAR_ANYEND | LVAR_SUBLEXEME ) ) == 0 )
#else
#define FLG_CANLOOKSIGN(x) ( ( (x) & ( LQL_NOT | LVAR_ANYEND | LVAR_SUBLEXEME | LVAR_INCASE ) ) == 0 )
#endif
#define LQL_CANLOOKSIGN(x) FLG_CANLOOKSIGN( ((lquery_level*)(x))->flag )

typedef struct
{
	int32		vl_len_;		/* varlena header (do not touch directly!) */
	uint16		numlevel;		/* number of lquery_levels */
	uint16		firstgood;		/* number of leading simple-match levels */
	uint16		flag;			/* see LQUERY_xxx flags below */
	/* Array of maxalign'd lquery_level structs follows: */
	char		data[FLEXIBLE_ARRAY_MEMBER];
} lquery;

#define LQUERY_HDRSIZE	 MAXALIGN( offsetof(lquery, data) )
#define LQUERY_FIRST(x)   ( (lquery_level*)( ((char*)(x))+LQUERY_HDRSIZE ) )
#define LQUERY_MAX_LEVELS	PG_UINT16_MAX	/* lquery.numlevel is uint16 */

#define LQUERY_HASNOT		0x01

/* valid label chars are alphanumerics, underscores and hyphens */
#define ISLABEL(x) ( t_isalnum_cstr(x) || t_iseq(x, '_') || t_iseq(x, '-') )

/* full text query */

/*
 * item in polish notation with back link
 * to left operand
 */
typedef struct ITEM
{
	int16		type;			/* END/ERR/VAL/OPR/... -- see tokens below */
	int16		left;			/* back link to the left operand */
	int32		val;			/* CRC of the operand string, or operator char */
	uint8		flag;
	/* user-friendly value */
	uint8		length;
	uint16		distance;
} ITEM;

/*
 *Storage:
 *		(len)(size)(array of ITEM)(array of operand in user-friendly form)
 */
typedef struct
{
	int32		vl_len_;		/* varlena header (do not touch directly!) */
	int32		size;			/* number of ITEMs */
	char		data[FLEXIBLE_ARRAY_MEMBER];
} ltxtquery;

/* Callback used by compare_subnode() for (case-insensitive) prefix tests. */
typedef bool (*ltree_prefix_eq_func) (const char *, size_t, const char *, size_t);

#define HDRSIZEQT		MAXALIGN(VARHDRSZ + sizeof(int32))
#define COMPUTESIZE(size,lenofoperand)	( HDRSIZEQT + (size) * sizeof(ITEM) + (lenofoperand) )
#define LTXTQUERY_TOO_BIG(size,lenofoperand) \
	((size) > (MaxAllocSize - HDRSIZEQT - (lenofoperand)) / sizeof(ITEM))
#define GETQUERY(x)  (ITEM*)( (char*)(x)+HDRSIZEQT )
#define GETOPERAND(x)	( (char*)GETQUERY(x) + ((ltxtquery*)x)->size * sizeof(ITEM) )

#define ISOPERATOR(x) ( (x)=='!' || (x)=='&' || (x)=='|' || (x)=='(' || (x)==')' )

/* ltxtquery parser/executor token types */
#define END						0
#define ERR						1
#define VAL						2
#define OPR						3
#define OPEN					4
#define CLOSE					5
#define VALTRUE					6	/* for stop words */
#define VALFALSE				7

/* use in array iterator */
PGDLLEXPORT Datum ltree_isparent(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltree_risparent(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltq_regex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltq_rregex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum lt_q_regex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum lt_q_rregex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltxtq_exec(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltxtq_rexec(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltq_regex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltq_rregex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _lt_q_regex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _lt_q_rregex(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltxtq_exec(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltxtq_rexec(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltree_isparent(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum _ltree_risparent(PG_FUNCTION_ARGS);

/* Concatenation functions */
PGDLLEXPORT Datum ltree_addltree(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltree_addtext(PG_FUNCTION_ARGS);
PGDLLEXPORT Datum ltree_textadd(PG_FUNCTION_ARGS);

/* Util function */
PGDLLEXPORT Datum ltree_in(PG_FUNCTION_ARGS);

bool		ltree_execute(ITEM *curitem, void *checkval,
						  bool calcnot, bool (*chkcond) (void *checkval, ITEM *val));

int			ltree_compare(const ltree *a, const ltree *b);
bool		inner_isparent(const ltree *c, const ltree *p);
bool		compare_subnode(ltree_level *t, char *qn, int len,
							ltree_prefix_eq_func prefix_eq, bool anyend);
ltree	   *lca_inner(ltree **a, int len);
bool		ltree_prefix_eq(const char *a, size_t a_sz, const char *b, size_t b_sz);
bool		ltree_prefix_eq_ci(const char *a, size_t a_sz, const char *b, size_t b_sz);

/* fmgr macros for ltree objects */
#define DatumGetLtreeP(X)			((ltree *) PG_DETOAST_DATUM(X))
#define DatumGetLtreePCopy(X)		((ltree *) PG_DETOAST_DATUM_COPY(X))
#define PG_GETARG_LTREE_P(n)		DatumGetLtreeP(PG_GETARG_DATUM(n))
#define PG_GETARG_LTREE_P_COPY(n)	DatumGetLtreePCopy(PG_GETARG_DATUM(n))

#define DatumGetLqueryP(X)			((lquery *) PG_DETOAST_DATUM(X))
#define DatumGetLqueryPCopy(X)		((lquery *) PG_DETOAST_DATUM_COPY(X))
#define PG_GETARG_LQUERY_P(n)		DatumGetLqueryP(PG_GETARG_DATUM(n))
#define PG_GETARG_LQUERY_P_COPY(n)	DatumGetLqueryPCopy(PG_GETARG_DATUM(n))

#define DatumGetLtxtqueryP(X)			((ltxtquery *) PG_DETOAST_DATUM(X))
#define DatumGetLtxtqueryPCopy(X)		((ltxtquery *) PG_DETOAST_DATUM_COPY(X))
#define PG_GETARG_LTXTQUERY_P(n)		DatumGetLtxtqueryP(PG_GETARG_DATUM(n))
#define PG_GETARG_LTXTQUERY_P_COPY(n)	DatumGetLtxtqueryPCopy(PG_GETARG_DATUM(n))

/* GiST support for ltree */

#define BITBYTE 8
#define SIGLENBIT(siglen) ((siglen) * BITBYTE)
#define LTREE_SIGLEN_DEFAULT	(2 * sizeof(int32))
#define LTREE_SIGLEN_MAX		GISTMaxIndexKeySize
#define LTREE_GET_SIGLEN()		(PG_HAS_OPCLASS_OPTIONS() ? \
								 ((LtreeGistOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \
								 LTREE_SIGLEN_DEFAULT)

typedef unsigned char *BITVECP;

#define LOOPBYTE(siglen) \
	for(i = 0; i < (siglen); i++)

/* Bit/byte accessors for the hashed signature bitmap. */
#define GETBYTE(x,i) ( *( (BITVECP)(x) + (int)( (i) / BITBYTE ) ) )
#define GETBITBYTE(x,i) ( ((unsigned char)(x)) >> i & 0x01 )
#define CLRBIT(x,i)   GETBYTE(x,i) &= ~( 0x01 << ( (i) % BITBYTE ) )
#define SETBIT(x,i)   GETBYTE(x,i) |=  ( 0x01 << ( (i) % BITBYTE ) )
#define GETBIT(x,i) ( (GETBYTE(x,i) >> ( (i) % BITBYTE )) & 0x01 )

#define HASHVAL(val, siglen) (((unsigned int)(val)) % SIGLENBIT(siglen))
#define HASH(sign, val, siglen) SETBIT((sign), HASHVAL(val, siglen))

/*
 * type of index key for ltree. Tree are combined B-Tree and R-Tree
 * Storage:
 *	Leaf pages
 *		(len)(flag)(ltree)
 *	Non-Leaf
 *				 (len)(flag)(sign)(left_ltree)(right_ltree)
 *		ALLTRUE: (len)(flag)(left_ltree)(right_ltree)
 *
 */
typedef struct
{
	int32		vl_len_;		/* varlena header (do not touch directly!) */
	uint32		flag;			/* see LTG_xxx flags below */
	char		data[FLEXIBLE_ARRAY_MEMBER];
} ltree_gist;

#define LTG_ONENODE 0x01		/* leaf key: a single ltree follows */
#define LTG_ALLTRUE 0x02		/* signature omitted, matches everything */
#define LTG_NORIGHT 0x04		/* left and right bounds are identical */

#define LTG_HDRSIZE MAXALIGN(VARHDRSZ + sizeof(uint32))
#define LTG_SIGN(x) ( (BITVECP)( ((char*)(x))+LTG_HDRSIZE ) )
#define LTG_NODE(x) ( (ltree*)( ((char*)(x))+LTG_HDRSIZE ) )
#define LTG_ISONENODE(x) ( ((ltree_gist*)(x))->flag & LTG_ONENODE )
#define LTG_ISALLTRUE(x) ( ((ltree_gist*)(x))->flag & LTG_ALLTRUE )
#define LTG_ISNORIGHT(x) ( ((ltree_gist*)(x))->flag & LTG_NORIGHT )
#define LTG_LNODE(x, siglen)	( (ltree*)( ( ((char*)(x))+LTG_HDRSIZE ) + ( LTG_ISALLTRUE(x) ? 0 : (siglen) ) ) )
#define LTG_RENODE(x, siglen)	( (ltree*)( ((char*)LTG_LNODE(x, siglen)) + VARSIZE(LTG_LNODE(x, siglen))) )
#define LTG_RNODE(x, siglen)	( LTG_ISNORIGHT(x) ? LTG_LNODE(x, siglen) : LTG_RENODE(x, siglen) )

#define LTG_GETLNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_LNODE(x, siglen) )
#define LTG_GETRNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_RNODE(x, siglen) )

extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
									ltree *left, ltree *right);

/* GiST support for ltree[] */

#define LTREE_ASIGLEN_DEFAULT	(7 * sizeof(int32))
#define LTREE_ASIGLEN_MAX		GISTMaxIndexKeySize
#define LTREE_GET_ASIGLEN()		(PG_HAS_OPCLASS_OPTIONS() ? \
								 ((LtreeGistOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \
								 LTREE_ASIGLEN_DEFAULT)
#define ASIGLENBIT(siglen) ((siglen) * BITBYTE)

#define ALOOPBYTE(siglen) \
	for (i = 0; i < (siglen); i++)

#define AHASHVAL(val, siglen) (((unsigned int)(val)) % ASIGLENBIT(siglen))
#define AHASH(sign, val, siglen) SETBIT((sign), AHASHVAL(val, siglen))

/* gist_ltree_ops and gist__ltree_ops opclass options */
typedef struct
{
	int32		vl_len_;		/* varlena header (do not touch directly!) */
	int			siglen;			/* signature length in bytes */
} LtreeGistOptions;

/* type of key is the same to ltree_gist */

#endif
|
c
|
github
|
https://github.com/postgres/postgres
|
contrib/ltree/ltree.h
|
package client
import (
"context"
"net/url"
"github.com/moby/moby/api/types/swarm"
)
// NodeUpdateOptions holds parameters to update nodes with.
type NodeUpdateOptions struct {
	// Version is sent as the "version" query parameter of the update
	// request; NOTE(review): presumably the optimistic-concurrency
	// version of the node object -- confirm against the API docs.
	Version swarm.Version
	// Spec is the node specification posted as the request body.
	Spec swarm.NodeSpec
}

// NodeUpdateResult holds the result of a NodeUpdate call (currently empty).
type NodeUpdateResult struct{}
// NodeUpdate updates a Node.
//
// The node's current version must be supplied in options.Version; the new
// spec is posted as the request body.
func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) {
	id, err := trimID("node", nodeID)
	if err != nil {
		return NodeUpdateResult{}, err
	}

	params := url.Values{}
	params.Set("version", options.Version.String())

	resp, err := cli.post(ctx, "/nodes/"+id+"/update", params, options.Spec, nil)
	defer ensureReaderClosed(resp)
	return NodeUpdateResult{}, err
}
|
go
|
github
|
https://github.com/moby/moby
|
client/node_update.go
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
/**
* ViewFileSystem (extends the FileSystem interface) implements a client-side
* mount table. Its spec and implementation is identical to {@link ViewFs}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
public class ViewFileSystem extends FileSystem {
private static final Path ROOT_PATH = new Path(Path.SEPARATOR);
/**
 * Build the exception raised when a mutating operation is attempted on an
 * internal (read-only) directory of the mount table.
 */
static AccessControlException readOnlyMountTable(final String operation,
    final String p) {
  final String message = "InternalDir of ViewFileSystem is readonly, operation "
      + operation + " not permitted on path " + p + ".";
  return new AccessControlException(message);
}

/** Path-typed convenience overload of {@link #readOnlyMountTable}. */
static AccessControlException readOnlyMountTable(final String operation,
    final Path p) {
  return readOnlyMountTable(operation, p.toString());
}
/**
 * Gets file system creator instance.
 *
 * Subclasses may override this to supply a different way of obtaining
 * child filesystems (used by initialize() and the inner cache).
 *
 * @return fs getter.
 */
protected FsGetter fsGetter() {
  return new FsGetter();
}
/**
 * Caching children filesystems. HADOOP-15565.
 *
 * Child filesystems are created at most once per (scheme, authority) pair
 * and closed together via {@link #closeAll()}.
 */
static class InnerCache {
  // Cached child filesystems, keyed by (scheme, authority).
  private ConcurrentMap<Key, FileSystem> map = new ConcurrentHashMap<>();
  private FsGetter fsCreator;

  InnerCache(FsGetter fsCreator) {
    this.fsCreator = fsCreator;
  }

  // computeIfAbsent() does not support a mapping function which throws
  // IOException. Wrap fsCreator.getNewInstance() to not throw IOException
  // and return null instead.
  FileSystem getNewFileSystem(URI uri, Configuration config) {
    try {
      return fsCreator.getNewInstance(uri, config);
    } catch (IOException e) {
      LOG.error("Failed to create new FileSystem instance for " + uri, e);
      return null;
    }
  }

  /**
   * Get the cached filesystem for the URI, creating and caching it on
   * first use.
   * @throws IOException if a new instance could not be created.
   */
  FileSystem get(URI uri, Configuration config) throws IOException {
    Key key = new Key(uri);
    FileSystem fs = map.computeIfAbsent(key, k -> getNewFileSystem(uri, config));
    if (fs == null) {
      // null marks a failed creation inside computeIfAbsent; surface it.
      throw new IOException("Failed to create new FileSystem instance for " + uri);
    }
    return fs;
  }

  /** Close every cached child filesystem; failures are logged, not thrown. */
  void closeAll() {
    for (FileSystem fs : map.values()) {
      try {
        fs.close();
      } catch (IOException e) {
        LOG.info("Fail closing ViewFileSystem's child filesystem " + fs, e);
      }
    }
  }

  /** Drop all cache entries without closing them. */
  void clear() {
    map.clear();
  }

  /**
   * All the cached instances share the same UGI so there is no need to have a
   * URI in the Key. Make the Key simple with just the scheme and authority.
   */
  private static class Key {
    private final String scheme;
    private final String authority;

    Key(URI uri) {
      scheme = uri.getScheme() == null ? "" : uri.getScheme().toLowerCase();
      authority =
          uri.getAuthority() == null ? "" : uri.getAuthority().toLowerCase();
    }

    @Override
    public int hashCode() {
      return Objects.hash(scheme, authority);
    }

    @Override
    public boolean equals(Object obj) {
      if (obj == this) {
        return true;
      }
      // instanceof already evaluates to false for null, so the former
      // explicit null check was redundant.
      if (obj instanceof Key) {
        Key that = (Key) obj;
        return this.scheme.equals(that.scheme)
            && this.authority.equals(that.authority);
      }
      return false;
    }
  }
}
/**
 * MountPoint representation built from the configuration.
 */
public static class MountPoint {
  /** The mounted on path location. */
  private final Path mountedOnPath;
  /** Array of target FileSystem URIs. */
  private final String[] targetFileSystemPaths;

  MountPoint(Path srcPath, String[] targetFs) {
    this.mountedOnPath = srcPath;
    this.targetFileSystemPaths = targetFs;
  }

  public Path getMountedOnPath() {
    return mountedOnPath;
  }

  /** Target paths converted to URIs, one per configured target. */
  public URI[] getTargetFileSystemURIs() {
    return Arrays.stream(targetFileSystemPaths)
        .map(URI::create)
        .toArray(URI[]::new);
  }

  public String[] getTargetFileSystemPaths() {
    return targetFileSystemPaths;
  }
}
final long creationTime; // creation time of the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
private URI myUri; // this filesystem's own viewfs URI, built in initialize()
private Path workingDir; // current working dir; starts at the home directory
Configuration config; // configuration the mount table was built from
InodeTree<FileSystem> fsState; // the fs state; ie the mount table
Path homeDir = null; // lazily computed and cached by getHomeDirectory()
// Whether child filesystems are shared through InnerCache (HADOOP-15565).
private boolean enableInnerCache = false;
private InnerCache cache;
// Default to rename within same mountpoint
private RenameStrategy renameStrategy = RenameStrategy.SAME_MOUNTPOINT;
/**
 * Make the path Absolute and get the path-part of a pathname.
 * Checks that URI matches this file system
 * and that the path-part is a valid name.
 *
 * @param p path
 * @return path-part of the Path p
 */
String getUriPath(final Path p) {
  checkPath(p);
  return makeAbsolute(p).toUri().getPath();
}

/** Resolve f against the current working directory if it is relative. */
private Path makeAbsolute(final Path f) {
  return f.isAbsolute() ? f : new Path(workingDir, f);
}
/**
 * This is the constructor with the signature needed by
 * {@link FileSystem#createFileSystem(URI, Configuration)}
 *
 * After this constructor is called initialize() is called.
 * @throws IOException raised on errors performing I/O.
 */
public ViewFileSystem() throws IOException {
  // Capture the caller's UGI and a timestamp now; both are later handed to
  // the internal-dir filesystems created in initialize().
  ugi = UserGroupInformation.getCurrentUser();
  creationTime = Time.now();
}
/**
 * Return the protocol scheme for the FileSystem.
 *
 * @return <code>viewfs</code>
 */
@Override
public String getScheme() {
  return FsConstants.VIEWFS_SCHEME;
}

/**
 * Returns false as it does not support to add fallback link automatically on
 * no mounts.
 *
 * Consulted by initialize() when constructing the InodeTree; subclasses can
 * override to opt in.
 */
boolean supportAutoAddingFallbackOnNoMounts() {
  return false;
}
/**
 * Called after a new FileSystem instance is constructed.
 * @param theUri a uri whose authority section names the host, port, etc. for
 *        this FileSystem
 * @param conf the configuration
 */
@Override
public void initialize(final URI theUri, final Configuration conf)
    throws IOException {
  super.initialize(theUri, conf);
  setConf(conf);
  config = conf;
  enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE,
      CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT);
  FsGetter fsGetter = fsGetter();
  cache = new InnerCache(fsGetter);
  // Now build client side view (i.e. client side mount table) from config.
  final String authority = theUri.getAuthority();
  String tableName = authority;
  if (theUri.getPort() != -1 && config
      .getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME,
          CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) {
    // Optionally drop the port so viewfs://host:port/ and viewfs://host/
    // resolve to the same mount-table definition.
    tableName = theUri.getHost();
  }
  try {
    myUri = new URI(getScheme(), authority, "/", null, null);
    boolean initingUriAsFallbackOnNoMounts =
        supportAutoAddingFallbackOnNoMounts();
    // Anonymous InodeTree subclass wires mount-link resolution to actual
    // child FileSystem instances.
    fsState = new InodeTree<FileSystem>(conf, tableName, myUri,
        initingUriAsFallbackOnNoMounts) {
      @Override
      protected Function<URI, FileSystem> initAndGetTargetFs() {
        return new Function<URI, FileSystem>() {
          @Override
          public FileSystem apply(final URI uri) {
            FileSystem fs;
            try {
              // Create child filesystems as the UGI captured at
              // construction time, not the current caller.
              fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                @Override
                public FileSystem run() throws IOException {
                  if (enableInnerCache) {
                    // Serialize cache access during creation.
                    synchronized (cache) {
                      return cache.get(uri, config);
                    }
                  } else {
                    return fsGetter().get(uri, config);
                  }
                }
              });
              // Chroot the child fs at the link target.
              return new ChRootedFileSystem(fs, uri);
            } catch (IOException | InterruptedException ex) {
              LOG.error("Could not initialize the underlying FileSystem "
                  + "object. Exception: " + ex.toString());
            }
            // null signals failed creation to the caller.
            return null;
          }
        };
      }
      @Override
      protected FileSystem getTargetFileSystem(final INodeDir<FileSystem> dir)
          throws URISyntaxException {
        // Internal (read-only) directories of the mount table.
        return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config,
            this);
      }
      @Override
      protected FileSystem getTargetFileSystem(final String settings,
          final URI[] uris) throws URISyntaxException, IOException {
        // Nfly links: replicated multi-URI targets.
        return NflyFSystem.createFileSystem(uris, config, settings,
            fsGetter);
      }
    };
    workingDir = this.getHomeDirectory();
    renameStrategy = RenameStrategy.valueOf(
        conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
            RenameStrategy.SAME_MOUNTPOINT.toString()));
  } catch (URISyntaxException e) {
    throw new IOException("URISyntax exception: " + theUri);
  }
}
/**
 * Convenience Constructor for apps to call directly.
 * @param theUri which must be that of ViewFileSystem
 * @param conf conf configuration.
 * @throws IOException raised on errors performing I/O.
 */
ViewFileSystem(final URI theUri, final Configuration conf)
    throws IOException {
  this();
  initialize(theUri, conf);
}

/**
 * Convenience Constructor for apps to call directly.
 *
 * Uses the default viewfs:/// URI.
 * @param conf configuration.
 * @throws IOException raised on errors performing I/O.
 */
public ViewFileSystem(final Configuration conf) throws IOException {
  this(FsConstants.VIEWFS_URI, conf);
}
@Override
public URI getUri() {
  return myUri;
}

/**
 * Resolve a viewfs path to the path on the underlying target filesystem;
 * internal mount-table directories resolve to themselves.
 */
@Override
public Path resolvePath(final Path f) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.isInternalDir()
      ? f
      : resolved.targetFileSystem.resolvePath(resolved.remainingPath);
}
/**
 * Home directory under the configured prefix (default "/user"), computed
 * once and cached.
 */
@Override
public Path getHomeDirectory() {
  if (homeDir == null) {
    String base = fsState.getHomeDirPrefixValue();
    if (base == null) {
      base = "/user";
    }
    final String user = ugi.getShortUserName();
    if (base.equals("/")) {
      // Avoid a double slash when the prefix is the root itself.
      homeDir = this.makeQualified(new Path(base + user));
    } else {
      homeDir = this.makeQualified(new Path(base + "/" + user));
    }
  }
  return homeDir;
}
/** @return the current working directory (initially the home directory). */
@Override
public Path getWorkingDirectory() {
  return workingDir;
}

/** Set the working directory after validating it against this filesystem. */
@Override
public void setWorkingDirectory(final Path new_dir) {
  getUriPath(new_dir); // this validates the path
  workingDir = makeAbsolute(new_dir);
}
/** Append to an existing file on the resolved target filesystem. */
@Override
public FSDataOutputStream append(final Path f, final int bufferSize,
    final Progressable progress) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.append(resolved.remainingPath, bufferSize,
      progress);
}

@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication,
    long blockSize, Progressable progress) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved;
  try {
    resolved = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    // Creating inside an internal mount-table dir is not permitted.
    throw readOnlyMountTable("create", f);
  }
  assert(resolved.remainingPath != null);
  return resolved.targetFileSystem.createNonRecursive(resolved.remainingPath,
      permission, flags, bufferSize, replication, blockSize, progress);
}

@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved;
  try {
    resolved = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    throw readOnlyMountTable("create", f);
  }
  assert(resolved.remainingPath != null);
  return resolved.targetFileSystem.create(resolved.remainingPath, permission,
      overwrite, bufferSize, replication, blockSize, progress);
}
@Override
public boolean delete(final Path f, final boolean recursive)
    throws AccessControlException, FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  // Deleting an internal dir, or a mount link itself (remainingPath is the
  // bare slash), is not permitted.
  if (resolved.isInternalDir()
      || resolved.remainingPath == InodeTree.SlashPath) {
    throw readOnlyMountTable("delete", f);
  }
  return resolved.targetFileSystem.delete(resolved.remainingPath, recursive);
}

/** Deprecated single-argument delete; always recursive. */
@Override
@SuppressWarnings("deprecation")
public boolean delete(final Path f)
    throws AccessControlException, FileNotFoundException, IOException {
  return delete(f, true);
}

@Override
public BlockLocation[] getFileBlockLocations(FileStatus fs,
    long start, long len) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(fs.getPath()), true);
  // Re-wrap the status so the target fs sees its own (chrooted) path.
  return resolved.targetFileSystem.getFileBlockLocations(
      new ViewFsFileStatus(fs, resolved.remainingPath), start, len);
}

@Override
public FileChecksum getFileChecksum(final Path f)
    throws AccessControlException, FileNotFoundException,
    IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.getFileChecksum(resolved.remainingPath);
}

@Override
public FileChecksum getFileChecksum(final Path f, final long length)
    throws AccessControlException, FileNotFoundException,
    IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.getFileChecksum(resolved.remainingPath,
      length);
}
/**
 * Rewrite a target-filesystem FileStatus so its path is the viewfs path.
 */
private static FileStatus fixFileStatus(FileStatus orig,
    Path qualified) throws IOException {
  // FileStatus#getPath is a fully qualified path relative to the root of
  // target file system.
  // We need to change it to viewfs URI - relative to root of mount table.
  // The implementors of RawLocalFileSystem were trying to be very smart.
  // They implement FileStatus#getOwner lazily -- the object
  // returned is really a RawLocalFileSystem that expect the
  // FileStatus#getPath to be unchanged so that it can get owner when needed.
  // Hence we need to interpose a new ViewFileSystemFileStatus that
  // works around.
  if ("file".equals(orig.getPath().toUri().getScheme())) {
    orig = wrapLocalFileStatus(orig, qualified);
  }
  orig.setPath(qualified);
  return orig;
}

/** Wrap a local-fs status, preserving location info when present. */
private static FileStatus wrapLocalFileStatus(FileStatus orig,
    Path qualified) {
  if (orig instanceof LocatedFileStatus) {
    return new ViewFsLocatedFileStatus((LocatedFileStatus) orig, qualified);
  }
  return new ViewFsFileStatus(orig, qualified);
}
/**
 * {@inheritDoc}
 *
 * If the given path is a symlink(mount link), the path will be resolved to a
 * target path and it will get the resolved path's FileStatus object. It will
 * not be represented as a symlink and isDirectory API returns true if the
 * resolved path is a directory, false otherwise.
 */
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
    FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  final FileStatus targetStatus =
      resolved.targetFileSystem.getFileStatus(resolved.remainingPath);
  // Re-point the status' path at the viewfs-qualified path.
  return fixFileStatus(targetStatus, this.makeQualified(f));
}

/** Check access on the resolved target path. */
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.access(resolved.remainingPath, mode);
}
/**
 * {@inheritDoc}
 *
 * Note: listStatus considers listing from fallbackLink if available. If the
 * same directory path is present in configured mount path as well as in
 * fallback fs, then only the fallback path will be listed in the returned
 * result except for link.
 *
 * If any of the immediate children of the given path f is a symlink(mount
 * link), the returned FileStatus object of that children would be represented
 * as a symlink. It will not be resolved to the target path and will not get
 * the target path FileStatus object. The target path will be available via
 * getSymlink on that children's FileStatus object. Since it represents as
 * symlink, isDirectory on that children's FileStatus will return false.
 * This behavior can be changed by setting an advanced configuration
 * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will
 * be represented as non-symlinks and all the file/directory attributes like
 * permissions, isDirectory etc will be assigned from it's resolved target
 * directory/file.
 *
 * If you want to get the FileStatus of target path for that children, you may
 * want to use GetFileStatus API with that children's symlink path. Please see
 * {@link ViewFileSystem#getFileStatus(Path f)}
 *
 * Note: In ViewFileSystem, by default the mount links are represented as
 * symlinks.
 */
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
    FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  final FileStatus[] listing =
      resolved.targetFileSystem.listStatus(resolved.remainingPath);
  if (!resolved.isInternalDir()) {
    // We need to change the name in each FileStatus as described in
    // {@link #getFileStatus}.
    for (int idx = 0; idx < listing.length; idx++) {
      listing[idx] = fixFileStatus(listing[idx],
          getChrootedPath(resolved, listing[idx], f));
    }
  }
  return listing;
}
/**
 * Located-status listing; wraps the target iterator so each returned status
 * is re-pointed at its viewfs path (internal dirs pass through unchanged).
 */
@Override
public RemoteIterator<LocatedFileStatus>listLocatedStatus(final Path f,
    final PathFilter filter) throws FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(f), true);
  final RemoteIterator<LocatedFileStatus> statusIter =
      res.targetFileSystem.listLocatedStatus(res.remainingPath);
  if (res.isInternalDir()) {
    return statusIter;
  }
  // Lazily fix each status' path as the caller iterates.
  return new RemoteIterator<LocatedFileStatus>() {
    @Override
    public boolean hasNext() throws IOException {
      return statusIter.hasNext();
    }
    @Override
    public LocatedFileStatus next() throws IOException {
      final LocatedFileStatus status = statusIter.next();
      return (LocatedFileStatus)fixFileStatus(status,
          getChrootedPath(res, status, f));
    }
  };
}
/**
 * Compute the viewfs-qualified path of a child status returned by a target
 * filesystem, by stripping the target's chroot (or nfly root) prefix.
 */
private Path getChrootedPath(InodeTree.ResolveResult<FileSystem> res,
    FileStatus status, Path f) throws IOException {
  final String suffix;
  if (res.targetFileSystem instanceof ChRootedFileSystem) {
    suffix = ((ChRootedFileSystem)res.targetFileSystem)
        .stripOutRoot(status.getPath());
  } else { // nfly
    suffix = ((NflyFSystem.NflyStatus)status).stripRoot();
  }
  // Empty suffix means the status is for f itself.
  return this.makeQualified(
      suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix));
}
/** Create a directory (default permissions) on the resolved target. */
@Override
public boolean mkdirs(Path dir) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(dir), false);
  return resolved.targetFileSystem.mkdirs(resolved.remainingPath);
}

/** Create a directory with explicit permissions on the resolved target. */
@Override
public boolean mkdirs(final Path dir, final FsPermission permission)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(dir), false);
  return resolved.targetFileSystem.mkdirs(resolved.remainingPath, permission);
}

/** Open a file for reading on the resolved target. */
@Override
public FSDataInputStream open(final Path f, final int bufferSize)
    throws AccessControlException, FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.open(resolved.remainingPath, bufferSize);
}
/**
 * Rename src to dst across the mount table.
 *
 * Each endpoint is first resolved WITHOUT its last component so a rename of
 * a mount point onto itself is caught as an internal-dir operation; an
 * internal-dir endpoint is then only allowed when a root fallback link
 * exists and the path actually resolves into the fallback filesystem.
 * Finally the configured rename strategy is enforced before delegating.
 */
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
  // passing resolveLastComponent as false to catch renaming a mount point to
  // itself. We need to catch this as an internal operation and fail if no
  // fallback.
  InodeTree.ResolveResult<FileSystem> resSrc =
      fsState.resolve(getUriPath(src), false);
  if (resSrc.isInternalDir()) {
    if (fsState.getRootFallbackLink() == null) {
      // If fallback is null, we can't rename from src.
      throw readOnlyMountTable("rename", src);
    }
    InodeTree.ResolveResult<FileSystem> resSrcWithLastComp =
        fsState.resolve(getUriPath(src), true);
    if (resSrcWithLastComp.isInternalDir() || resSrcWithLastComp
        .isLastInternalDirLink()) {
      throw readOnlyMountTable("rename", src);
    } else {
      // This is fallback and let's set the src fs with this fallback
      resSrc = resSrcWithLastComp;
    }
  }
  InodeTree.ResolveResult<FileSystem> resDst =
      fsState.resolve(getUriPath(dst), false);
  if (resDst.isInternalDir()) {
    if (fsState.getRootFallbackLink() == null) {
      // If fallback is null, we can't rename to dst.
      throw readOnlyMountTable("rename", dst);
    }
    // if the fallback exist, we may have chance to rename to fallback path
    // where dst parent is matching to internalDir.
    InodeTree.ResolveResult<FileSystem> resDstWithLastComp =
        fsState.resolve(getUriPath(dst), true);
    if (resDstWithLastComp.isInternalDir()) {
      // We need to get fallback here. If matching fallback path not exist, it
      // will fail later. This is a very special case: Even though we are on
      // internal directory, we should allow to rename, so that src files will
      // moved under matching fallback dir.
      resDst = new InodeTree.ResolveResult<FileSystem>(
          InodeTree.ResultKind.INTERNAL_DIR,
          fsState.getRootFallbackLink().getTargetFileSystem(), "/",
          new Path(resDstWithLastComp.resolvedPath), false);
    } else {
      // The link resolved to some target fs or fallback fs.
      resDst = resDstWithLastComp;
    }
  }
  URI srcUri = resSrc.targetFileSystem.getUri();
  URI dstUri = resDst.targetFileSystem.getUri();
  // Enforce the configured cross-mount-point rename policy.
  verifyRenameStrategy(srcUri, dstUri,
      resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
  if (resSrc.targetFileSystem instanceof ChRootedFileSystem &&
      resDst.targetFileSystem instanceof ChRootedFileSystem) {
    // Bypass the chroot wrappers and rename on the shared underlying fs
    // using full (un-chrooted) paths.
    ChRootedFileSystem srcFS = (ChRootedFileSystem) resSrc.targetFileSystem;
    ChRootedFileSystem dstFS = (ChRootedFileSystem) resDst.targetFileSystem;
    return srcFS.getMyFs().rename(srcFS.fullPath(resSrc.remainingPath),
        dstFS.fullPath(resDst.remainingPath));
  } else {
    return resSrc.targetFileSystem.rename(resSrc.remainingPath, resDst.remainingPath);
  }
}
/**
 * Enforce the configured rename strategy; throws IOException when the
 * rename would cross mount points in a way the strategy forbids.
 */
static void verifyRenameStrategy(URI srcUri, URI dstUri,
    boolean isSrcDestSame, ViewFileSystem.RenameStrategy renameStrategy)
    throws IOException {
  if (renameStrategy == RenameStrategy.SAME_FILESYSTEM_ACROSS_MOUNTPOINT) {
    // Allowed as long as both sides live on the same scheme + authority.
    if (srcUri.getAuthority() != null
        && !(srcUri.getScheme().equals(dstUri.getScheme())
            && srcUri.getAuthority().equals(dstUri.getAuthority()))) {
      throw new IOException("Renames across Mount points not supported");
    }
  } else if (renameStrategy
      == RenameStrategy.SAME_TARGET_URI_ACROSS_MOUNTPOINT) {
    // Alternate 2: Rename across mountpoints with same target.
    // i.e. Rename across alias mountpoints.
    // Note we compare the URIs. the URIs include the link targets.
    // hence we allow renames across mount links as long as the mount links
    // point to the same target.
    if (!srcUri.equals(dstUri)) {
      throw new IOException("Renames across Mount points not supported");
    }
  } else if (renameStrategy == RenameStrategy.SAME_MOUNTPOINT) {
    // Alternate 3: renames ONLY within the same mount link.
    if (!isSrcDestSame) {
      throw new IOException("Renames across Mount points not supported");
    }
  } else {
    throw new IllegalArgumentException("Unexpected rename strategy");
  }
}
/** Truncate the resolved target file to newLength. */
@Override
public boolean truncate(final Path f, final long newLength)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.truncate(resolved.remainingPath, newLength);
}

@Override
public void setOwner(final Path f, final String username,
    final String groupname) throws AccessControlException,
    FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  resolved.targetFileSystem.setOwner(resolved.remainingPath, username,
      groupname);
}

@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  resolved.targetFileSystem.setPermission(resolved.remainingPath, permission);
}

@Override
public boolean setReplication(final Path f, final short replication)
    throws AccessControlException, FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.setReplication(resolved.remainingPath,
      replication);
}

@Override
public void setTimes(final Path f, final long mtime, final long atime)
    throws AccessControlException, FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  resolved.targetFileSystem.setTimes(resolved.remainingPath, mtime, atime);
}
// ACL and extended-attribute operations: each resolves the viewfs path and
// delegates to the mount target.

@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.modifyAclEntries(resolved.remainingPath, aclSpec);
}

@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.removeAclEntries(resolved.remainingPath, aclSpec);
}

@Override
public void removeDefaultAcl(Path path)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.removeDefaultAcl(resolved.remainingPath);
}

@Override
public void removeAcl(Path path)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.removeAcl(resolved.remainingPath);
}

@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.setAcl(resolved.remainingPath, aclSpec);
}

@Override
public AclStatus getAclStatus(Path path) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem.getAclStatus(resolved.remainingPath);
}

@Override
public void setXAttr(Path path, String name, byte[] value,
    EnumSet<XAttrSetFlag> flag) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.setXAttr(resolved.remainingPath, name, value,
      flag);
}

@Override
public byte[] getXAttr(Path path, String name) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem.getXAttr(resolved.remainingPath, name);
}

@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem.getXAttrs(resolved.remainingPath);
}

@Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem.getXAttrs(resolved.remainingPath, names);
}

@Override
public List<String> listXAttrs(Path path) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem.listXAttrs(resolved.remainingPath);
}

@Override
public void removeXAttr(Path path, String name) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.removeXAttr(resolved.remainingPath, name);
}
/** No-op: checksum verification cannot be set across many child filesystems. */
@Override
public void setVerifyChecksum(final boolean verifyChecksum) {
  // This is a file system level operations, however ViewFileSystem
  // points to many file systems. Noop for ViewFileSystem.
}
/**
 * Initialize the target filesystem for all mount points.
 * @param mountPoints The mount points
 * @return Mapping of mount point and the initialized target filesystems
 * @throws RuntimeException when the target file system cannot be initialized
 */
private Map<String, FileSystem> initializeMountedFileSystems(
    List<InodeTree.MountPoint<FileSystem>> mountPoints) {
  Map<String, FileSystem> fsMap = new HashMap<>(mountPoints.size());
  for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
    try {
      // fs is scoped inside the loop: it was previously declared (and left
      // mutable) outside it, but is never used across iterations.
      FileSystem fs = mount.target.getTargetFileSystem();
      fsMap.put(mount.src, fs);
    } catch (IOException ex) {
      String errMsg = "Not able to initialize FileSystem for mount path " +
          mount.src + " with exception " + ex;
      LOG.error(errMsg);
      throw new RuntimeException(errMsg, ex);
    }
  }
  return fsMap;
}
// The path-less default getters cannot be answered: defaults differ per
// mount target, so each throws NotInMountpointException. Use the
// Path-taking overloads instead.
@Override
public long getDefaultBlockSize() {
  throw new NotInMountpointException("getDefaultBlockSize");
}
@Override
public short getDefaultReplication() {
  throw new NotInMountpointException("getDefaultReplication");
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
  throw new NotInMountpointException("getServerDefaults");
}
/** Default block size of the mount target that f resolves to. */
@Override
public long getDefaultBlockSize(Path f) {
  try {
    final InodeTree.ResolveResult<FileSystem> resolved =
        fsState.resolve(getUriPath(f), true);
    return resolved.targetFileSystem.getDefaultBlockSize(
        resolved.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultBlockSize");
  } catch (IOException e) {
    throw new RuntimeException("Not able to initialize fs in "
        + " getDefaultBlockSize for path " + f + " with exception", e);
  }
}

/** Default replication of the mount target that f resolves to. */
@Override
public short getDefaultReplication(Path f) {
  try {
    final InodeTree.ResolveResult<FileSystem> resolved =
        fsState.resolve(getUriPath(f), true);
    return resolved.targetFileSystem.getDefaultReplication(
        resolved.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultReplication");
  } catch (IOException e) {
    throw new RuntimeException("Not able to initialize fs in "
        + " getDefaultReplication for path " + f + " with exception", e);
  }
}

/** Server defaults of the mount target that f resolves to. */
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
  try {
    final InodeTree.ResolveResult<FileSystem> resolved =
        fsState.resolve(getUriPath(f), true);
    return resolved.targetFileSystem.getServerDefaults(
        resolved.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getServerDefaults");
  }
}
/** Content summary of the resolved target path. */
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.getContentSummary(resolved.remainingPath);
}

/** Quota usage of the resolved target path. */
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.getQuotaUsage(resolved.remainingPath);
}

/** No-op: write checksums cannot be set across many child filesystems. */
@Override
public void setWriteChecksum(final boolean writeChecksum) {
  // This is a file system level operations, however ViewFileSystem
  // points to many file systems. Noop for ViewFileSystem.
}
/**
 * Collect the distinct child filesystems behind every mount point, plus the
 * root fallback link's children when the root is an internal dir.
 */
@Override
public FileSystem[] getChildFileSystems() {
  List<InodeTree.MountPoint<FileSystem>> mountPoints =
      fsState.getMountPoints();
  Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
  // A Set deduplicates filesystems shared by several mount points.
  Set<FileSystem> children = new HashSet<>();
  for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
    FileSystem targetFs = fsMap.get(mountPoint.src);
    children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
  }
  try {
    if (fsState.isRootInternalDir() &&
        fsState.getRootFallbackLink() != null) {
      children.addAll(Arrays.asList(
          fsState.getRootFallbackLink().getTargetFileSystem()
              .getChildFileSystems()));
    }
  } catch (IOException ex) {
    // Best effort: skip the fallback children but still return the
    // mount-point children collected above.
    LOG.error("Could not add child filesystems for source path "
        + fsState.getRootFallbackLink().fullPath + " with exception " + ex);
  }
  return children.toArray(new FileSystem[]{});
}
/**
 * Snapshot the mount table as an array of public {@code MountPoint} entries.
 */
public MountPoint[] getMountPoints() {
  final List<InodeTree.MountPoint<FileSystem>> mounts =
      fsState.getMountPoints();
  final MountPoint[] converted = new MountPoint[mounts.size()];
  int idx = 0;
  // Convert each internal mount entry to the public representation.
  for (InodeTree.MountPoint<FileSystem> mp : mounts) {
    converted[idx++] =
        new MountPoint(new Path(mp.src), mp.target.targetDirLinkList);
  }
  return converted;
}
@Override
public Path createSnapshot(Path path, String snapshotName)
    throws IOException {
  // Snapshots are created on the file system backing the mount point.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  return resolved.targetFileSystem
      .createSnapshot(resolved.remainingPath, snapshotName);
}
@Override
public void renameSnapshot(Path path, String snapshotOldName,
    String snapshotNewName) throws IOException {
  // Forward the rename to the target file system of the resolved mount.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.renameSnapshot(resolved.remainingPath,
      snapshotOldName, snapshotNewName);
}
@Override
public void deleteSnapshot(Path path, String snapshotName)
    throws IOException {
  // Forward the deletion to the target file system of the resolved mount.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(path), true);
  resolved.targetFileSystem.deleteSnapshot(resolved.remainingPath,
      snapshotName);
}
@Override
public void satisfyStoragePolicy(Path src) throws IOException {
  // Delegate policy satisfaction to the backing file system.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(src), true);
  resolved.targetFileSystem.satisfyStoragePolicy(resolved.remainingPath);
}
@Override
public void setStoragePolicy(Path src, String policyName) throws IOException {
  // Delegate the policy change to the backing file system.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(src), true);
  resolved.targetFileSystem.setStoragePolicy(resolved.remainingPath,
      policyName);
}
@Override
public void unsetStoragePolicy(Path src) throws IOException {
  // Delegate the policy reset to the backing file system.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(src), true);
  resolved.targetFileSystem.unsetStoragePolicy(resolved.remainingPath);
}
@Override
public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
  // Read the policy from the backing file system.
  final InodeTree.ResolveResult<FileSystem> resolved =
      fsState.resolve(getUriPath(src), true);
  return resolved.targetFileSystem.getStoragePolicy(resolved.remainingPath);
}
@Override
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
    throws IOException {
  // Union of the policies advertised by every child file system.
  final Set<BlockStoragePolicySpi> merged = new HashSet<>();
  for (FileSystem child : getChildFileSystems()) {
    try {
      merged.addAll(child.getAllStoragePolicies());
    } catch (UnsupportedOperationException e) {
      // ignored: this child does not support storage policies
    }
  }
  return merged;
}
/**
 * Get the trash root directory for current user when the path
 * specified is deleted.
 *
 * If FORCE_INSIDE_MOUNT_POINT flag is not set, return the default trash root
 * from targetFS.
 *
 * When FORCE_INSIDE_MOUNT_POINT is set to true,
 * <ol>
 *   <li>
 *     If the trash root for path p is in the same mount point as path p,
 *     and one of:
 *     <ol>
 *       <li>The mount point isn't at the top of the target fs.</li>
 *       <li>The resolved path of path is root (in fallback FS).</li>
 *       <li>The trash isn't in user's target fs home directory
 *       get the corresponding viewFS path for the trash root and return
 *       it.
 *       </li>
 *     </ol>
 *   </li>
 *   <li>
 *     else, return the trash root under the root of the mount point
 *     (/{mntpoint}/.Trash/{user}).
 *   </li>
 * </ol>
 *
 * These conditions handle several different important cases:
 * <ul>
 *   <li>File systems may need to have more local trash roots, such as
 *   encryption zones or snapshot roots.</li>
 *   <li>The fallback mount should use the user's home directory.</li>
 *   <li>Cloud storage systems should not use trash in an implicitly defined
 *   home directory, per a container, unless it is the fallback fs.</li>
 * </ul>
 *
 * @param path the trash root of the path to be determined.
 * @return the trash root path.
 */
@Override
public Path getTrashRoot(Path path) {
  try {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    Path targetFSTrashRoot =
        res.targetFileSystem.getTrashRoot(res.remainingPath);
    // Allow clients to use old behavior of delegating to target fs.
    if (!config.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
        CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
      return targetFSTrashRoot;
    }
    // The trash root path from the target fs
    String targetFSTrashRootPath = targetFSTrashRoot.toUri().getPath();
    // The mount point path in the target fs, normalized to end with "/"
    // so the startsWith() prefix test below cannot match partial segments.
    String mountTargetPath = res.targetFileSystem.getUri().getPath();
    if (!mountTargetPath.endsWith("/")) {
      mountTargetPath = mountTargetPath + "/";
    }
    Path targetFsUserHome = res.targetFileSystem.getHomeDirectory();
    // Case 1 of the javadoc: the target-side trash root lives inside this
    // mount point AND we are not in the "root mount + non-root resolved
    // path + trash-in-home" combination; map it back to a viewFS path.
    if (targetFSTrashRootPath.startsWith(mountTargetPath) &&
        !(mountTargetPath.equals(ROOT_PATH.toString()) &&
            !res.resolvedPath.equals(ROOT_PATH.toString()) &&
            (targetFsUserHome != null && targetFSTrashRootPath.startsWith(
                targetFsUserHome.toUri().getPath())))) {
      String relativeTrashRoot =
          targetFSTrashRootPath.substring(mountTargetPath.length());
      return makeQualified(new Path(res.resolvedPath, relativeTrashRoot));
    } else {
      // Return the trash root for the mount point.
      return makeQualified(new Path(res.resolvedPath,
          TRASH_PREFIX + "/" + ugi.getShortUserName()));
    }
  } catch (IOException | IllegalArgumentException e) {
    throw new NotInMountpointException(path, "getTrashRoot");
  }
}
/**
 * Get all the trash roots for current user or all users.
 *
 * When FORCE_INSIDE_MOUNT_POINT is set to true, we also return trash roots
 * under the root of each mount point, with their viewFS paths.
 *
 * @param allUsers return trash roots for all users if true.
 * @return all Trash root directories.
 */
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
  // A map from targetFSPath -> FileStatus.
  // FileStatus can be from targetFS or viewFS.
  HashMap<Path, FileStatus> trashRoots = new HashMap<>();
  // First, whatever each child file system reports as its trash roots.
  for (FileSystem fs : getChildFileSystems()) {
    for (FileStatus trash : fs.getTrashRoots(allUsers)) {
      trashRoots.put(trash.getPath(), trash);
    }
  }
  // Return trashRoots if FORCE_INSIDE_MOUNT_POINT is disabled.
  if (!config.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
      CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
    return trashRoots.values();
  }
  // Get trash roots in TRASH_PREFIX dir inside mount points and fallback FS.
  List<InodeTree.MountPoint<FileSystem>> mountPoints =
      fsState.getMountPoints();
  // If we have a fallback FS, add a mount point for it as <"", fallback FS>.
  // The source path of a mount point shall not end with '/', thus for
  // fallback fs, we set its mount point src as "".
  // NOTE(review): this appends to the list returned by getMountPoints();
  // if that list is the tree's internal state (not a copy), the fallback
  // entry would leak into later calls — confirm getMountPoints() copies.
  if (fsState.getRootFallbackLink() != null) {
    mountPoints.add(new InodeTree.MountPoint<>("",
        fsState.getRootFallbackLink()));
  }
  try {
    for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
      Path trashRoot =
          makeQualified(new Path(mountPoint.src + "/" + TRASH_PREFIX));
      // Continue if trashRoot does not exist for this mount point
      if (!exists(trashRoot)) {
        continue;
      }
      FileSystem targetFS = mountPoint.target.getTargetFileSystem();
      if (!allUsers) {
        // Only the current user's trash dir under this mount point.
        Path userTrashRoot = new Path(trashRoot, ugi.getShortUserName());
        if (exists(userTrashRoot)) {
          Path targetFSUserTrashRoot = targetFS.makeQualified(
              new Path(targetFS.getUri().getPath(),
                  TRASH_PREFIX + "/" + ugi.getShortUserName()));
          trashRoots.put(targetFSUserTrashRoot, getFileStatus(userTrashRoot));
        }
      } else {
        // Every per-user dir under this mount point's .Trash.
        FileStatus[] mountPointTrashRoots = listStatus(trashRoot);
        for (FileStatus trash : mountPointTrashRoots) {
          // Remove the mountPoint and the leading '/' to get the
          // relative targetFsTrash path
          String targetFsTrash = trash.getPath().toUri().getPath()
              .substring(mountPoint.src.length() + 1);
          Path targetFsTrashPath = targetFS.makeQualified(
              new Path(targetFS.getUri().getPath(), targetFsTrash));
          trashRoots.put(targetFsTrashPath, trash);
        }
      }
    }
  } catch (IOException e) {
    // Best-effort: partial results are still returned on failure.
    LOG.warn("Exception in get all trash roots for mount points", e);
  }
  return trashRoots.values();
}
@Override
public FsStatus getStatus() throws IOException {
  // Equivalent to querying the status at the view root; the explicit cast
  // documents which overload is selected.
  return getStatus((Path) null);
}
@Override
public FsStatus getStatus(Path p) throws IOException {
  // Default to the view root when no path is supplied.
  if (p == null) {
    p = InodeTree.SlashPath;
  }
  InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
      getUriPath(p), true);
  // NOTE(review): the original view-side path 'p' is forwarded here rather
  // than res.remainingPath, unlike the sibling delegating methods — confirm
  // whether the chrooted target is meant to receive the unresolved path.
  return res.targetFileSystem.getStatus(p);
}
/**
 * Return the total size of all files under "/", if {@link
 * Constants#CONFIG_VIEWFS_LINK_MERGE_SLASH} is supported and is a valid
 * mount point. Else, throw NotInMountpointException.
 *
 * @throws IOException raised on errors performing I/O.
 */
@Override
public long getUsed() throws IOException {
  final InodeTree.ResolveResult<FileSystem> rootResolution = fsState.resolve(
      getUriPath(InodeTree.SlashPath), true);
  // "/" only has a usable size when it resolves to a real target fs.
  if (!rootResolution.isInternalDir()) {
    return rootResolution.targetFileSystem.getUsed();
  }
  throw new NotInMountpointException(InodeTree.SlashPath, "getUsed");
}
@Override
public Path getLinkTarget(Path path) throws IOException {
  final InodeTree.ResolveResult<FileSystem> resolved;
  try {
    resolved = fsState.resolve(getUriPath(path), true);
  } catch (FileNotFoundException e) {
    // No mount point covers this path.
    throw new NotInMountpointException(path, "getLinkTarget");
  }
  return resolved.targetFileSystem.getLinkTarget(resolved.remainingPath);
}
/**
 * Reject the concat operation; forward the rest to the viewed FS.
 * @param path path to query the capability of.
 * @param capability string to query the stream support for.
 * @return the capability
 * @throws IOException if there is no resolved FS, or it raises an IOE.
 */
@Override
public boolean hasPathCapability(Path path, String capability)
    throws IOException {
  final Path qualified = makeQualified(path);
  final String validated = validatePathCapabilityArgs(qualified, capability);
  // concat is not supported, as it may be invoked across filesystems.
  if (CommonPathCapabilities.FS_CONCAT.equals(validated)) {
    return false;
  }
  // otherwise, check capabilities of mounted FS.
  try {
    final InodeTree.ResolveResult<FileSystem> resolved =
        fsState.resolve(getUriPath(qualified), true);
    return resolved.targetFileSystem.hasPathCapability(resolved.remainingPath,
        capability);
  } catch (FileNotFoundException e) {
    // no mount point, nothing will work.
    throw new NotInMountpointException(qualified, "hasPathCapability");
  }
}
@Override
public Path getEnclosingRoot(Path path) throws IOException {
  InodeTree.ResolveResult<FileSystem> res;
  try {
    res = fsState.resolve(getUriPath(path), true);
  } catch (FileNotFoundException ex) {
    // Preserve the original failure as the cause of the mount-point error.
    NotInMountpointException mountPointEx =
        new NotInMountpointException(path,
            String.format("getEnclosingRoot - %s", ex.getMessage()));
    mountPointEx.initCause(ex);
    throw mountPointEx;
  }
  Path mountPath = new Path(res.resolvedPath);
  Path enclosingPath = res.targetFileSystem.getEnclosingRoot(new Path(getUriPath(path)));
  // Return whichever root is deeper: the target fs's enclosing root (e.g.
  // an encryption zone) or the mount point itself.
  return fixRelativePart(this.makeQualified(enclosingPath.depth() > mountPath.depth()
      ? enclosingPath : mountPath));
}
/**
 * An instance of this class represents an internal dir of the viewFs,
 * that is, an internal dir of the mount table.
 * It is a read-only mount table: create, mkdir or delete operations
 * are not allowed (some writes may fall through to a configured fallback FS).
 * If called on create or mkdir then this target is the parent of the
 * directory in which one is trying to create or mkdir; hence
 * in this case the path name passed in is the last component.
 * Otherwise this target is the end point of the path and hence
 * the path name passed in is null.
 */
static class InternalDirOfViewFs extends FileSystem {
  // The mount-table node this instance represents.
  final InodeTree.INodeDir<FileSystem> theInternalDir;
  final long creationTime; // of the mount table
  final UserGroupInformation ugi; // the user/group of user who created mtable
  final URI myUri;
  // Whether listStatus() reports mount links as symlinks (legacy behavior).
  private final boolean showMountLinksAsSymlinks;
  private InodeTree<FileSystem> fsState;

  public InternalDirOfViewFs(final InodeTree.INodeDir<FileSystem> dir,
      final long cTime, final UserGroupInformation ugi, URI uri,
      Configuration config, InodeTree fsState) throws URISyntaxException {
    myUri = uri;
    this.fsState = fsState;
    try {
      initialize(myUri, config);
    } catch (IOException e) {
      // initialize() cannot fail for this in-memory file system.
      throw new RuntimeException("Cannot occur");
    }
    theInternalDir = dir;
    creationTime = cTime;
    this.ugi = ugi;
    showMountLinksAsSymlinks = config
        .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS,
            CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT);
  }

  // Internal dirs are only ever addressed as "/" after resolution; anything
  // else indicates a bug in the caller.
  static private void checkPathIsSlash(final Path f) throws IOException {
    if (f != InodeTree.SlashPath) {
      throw new IOException(
          "Internal implementation error: expected file name to be /");
    }
  }

  @Override
  public URI getUri() {
    return myUri;
  }

  @Override
  public Path getWorkingDirectory() {
    throw new RuntimeException(
        "Internal impl error: getWorkingDir should not have been called");
  }

  @Override
  public void setWorkingDirectory(final Path new_dir) {
    throw new RuntimeException(
        "Internal impl error: getWorkingDir should not have been called");
  }

  @Override
  public FSDataOutputStream append(final Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    throw readOnlyMountTable("append", f);
  }

  /**
   * Create is rejected for internal dirs unless a fallback FS is
   * configured, in which case the file is created in the corresponding
   * directory of the fallback FS.
   */
  @Override
  public FSDataOutputStream create(final Path f,
      final FsPermission permission, final boolean overwrite,
      final int bufferSize, final short replication, final long blockSize,
      final Progressable progress) throws IOException {
    Preconditions.checkNotNull(f, "File cannot be null.");
    if (InodeTree.SlashPath.equals(f)) {
      throw new FileAlreadyExistsException(
          "/ is not a file. The directory / already exist at: "
              + theInternalDir.fullPath);
    }
    if (this.fsState.getRootFallbackLink() != null) {
      // A mount link with the same name shadows the fallback FS.
      if (theInternalDir.getChildren().containsKey(f.getName())) {
        throw new FileAlreadyExistsException(
            "A mount path(file/dir) already exist with the requested path: "
                + theInternalDir.getChildren().get(f.getName()).fullPath);
      }
      FileSystem linkedFallbackFs =
          this.fsState.getRootFallbackLink().getTargetFileSystem();
      Path parent = Path.getPathWithoutSchemeAndAuthority(
          new Path(theInternalDir.fullPath));
      String leaf = f.getName();
      Path fileToCreate = new Path(parent, leaf);
      try {
        return linkedFallbackFs
            .create(fileToCreate, permission, overwrite, bufferSize,
                replication, blockSize, progress);
      } catch (IOException e) {
        LOG.error("Failed to create file: {} at fallback: {}", fileToCreate,
            linkedFallbackFs.getUri(), e);
        throw e;
      }
    }
    throw readOnlyMountTable("create", f);
  }

  @Override
  public boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, IOException {
    checkPathIsSlash(f);
    throw readOnlyMountTable("delete", f);
  }

  @Override
  @SuppressWarnings("deprecation")
  public boolean delete(final Path f)
      throws AccessControlException, IOException {
    return delete(f, true);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final FileStatus fs,
      final long start, final long len) throws
      FileNotFoundException, IOException {
    // When application calls listFiles on internalDir, it would return
    // RemoteIterator from InternalDirOfViewFs. If there is a fallBack, there
    // is a chance of files exists under that internalDir in fallback.
    // Iterator#next will call getFileBlockLocations with that files. So, we
    // should return getFileBlockLocations on fallback. See HDFS-15532.
    if (!InodeTree.SlashPath.equals(fs.getPath()) && this.fsState
        .getRootFallbackLink() != null) {
      FileSystem linkedFallbackFs =
          this.fsState.getRootFallbackLink().getTargetFileSystem();
      Path parent = Path.getPathWithoutSchemeAndAuthority(
          new Path(theInternalDir.fullPath));
      Path pathToFallbackFs = new Path(parent, fs.getPath().getName());
      return linkedFallbackFs
          .getFileBlockLocations(pathToFallbackFs, start, len);
    }
    checkPathIsSlash(fs.getPath());
    throw new FileNotFoundException("Path points to dir not a file");
  }

  @Override
  public FileChecksum getFileChecksum(final Path f)
      throws FileNotFoundException, IOException {
    checkPathIsSlash(f);
    throw new FileNotFoundException("Path points to dir not a file");
  }

  // An internal dir is reported as a read-only (555) directory stamped with
  // the mount table's creation time.
  @Override
  public FileStatus getFileStatus(Path f) throws IOException {
    checkPathIsSlash(f);
    return new FileStatus(0, true, 0, 0, creationTime, creationTime,
        PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
        new Path(theInternalDir.fullPath).makeQualified(
            myUri, ROOT_PATH));
  }

  /**
   * List the children of this internal dir: mount links (as symlinks or as
   * resolved target statuses depending on configuration), nested internal
   * dirs, and any entries contributed by the fallback FS. Mount links take
   * precedence over internal dirs and fallback entries with the same name.
   */
  @Override
  public FileStatus[] listStatus(Path f) throws AccessControlException,
      FileNotFoundException, IOException {
    checkPathIsSlash(f);
    FileStatus[] fallbackStatuses = listStatusForFallbackLink();
    Set<FileStatus> linkStatuses = new HashSet<>();
    Set<FileStatus> internalDirStatuses = new HashSet<>();
    int i = 0; // NOTE(review): 'i' is never used in this method.
    for (Entry<String, INode<FileSystem>> iEntry :
        theInternalDir.getChildren().entrySet()) {
      INode<FileSystem> inode = iEntry.getValue();
      Path path = new Path(inode.fullPath).makeQualified(myUri, null);
      if (inode.isLink()) {
        INodeLink<FileSystem> link = inode.getLink();
        if (showMountLinksAsSymlinks) {
          // To maintain backward compatibility, with default option(showing
          // mount links as symlinks), we will represent target link as
          // symlink and rest other properties are belongs to mount link only.
          linkStatuses.add(
              new FileStatus(0, false, 0, 0, creationTime, creationTime,
                  PERMISSION_555, ugi.getShortUserName(),
                  ugi.getPrimaryGroupName(), link.getTargetLink(), path));
          continue;
        }
        // We will represent as non-symlinks. Here it will show target
        // directory/file properties like permissions, isDirectory etc on
        // mount path. The path will be a mount link path and isDirectory is
        // true if target is dir, otherwise false.
        String linkedPath = link.getTargetFileSystem().getUri().getPath();
        if ("".equals(linkedPath)) {
          linkedPath = "/";
        }
        try {
          FileStatus status =
              ((ChRootedFileSystem)link.getTargetFileSystem())
                  .getMyFs().getFileStatus(new Path(linkedPath));
          linkStatuses.add(
              new FileStatus(status.getLen(), status.isDirectory(),
                  status.getReplication(), status.getBlockSize(),
                  status.getModificationTime(), status.getAccessTime(),
                  status.getPermission(), status.getOwner(),
                  status.getGroup(), null, path));
        } catch (FileNotFoundException ex) {
          LOG.warn("Cannot get one of the children's(" + path
              + ") target path(" + link.getTargetFileSystem().getUri()
              + ") file status.", ex);
          throw ex;
        }
      } else {
        // Nested internal dir: synthesize a read-only directory status.
        internalDirStatuses.add(
            new FileStatus(0, true, 0, 0, creationTime, creationTime,
                PERMISSION_555, ugi.getShortUserName(),
                ugi.getPrimaryGroupName(), path));
      }
    }
    FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses
        .toArray(new FileStatus[internalDirStatuses.size()]);
    if (fallbackStatuses.length > 0) {
      internalDirStatusesMergedWithFallBack =
          merge(fallbackStatuses, internalDirStatusesMergedWithFallBack);
    }
    // Links will always have precedence than internalDir or fallback paths.
    return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]),
        internalDirStatusesMergedWithFallBack);
  }

  // Union of the two status arrays keyed by file name; entries in
  // 'toStatuses' win over same-named entries in 'fromStatuses'.
  private FileStatus[] merge(FileStatus[] toStatuses,
      FileStatus[] fromStatuses) {
    ArrayList<FileStatus> result = new ArrayList<>();
    Set<String> pathSet = new HashSet<>();
    for (FileStatus status : toStatuses) {
      result.add(status);
      pathSet.add(status.getPath().getName());
    }
    for (FileStatus status : fromStatuses) {
      if (!pathSet.contains(status.getPath().getName())) {
        result.add(status);
      }
    }
    return result.toArray(new FileStatus[result.size()]);
  }

  // Lists the fallback FS's entries corresponding to this internal dir,
  // with each path rewritten back into the viewfs scheme.
  private FileStatus[] listStatusForFallbackLink() throws IOException {
    if (this.fsState.getRootFallbackLink() != null) {
      FileSystem linkedFallbackFs =
          this.fsState.getRootFallbackLink().getTargetFileSystem();
      Path p = Path.getPathWithoutSchemeAndAuthority(
          new Path(theInternalDir.fullPath));
      if (theInternalDir.isRoot() || linkedFallbackFs.exists(p)) {
        FileStatus[] statuses = linkedFallbackFs.listStatus(p);
        for (FileStatus status : statuses) {
          // Fix the path back to viewfs scheme
          Path pathFromConfiguredFallbackRoot =
              new Path(p, status.getPath().getName());
          status.setPath(
              new Path(myUri.toString(), pathFromConfiguredFallbackRoot));
        }
        return statuses;
      }
    }
    return new FileStatus[0];
  }

  // Aggregates the summaries of all children; directoryCount starts at 1
  // to count this internal dir itself.
  @Override
  public ContentSummary getContentSummary(Path f) throws IOException {
    long[] summary = {0, 0, 1};
    for (FileStatus status : listStatus(f)) {
      Path targetPath =
          Path.getPathWithoutSchemeAndAuthority(status.getPath());
      InodeTree.ResolveResult<FileSystem> res =
          fsState.resolve(targetPath.toString(), true);
      ContentSummary child =
          res.targetFileSystem.getContentSummary(res.remainingPath);
      summary[0] += child.getLength();
      summary[1] += child.getFileCount();
      summary[2] += child.getDirectoryCount();
    }
    return new ContentSummary.Builder()
        .length(summary[0])
        .fileCount(summary[1])
        .directoryCount(summary[2])
        .build();
  }

  // Sums capacity/used/remaining across the children of this internal dir.
  @Override
  public FsStatus getStatus(Path p) throws IOException {
    long[] summary = {0, 0, 0};
    for (FileStatus status : listStatus(p)) {
      Path targetPath =
          Path.getPathWithoutSchemeAndAuthority(status.getPath());
      InodeTree.ResolveResult<FileSystem> res =
          fsState.resolve(targetPath.toString(), true);
      FsStatus child = res.targetFileSystem.getStatus(res.remainingPath);
      summary[0] += child.getCapacity();
      summary[1] += child.getUsed();
      summary[2] += child.getRemaining();
    }
    return new FsStatus(summary[0], summary[1], summary[2]);
  }

  /**
   * mkdirs is rejected for internal dirs unless a fallback FS is
   * configured, in which case the directory is created on the fallback FS.
   */
  @Override
  public boolean mkdirs(Path dir, FsPermission permission)
      throws IOException {
    if (theInternalDir.isRoot() && dir == null) {
      throw new FileAlreadyExistsException("/ already exits");
    }
    // Note dir starts with /
    if (theInternalDir.getChildren().containsKey(
        dir.toString().substring(1))) {
      return true; // this is the stupid semantics of FileSystem
    }
    if (this.fsState.getRootFallbackLink() != null) {
      FileSystem linkedFallbackFs =
          this.fsState.getRootFallbackLink().getTargetFileSystem();
      Path parent = Path.getPathWithoutSchemeAndAuthority(
          new Path(theInternalDir.fullPath));
      String leafChild = (InodeTree.SlashPath.equals(dir)) ?
          InodeTree.SlashPath.toString() :
          dir.getName();
      Path dirToCreate = new Path(parent, leafChild);
      try {
        return linkedFallbackFs.mkdirs(dirToCreate, permission);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Failed to create: {} at fallback: {}", dirToCreate,
              linkedFallbackFs.getUri(), e);
        }
        throw e;
      }
    }
    throw readOnlyMountTable("mkdirs", dir);
  }

  @Override
  public FSDataInputStream open(Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException, IOException {
    checkPathIsSlash(f);
    throw new FileNotFoundException("Path points to dir not a file");
  }

  @Override
  public boolean rename(Path src, Path dst) throws AccessControlException,
      IOException {
    checkPathIsSlash(src);
    checkPathIsSlash(dst);
    throw readOnlyMountTable("rename", src);
  }

  @Override
  public boolean truncate(Path f, long newLength) throws IOException {
    throw readOnlyMountTable("truncate", f);
  }

  @Override
  public void setOwner(Path f, String username, String groupname)
      throws AccessControlException, IOException {
    checkPathIsSlash(f);
    throw readOnlyMountTable("setOwner", f);
  }

  @Override
  public void setPermission(Path f, FsPermission permission)
      throws AccessControlException, IOException {
    checkPathIsSlash(f);
    throw readOnlyMountTable("setPermission", f);
  }

  @Override
  public boolean setReplication(Path f, short replication)
      throws AccessControlException, IOException {
    checkPathIsSlash(f);
    throw readOnlyMountTable("setReplication", f);
  }

  @Override
  public void setTimes(Path f, long mtime, long atime)
      throws AccessControlException, IOException {
    checkPathIsSlash(f);
    throw readOnlyMountTable("setTimes", f);
  }

  @Override
  public void setVerifyChecksum(boolean verifyChecksum) {
    // Noop for viewfs
  }

  @Override
  public FsServerDefaults getServerDefaults(Path f) throws IOException {
    throw new NotInMountpointException(f, "getServerDefaults");
  }

  @Override
  public long getDefaultBlockSize(Path f) {
    throw new NotInMountpointException(f, "getDefaultBlockSize");
  }

  @Override
  public short getDefaultReplication(Path f) {
    throw new NotInMountpointException(f, "getDefaultReplication");
  }

  @Override
  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("modifyAclEntries", path);
  }

  @Override
  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("removeAclEntries", path);
  }

  @Override
  public void removeDefaultAcl(Path path) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("removeDefaultAcl", path);
  }

  @Override
  public void removeAcl(Path path) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("removeAcl", path);
  }

  @Override
  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("setAcl", path);
  }

  // Synthesizes a minimal ACL consistent with the fixed 555 permissions.
  @Override
  public AclStatus getAclStatus(Path path) throws IOException {
    checkPathIsSlash(path);
    return new AclStatus.Builder().owner(ugi.getShortUserName())
        .group(ugi.getPrimaryGroupName())
        .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
        .stickyBit(false).build();
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("setXAttr", path);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new NotInMountpointException(path, "getXAttr");
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new NotInMountpointException(path, "getXAttrs");
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new NotInMountpointException(path, "getXAttrs");
  }

  @Override
  public List<String> listXAttrs(Path path) throws IOException {
    throw new NotInMountpointException(path, "listXAttrs");
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("removeXAttr", path);
  }

  @Override
  public Path createSnapshot(Path path, String snapshotName)
      throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("createSnapshot", path);
  }

  @Override
  public void renameSnapshot(Path path, String snapshotOldName,
      String snapshotNewName) throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("renameSnapshot", path);
  }

  @Override
  public void deleteSnapshot(Path path, String snapshotName)
      throws IOException {
    checkPathIsSlash(path);
    throw readOnlyMountTable("deleteSnapshot", path);
  }

  @Override
  public QuotaUsage getQuotaUsage(Path f) throws IOException {
    throw new NotInMountpointException(f, "getQuotaUsage");
  }

  @Override
  public void satisfyStoragePolicy(Path src) throws IOException {
    checkPathIsSlash(src);
    throw readOnlyMountTable("satisfyStoragePolicy", src);
  }

  @Override
  public void setStoragePolicy(Path src, String policyName)
      throws IOException {
    checkPathIsSlash(src);
    throw readOnlyMountTable("setStoragePolicy", src);
  }

  @Override
  public void unsetStoragePolicy(Path src) throws IOException {
    checkPathIsSlash(src);
    throw readOnlyMountTable("unsetStoragePolicy", src);
  }

  @Override
  public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
    throw new NotInMountpointException(src, "getStoragePolicy");
  }

  // Union of the policies advertised by every child file system; children
  // without storage-policy support are skipped.
  @Override
  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
      throws IOException {
    Collection<BlockStoragePolicySpi> allPolicies = new HashSet<>();
    for (FileSystem fs : getChildFileSystems()) {
      try {
        Collection<? extends BlockStoragePolicySpi> policies =
            fs.getAllStoragePolicies();
        allPolicies.addAll(policies);
      } catch (UnsupportedOperationException e) {
        // ignored
      }
    }
    return allPolicies;
  }

  @Override
  public Path getEnclosingRoot(Path path) throws IOException {
    InodeTree.ResolveResult<FileSystem> res;
    try {
      res = fsState.resolve((path.toString()), true);
    } catch (FileNotFoundException ex) {
      // Preserve the original failure as the cause.
      NotInMountpointException mountPointEx =
          new NotInMountpointException(path,
              String.format("getEnclosingRoot - %s", ex.getMessage()));
      mountPointEx.initCause(ex);
      throw mountPointEx;
    }
    Path fullPath = new Path(res.resolvedPath);
    Path enclosingPath = res.targetFileSystem.getEnclosingRoot(path);
    // Return the deeper of the target fs's enclosing root and the mount path.
    return enclosingPath.depth() > fullPath.depth()
        ? enclosingPath
        : fullPath;
  }
}
// Strategies for how far a rename may cross mount-point boundaries.
// Presumably consumed by the rename implementation elsewhere in this class
// — TODO confirm against the rename() code (outside this view).
enum RenameStrategy {
  SAME_MOUNTPOINT, SAME_TARGET_URI_ACROSS_MOUNTPOINT,
  SAME_FILESYSTEM_ACROSS_MOUNTPOINT
}
/**
 * Close each child of {@code fs} whose URI scheme has file-system caching
 * disabled; cached children are left open since the cache owns them.
 * Failures to close are logged and otherwise ignored (best effort).
 * @param fs the parent file system, may be null (then this is a no-op).
 */
private void closeChildFileSystems(FileSystem fs) {
  if (fs != null) {
    FileSystem[] childFs = fs.getChildFileSystems();
    for (FileSystem child : childFs) {
      if (child != null) {
        String disableCacheName = String.format("fs.%s.impl.disable.cache",
            child.getUri().getScheme());
        if (config.getBoolean(disableCacheName, false)) {
          try {
            child.close();
          } catch (IOException e) {
            // Fix: log the child that failed to close (the loop variable),
            // not the parent 'fs' as the original did.
            LOG.info("Fail closing ViewFileSystem's child filesystem "
                + child, e);
          }
        }
      }
    }
  }
}
/**
 * Close this view: always close the superclass first, then release child
 * file systems. With the inner cache enabled the cache owns the targets and
 * is closed wholesale; otherwise each mount point's (and the fallback's)
 * uncached children are closed individually.
 */
@Override
public void close() throws IOException {
  super.close();
  if (enableInnerCache && cache != null) {
    cache.closeAll();
    cache.clear();
  }
  if (!enableInnerCache) {
    for (InodeTree.MountPoint<FileSystem> mountPoint :
        fsState.getMountPoints()) {
      FileSystem targetFs = mountPoint.target.getTargetFileSystemForClose();
      closeChildFileSystems(targetFs);
    }
    // The fallback FS is not a mount point, so handle it separately.
    if (fsState.isRootInternalDir() &&
        fsState.getRootFallbackLink() != null) {
      closeChildFileSystems(
          fsState.getRootFallbackLink().getTargetFileSystem());
    }
  }
}
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
|
#!/bin/bash
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds the TensorFlow pip wheel with the CUDA config, optionally rewrites
# the version/name for nightlies, verifies the wheel, stages artifacts to
# GCS, and runs the wheel test suite. Configuration comes entirely from
# TFCI_* environment variables; `tfrun` is provided by setup.sh.
source "${BASH_SOURCE%/*}/utilities/setup.sh"

# Record GPU count and CUDA version status
if [[ "$TFCI_NVIDIA_SMI_ENABLE" == 1 ]]; then
  tfrun nvidia-smi
fi

# Update the version numbers for Nightly only
if [[ "$TFCI_NIGHTLY_UPDATE_VERSION_ENABLE" == 1 ]]; then
  python_bin=python3
  # TODO(belitskiy): Add a `python3` alias/symlink to Windows Docker image.
  if [[ $(uname -s) = MSYS_NT* ]]; then
    python_bin="python"
  fi
  tfrun "$python_bin" tensorflow/tools/ci_build/update_version.py --nightly
  # replace tensorflow to tf_nightly in the wheel name
  export TFCI_BUILD_PIP_PACKAGE_WHEEL_NAME_ARG="$(echo $TFCI_BUILD_PIP_PACKAGE_WHEEL_NAME_ARG | sed 's/tensorflow/tf_nightly/')"
  export TFCI_BUILD_PIP_PACKAGE_ADDITIONAL_WHEEL_NAMES="$(echo $TFCI_BUILD_PIP_PACKAGE_ADDITIONAL_WHEEL_NAMES | sed 's/tensorflow/tf_nightly/g')"
fi

# TODO(b/361369076) Remove the following block after TF NumPy 1 is dropped
# Move hermetic requirement lock files for NumPy 1 to the root
if [[ "$TFCI_WHL_NUMPY_VERSION" == 1 ]]; then
  cp ./ci/official/requirements_updater/numpy1_requirements/*.txt .
fi

# Build the primary wheel, copy it to the output dir, and verify it.
tfrun bazel $TFCI_BAZEL_BAZELRC_ARGS build $TFCI_BAZEL_COMMON_ARGS --config=cuda_wheel //tensorflow/tools/pip_package:wheel $TFCI_BUILD_PIP_PACKAGE_BASE_ARGS $TFCI_BUILD_PIP_PACKAGE_WHEEL_NAME_ARG --verbose_failures
tfrun "$TFCI_FIND_BIN" ./bazel-bin/tensorflow/tools/pip_package -iname "*.whl" -exec cp {} $TFCI_OUTPUT_DIR \;
tfrun mkdir -p ./dist
tfrun cp $TFCI_OUTPUT_DIR/*.whl ./dist
tfrun bash ./ci/official/utilities/rename_and_verify_wheels.sh

if [[ -n "$TFCI_BUILD_PIP_PACKAGE_ADDITIONAL_WHEEL_NAMES" ]]; then
  # Re-build the wheel with the same config, but with different name(s), if any.
  # This is done after the rename_and_verify_wheel.sh run above, not to have
  # to contend with extra wheels there.
  for wheel_name in ${TFCI_BUILD_PIP_PACKAGE_ADDITIONAL_WHEEL_NAMES}; do
    echo "Building for additional WHEEL_NAME: ${wheel_name}"
    CURRENT_WHEEL_NAME_ARG="--repo_env=WHEEL_NAME=${wheel_name}"
    tfrun bazel $TFCI_BAZEL_BAZELRC_ARGS build $TFCI_BAZEL_COMMON_ARGS --config=cuda_wheel //tensorflow/tools/pip_package:wheel $TFCI_BUILD_PIP_PACKAGE_BASE_ARGS $CURRENT_WHEEL_NAME_ARG
    # Copy the wheel that was just created (newest matching file wins).
    tfrun bash -c "$TFCI_FIND_BIN ./bazel-bin/tensorflow/tools/pip_package -iname "${wheel_name}*.whl" -printf '%T+ %p\n' | sort | tail -n 1 | awk '{print \$2}' | xargs -I {} cp {} $TFCI_OUTPUT_DIR"
  done
fi

if [[ "$TFCI_ARTIFACT_STAGING_GCS_ENABLE" == 1 ]]; then
  # Note: -n disables overwriting previously created files.
  # TODO(b/389744576): Remove when gsutil is made to work properly on MSYS2.
  if [[ $(uname -s) != MSYS_NT* ]]; then
    gsutil cp -n "$TFCI_OUTPUT_DIR"/*.whl "$TFCI_ARTIFACT_STAGING_GCS_URI"
  else
    powershell -command "gsutil cp -n '$TFCI_OUTPUT_DIR/*.whl' '$TFCI_ARTIFACT_STAGING_GCS_URI'"
  fi
fi

# Run the bazel-based wheel tests when enabled.
if [[ "$TFCI_WHL_BAZEL_TEST_ENABLE" == 1 ]]; then
  tfrun bazel $TFCI_BAZEL_BAZELRC_ARGS test $TFCI_BAZEL_COMMON_ARGS $TFCI_BUILD_PIP_PACKAGE_BASE_ARGS $TFCI_BUILD_PIP_PACKAGE_WHEEL_NAME_ARG --repo_env=TF_PYTHON_VERSION=$TFCI_PYTHON_VERSION --config="${TFCI_BAZEL_TARGET_SELECTING_CONFIG_PREFIX}_wheel_test"
fi
|
unknown
|
github
|
https://github.com/tensorflow/tensorflow
|
ci/official/wheel.sh
|
from rhizome.tests.base_test_case import RhizomeApiTestCase
from rhizome.models.user_models import UserGroup
from rhizome.tests.setup_helpers import TestSetupHelpers
from django.contrib.auth.models import User, Group
class UserGroupResourceTest(RhizomeApiTestCase):
    """API tests for the /api/v1/user_group/ endpoint (CRUD on UserGroup)."""
    def setUp(self):
        # Base class wires up the API client; TestSetupHelpers provides
        # request helpers and an authenticated self.ts.user fixture.
        super(UserGroupResourceTest, self).setUp()
        self.ts = TestSetupHelpers()
        self.lt = self.ts.create_arbitrary_location_type()
        self.top_lvl_location = self.ts.create_arbitrary_location(
            self.lt.id,
            location_code='Nigeria',
            location_name='Nigeria')
    def test_get(self):
        """Filtering by user_id returns exactly that user's membership row."""
        user = User.objects.create(username='Sam')
        group = Group.objects.create(name="Sam's Group")
        user_group = UserGroup.objects.create(user=user, group=group)
        data = {
            'user_id': user.id
        }
        resp = self.ts.get(self, '/api/v1/user_group/', data)
        response_data = self.deserialize(resp)
        self.assertHttpOK(resp)
        self.assertEqual(len(response_data['objects']), 1)
        resp_obj = response_data['objects'][0]
        self.assertEqual(resp_obj['group_id'], group.id)
        self.assertEqual(resp_obj['user_id'], user.id)
        self.assertEqual(resp_obj['id'], user_group.id)
    def test_get_all(self):
        """An unfiltered GET lists every UserGroup row."""
        user = User.objects.create(username='Sam')
        group = Group.objects.create(name="Sam's Group")
        user_group_1 = UserGroup.objects.create(user=user, group=group)
        user_group_2 = UserGroup.objects.create(user=self.ts.user, group=group)
        resp = self.ts.get(self, '/api/v1/user_group/')
        response_data = self.deserialize(resp)
        self.assertHttpOK(resp)
        self.assertEqual(
            len(response_data['objects']), UserGroup.objects.count())
    def test_create(self):
        """POSTing user_id + group_id creates a membership and echoes it back."""
        user = User.objects.create(username='Sam')
        group = Group.objects.create(name="Sam's Group")
        self.assertEqual(UserGroup.objects.count(), 0)
        data = {
            'user_id': user.id,
            'group_id': group.id
        }
        resp = self.ts.post(self, '/api/v1/user_group/', data)
        self.assertHttpCreated(resp)
        self.assertEqual(UserGroup.objects.count(), 1)
        response_data = self.deserialize(resp)
        self.assertEqual(response_data['user_id'], user.id)
        self.assertEqual(response_data['group_id'], group.id)
    def test_obj_delete(self):
        """DELETE on the detail URL removes the membership row."""
        user = User.objects.create(username='Sam')
        group = Group.objects.create(name="Sam's Group")
        user_group = UserGroup.objects.create(user=user, group=group)
        self.assertEqual(UserGroup.objects.count(), 1)
        delete_url = '/api/v1/user_group/%s/' % user_group.id
        self.ts.delete(self, delete_url)
        self.assertEqual(UserGroup.objects.count(), 0)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# this makes ctypes friendlier (for me, anyways)
from ctypes import *
## page permissions
# Memory-protection constants for VirtualAlloc/VirtualProtect (winnt.h PAGE_*).
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
# Modifier bits that can be OR'ed onto the protections above.
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400
## process access permissions from winnt.h
# Standard/generic access rights.  NOTE: the trailing 'L' long-integer
# suffix makes this module Python 2 only.
DELETE = 0x00010000L
READ_CONTROL = 0x00020000L
WRITE_DAC = 0x00040000L
WRITE_OWNER = 0x00080000L
SYNCHRONIZE = 0x00100000L
ACCESS_SYSTEM_SECURITY = 0x01000000L
MAXIMUM_ALLOWED = 0x02000000L
GENERIC_READ = 0x80000000L
GENERIC_WRITE = 0x40000000L
GENERIC_EXECUTE = 0x20000000L
GENERIC_ALL = 0x10000000L
STANDARD_RIGHTS_REQUIRED = 0x000F0000L
STANDARD_RIGHTS_READ = READ_CONTROL
STANDARD_RIGHTS_WRITE = READ_CONTROL
STANDARD_RIGHTS_EXECUTE = READ_CONTROL
STANDARD_RIGHTS_ALL = 0x001F0000L
SPECIFIC_RIGHTS_ALL = 0x0000FFFFL
# OpenProcess() desired-access bits.
PROCESS_TERMINATE = 0x0001
PROCESS_CREATE_THREAD = 0x0002
PROCESS_SET_SESSIONID = 0x0004
PROCESS_VM_OPERATION = 0x0008
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_DUP_HANDLE = 0x0040
PROCESS_CREATE_PROCESS = 0x0080
PROCESS_SET_QUOTA = 0x0100
PROCESS_SET_INFORMATION = 0x0200
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_SUSPEND_RESUME = 0x0800
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
#PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
# Convenience combinations used by this module.
PROCESS_VM_ALL = PROCESS_VM_OPERATION|PROCESS_VM_READ|PROCESS_VM_WRITE
PROCESS_INFO_ALL = PROCESS_QUERY_INFORMATION|PROCESS_SET_INFORMATION
# OpenThread() desired-access bits (winnt.h THREAD_*).
THREAD_TERMINATE = 0x0001
THREAD_SUSPEND_RESUME = 0x0002
THREAD_GET_CONTEXT = 0x0008
THREAD_SET_CONTEXT = 0x0010
THREAD_QUERY_INFORMATION = 0x0040
THREAD_SET_INFORMATION = 0x0020
THREAD_SET_THREAD_TOKEN = 0x0080
THREAD_IMPERSONATE = 0x0100
THREAD_DIRECT_IMPERSONATION = 0x0200
THREAD_SET_LIMITED_INFORMATION = 0x0400 # winnt
THREAD_QUERY_LIMITED_INFORMATION = 0x0800 # winnt
THREAD_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
# Job-object access rights.
JOB_OBJECT_ASSIGN_PROCESS = 0x0001
JOB_OBJECT_SET_ATTRIBUTES = 0x0002
JOB_OBJECT_QUERY = 0x0004
JOB_OBJECT_TERMINATE = 0x0008
JOB_OBJECT_SET_SECURITY_ATTRIBUTES = 0x0010
JOB_OBJECT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1F
## constants for contexts
# CONTEXT.ContextFlags selectors: each bit tells Get/SetThreadContext which
# register group to transfer.
CONTEXT_i386 = 0x00010000 # this assumes that i386 and
CONTEXT_i486 = 0x00010000 # i486 have identical context records
CONTEXT_CONTROL = (CONTEXT_i386 | 0x00000001L) # SS:SP, CS:IP, FLAGS, BP
CONTEXT_INTEGER = (CONTEXT_i386 | 0x00000002L) # AX, BX, CX, DX, SI, DI
CONTEXT_SEGMENTS = (CONTEXT_i386 | 0x00000004L) # DS, ES, FS, GS
CONTEXT_FLOATING_POINT = (CONTEXT_i386 | 0x00000008L) # 387 state
CONTEXT_DEBUG_REGISTERS = (CONTEXT_i386 | 0x00000010L) # DB 0-3,6,7
CONTEXT_EXTENDED_REGISTERS = (CONTEXT_i386 | 0x00000020L) # cpu specific extensions
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS)
CONTEXT_ALL = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS
CONTEXT_ALL |= CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS
CONTEXT_ALL |= CONTEXT_EXTENDED_REGISTERS
## basic types
# ctypes aliases matching the Windows typedef names used by the structures below.
DWORD64 = c_uint64
DWORD = c_uint32
WORD = c_uint16
BYTE = c_uint8
LONG = c_long
ULONG = c_ulong
INT = c_int
UINT = c_uint
ULONGLONG = c_uint64
LONGLONG = c_int64
## complex structures
class M128A(Structure):
    """128-bit value (winnt.h M128A); _fields_ order defines the C layout."""
    _fields_ = [
        ('Low', ULONGLONG),
        ('High', LONGLONG)
    ]
class MMX(Structure):
    """Block of 128-bit FP/XMM registers; layout mirrors the native struct."""
    _fields_ = [
        ('Header', ARRAY(M128A, 2)),
        ('Legacy', ARRAY(M128A, 8)),
        ('Xmm0', M128A),
        ('Xmm1', M128A),
        ('Xmm2', M128A),
        ('Xmm3', M128A),
        ('Xmm4', M128A),
        ('Xmm5', M128A),
        ('Xmm6', M128A),
        ('Xmm7', M128A),
        ('Xmm8', M128A),
        ('Xmm9', M128A),
        ('Xmm10', M128A),
        ('Xmm11', M128A),
        ('Xmm12', M128A),
        ('Xmm13', M128A),
        ('Xmm14', M128A),
        ('Xmm15', M128A)
    ]
class XMM_SAVE_AREA32(Structure):
    """FXSAVE area (winnt.h XMM_SAVE_AREA32); do not reorder _fields_."""
    _fields_ = [
        ('ControlWord', WORD),
        ('StatusWord', WORD),
        ('TagWord', BYTE),
        ('Reserved1', BYTE),
        ('ErrorOpcode', WORD),
        ('ErrorOffset', DWORD),
        ('ErrorSelector', WORD),
        ('Reserved2', WORD),
        ('DataOffset', DWORD),
        ('DataSelector', WORD),
        ('Reserved3', WORD),
        ('MxCsr', DWORD),
        ('MxCsr_Mask', DWORD),
        ('FloatRegisters', ARRAY(M128A, 8)),
        ('XmmRegisters', ARRAY(M128A, 16)),
        ('Reserved4', ARRAY(BYTE, 96))
    ]
SIZE_OF_80387_REGISTERS = 80
class FLOATING_SAVE_AREA(Structure):
    """x87 FPU state as embedded in the 32-bit CONTEXT record."""
    _fields_ = [
        ('ControlWord', DWORD),
        ('StatusWord', DWORD),
        ('TagWord', DWORD),
        ('ErrorOffset', DWORD),
        ('ErrorSelector', DWORD),
        ('DataOffset', DWORD),
        ('DataSelector', DWORD),
        ('RegisterArea', ARRAY(BYTE, SIZE_OF_80387_REGISTERS)),
        ('Cr0NpxState', DWORD)
    ]
MAXIMUM_SUPPORTED_EXTENSION = 512
class CONTEXT(Structure):
    """32-bit thread CONTEXT record for Get/SetThreadContext.

    Which members are valid is selected by ContextFlags (CONTEXT_* above).
    Field order is the ABI layout; do not reorder.
    """
    _fields_ = [
        ('ContextFlags', DWORD),
        ('Dr0', DWORD),
        ('Dr1', DWORD),
        ('Dr2', DWORD),
        ('Dr3', DWORD),
        ('Dr6', DWORD),
        ('Dr7', DWORD),
        ('FloatSave', FLOATING_SAVE_AREA),
        ('SegGs', DWORD),
        ('SegFs', DWORD),
        ('SegEs', DWORD),
        ('SegDs', DWORD),
        ('Edi', DWORD),
        ('Esi', DWORD),
        ('Ebx', DWORD),
        ('Edx', DWORD),
        ('Ecx', DWORD),
        ('Eax', DWORD),
        ('Ebp', DWORD),
        ('Eip', DWORD),
        ('SegCs', DWORD),
        ('EFlags', DWORD),
        ('Esp', DWORD),
        ('SegSs', DWORD),
        ('ExtendedRegisters', ARRAY(BYTE, MAXIMUM_SUPPORTED_EXTENSION))
    ]
## other win32 stuff
HANDLE = c_voidp
class CLIENT_ID(Structure):
    """Process id / thread id pair (ntdll CLIENT_ID)."""
    _fields_ = [
        ('UniqueProcess', HANDLE),
        ('UniqueThread', HANDLE)
    ]
ThreadBasicInformation = 0 # _THREADINFOCLASS
KAFFINITY = KPRIORITY = c_ulong
PVOID = c_voidp
NTSTATUS = c_long
class THREAD_BASIC_INFORMATION(Structure):
    """Result buffer for NtQueryInformationThread(ThreadBasicInformation)."""
    _fields_ = [
        ('ExitStatus', NTSTATUS),
        ('TebBaseAddress', PVOID),
        ('ClientId', CLIENT_ID),
        ('AffinityMask', KAFFINITY),
        ('Priority', KPRIORITY),
        ('BasePriority', KPRIORITY),
    ]
## token structures (LUID / privilege handling for AdjustTokenPrivileges)
class LUID(Structure):
    """Locally unique identifier (winnt.h LUID)."""
    _fields_ = [
        ('LowPart', DWORD),
        ('HighPart', LONG)
    ]
class LUID_AND_ATTRIBUTES(Structure):
    """A privilege LUID with its SE_PRIVILEGE_* attribute bits."""
    _fields_ = [
        ('Luid', LUID),
        ('Attributes', DWORD)
    ]
class TOKEN_PRIVILEGES(Structure):
    """Variable-length in C; declared here with room for exactly one entry."""
    _fields_ = [
        ('PrivilegeCount', ULONG),
        ('Privileges', LUID_AND_ATTRIBUTES*1)
    ]
SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_REMOVED = 0X00000004
SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000
SE_PRIVILEGE_VALID_ATTRIBUTES = (SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS)
PRIVILEGE_SET_ALL_NECESSARY = (1)
class PRIVILEGE_SET(Structure):
    """Privilege set for PrivilegeCheck(); one-entry layout as above."""
    _fields_ = [
        ('PrivilegeCount', DWORD),
        ('Control', DWORD),
        ('Privilege', LUID_AND_ATTRIBUTES*1)
    ]
## token constants
# OpenProcessToken/OpenThreadToken desired-access bits (winnt.h TOKEN_*).
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATE = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
# Composite rights, mirroring the winnt.h definitions.
TOKEN_ALL_ACCESS_P = STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY | TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_ALL_ACCESS = TOKEN_ALL_ACCESS_P | TOKEN_ADJUST_SESSIONID
TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY
TOKEN_WRITE = STANDARD_RIGHTS_WRITE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pytest
from pandas import Categorical
import pandas._testing as tm
@pytest.mark.parametrize("c", [None, [1, 2, 3, 4, 5]])
def test_categorical_equal(c):
    """A Categorical compares equal to itself, with inferred or explicit categories."""
    cat = Categorical([1, 2, 3, 4], categories=c)
    tm.assert_categorical_equal(cat, cat)
@pytest.mark.parametrize("check_category_order", [True, False])
def test_categorical_equal_order_mismatch(check_category_order):
    """Reordered categories only fail the check when order is enforced."""
    left = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
    right = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])
    if not check_category_order:
        # Order is ignored: the comparison succeeds.
        tm.assert_categorical_equal(
            left, right, check_category_order=check_category_order
        )
        return
    msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(100\\.0 %\\)
\\[left\\]:  Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[4, 3, 2, 1\\], dtype='int64'\\)"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_categorical_equal(
            left, right, check_category_order=check_category_order
        )
def test_categorical_equal_categories_mismatch():
    """Differing category sets raise with a detailed diff message."""
    left = Categorical([1, 2, 3, 4])
    right = Categorical([1, 2, 3, 5])
    msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]:  Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_categorical_equal(left, right)
def test_categorical_equal_codes_mismatch():
    """Same categories but different codes raise with a code-level diff."""
    shared_categories = [1, 2, 3, 4]
    left = Categorical([1, 2, 4, 3], categories=shared_categories)
    right = Categorical([1, 2, 3, 4], categories=shared_categories)
    msg = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]:  \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_categorical_equal(left, right)
def test_categorical_equal_ordered_mismatch():
    """An ordered/unordered mismatch raises on the 'ordered' attribute."""
    values = [1, 2, 3, 4]
    left = Categorical(values, ordered=False)
    right = Categorical(values, ordered=True)
    msg = """Categorical are different
Attribute "ordered" are different
\\[left\\]:  False
\\[right\\]: True"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_categorical_equal(left, right)
@pytest.mark.parametrize("obj", ["index", "foo", "pandas"])
def test_categorical_equal_object_override(obj):
    """The obj= argument replaces 'Categorical' in the failure message."""
    values = [1, 2, 3, 4]
    left = Categorical(values, ordered=False)
    right = Categorical(values, ordered=True)
    msg = f"""{obj} are different
Attribute "ordered" are different
\\[left\\]:  False
\\[right\\]: True"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_categorical_equal(left, right, obj=obj)
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/tests/util/test_assert_categorical_equal.py
|
import os
import re
from socket import *
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from boxbranding import getBoxType
class Network:
    def __init__(self):
        """Initialise per-adapter state and kick off the first interface scan."""
        self.ifaces = {}                      # iface name -> state dict (up/dhcp/ip/...)
        self.configuredNetworkAdapters = []
        self.NetworkState = 0                 # failed-ping counter, see checkNetworkState
        self.DnsState = 0                     # failed-lookup counter, see checkDNSLookup
        self.nameservers = []                 # each entry is [a, b, c, d] ints
        self.ethtool_bin = "ethtool"
        # One Console per task so concurrent shell jobs don't clobber each other.
        self.Console = Console()
        self.LinkConsole = Console()
        self.restartConsole = Console()
        self.deactivateInterfaceConsole = Console()
        self.activateInterfaceConsole = Console()
        self.resetNetworkConsole = Console()
        self.DnsConsole = Console()
        self.PingConsole = Console()
        self.config_ready = None
        self.friendlyNames = {}
        self.lan_interfaces = []
        self.wlan_interfaces = []
        self.remoteRootFS = None              # lazily detected, see onRemoteRootFS
        self.getInterfaces()
    def onRemoteRootFS(self):
        """Return True when / is NFS-mounted; result cached in self.remoteRootFS."""
        if self.remoteRootFS is None:
            import Harddisk
            for parts in Harddisk.getProcMounts():
                if parts[1] == '/' and parts[2] == 'nfs':
                    self.remoteRootFS = True
                    break
            else:
                # for/else: no NFS root mount was found.
                self.remoteRootFS = False
        return self.remoteRootFS
def isBlacklisted(self, iface):
return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0')
    def getInterfaces(self, callback = None):
        """Scan every installed adapter; *callback* fires once parsing finishes."""
        self.configuredInterfaces = []
        for device in self.getInstalledAdapters():
            # Async: results arrive via IPaddrFinished / routeFinished.
            self.getAddrInet(device, callback)
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
return None
# helper function to convert IPs from a string to a list of ints
def convertIP(self, ip):
return [ int(n) for n in ip.split('.') ]
    def getAddrInet(self, iface, callback):
        """Asynchronously query `ip addr` for *iface*; parsed by IPaddrFinished."""
        if not self.Console:
            self.Console = Console()
        cmd = "ip -o addr show dev " + iface
        self.Console.ePopen(cmd, self.IPaddrFinished, [iface,callback])
def IPaddrFinished(self, result, retval, extra_args):
(iface, callback ) = extra_args
data = { 'up': False, 'dhcp': False, 'preup' : False, 'predown' : False }
globalIPpattern = re.compile("scope global")
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
netRegexp = '[0-9]{1,2}'
macRegexp = '[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}'
ipLinePattern = re.compile('inet ' + ipRegexp + '/')
ipPattern = re.compile(ipRegexp)
netmaskLinePattern = re.compile('/' + netRegexp)
netmaskPattern = re.compile(netRegexp)
bcastLinePattern = re.compile(' brd ' + ipRegexp)
upPattern = re.compile('UP')
macPattern = re.compile(macRegexp)
macLinePattern = re.compile('link/ether ' + macRegexp)
for line in result.splitlines():
split = line.strip().split(' ',2)
if split[1][:-1] == iface:
up = self.regExpMatch(upPattern, split[2])
mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
if up is not None:
data['up'] = True
if iface is not 'lo':
self.configuredInterfaces.append(iface)
if mac is not None:
data['mac'] = mac
if split[1] == iface:
if re.search(globalIPpattern, split[2]):
ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
if ip is not None:
data['ip'] = self.convertIP(ip)
if netmask is not None:
data['netmask'] = self.convertIP(netmask)
if bcast is not None:
data['bcast'] = self.convertIP(bcast)
if not data.has_key('ip'):
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
cmd = "route -n | grep " + iface
self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback])
    def routeFinished(self, result, retval, extra_args):
        """Extract the default gateway from `route -n`, then store the iface state."""
        (iface, data, callback) = extra_args
        ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
        ipPattern = re.compile(ipRegexp)
        ipLinePattern = re.compile(ipRegexp)
        for line in result.splitlines():
            print line[0:7]
            # "0.0.0.0" destination marks the default route line.
            if line[0:7] == "0.0.0.0":
                gateway = self.regExpMatch(ipPattern, line[16:31])
                if gateway:
                    data['gateway'] = self.convertIP(gateway)
        self.ifaces[iface] = data
        self.loadNetworkConfig(iface,callback)
    def writeNetworkConfig(self):
        """Serialise self.ifaces into /etc/network/interfaces, then resolv.conf."""
        self.configuredInterfaces = []
        fp = file('/etc/network/interfaces', 'w')
        fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
        fp.write("auto lo\n")
        fp.write("iface lo inet loopback\n\n")
        for ifacename, iface in self.ifaces.items():
            if iface['up']:
                fp.write("auto " + ifacename + "\n")
                self.configuredInterfaces.append(ifacename)
                if iface['dhcp']:
                    fp.write("iface "+ ifacename +" inet dhcp\n")
                    fp.write(" hostname $(hostname)\n")
                if not iface['dhcp']:
                    fp.write("iface "+ ifacename +" inet static\n")
                    fp.write(" hostname $(hostname)\n")
                    if iface.has_key('ip'):
                        # print tuple(iface['ip'])
                        fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip']))
                        fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
                        if iface.has_key('gateway'):
                            fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
            # Extra plugin-provided lines; configStrings supersedes pre-up/pre-down.
            if iface.has_key("configStrings"):
                fp.write(iface["configStrings"])
            if iface["preup"] is not False and not iface.has_key("configStrings"):
                fp.write(iface["preup"])
            if iface["predown"] is not False and not iface.has_key("configStrings"):
                fp.write(iface["predown"])
            fp.write("\n")
        fp.close()
        self.configuredNetworkAdapters = self.configuredInterfaces
        self.writeNameserverConfig()
    def writeNameserverConfig(self):
        """Rewrite /etc/resolv.conf from self.nameservers (best effort)."""
        try:
            os.system('rm -rf /etc/resolv.conf')
            fp = file('/etc/resolv.conf', 'w')
            for nameserver in self.nameservers:
                fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
            fp.close()
        except:
            print "[Network.py] interfaces - resolv.conf write failed"
    def loadNetworkConfig(self,iface,callback = None):
        """Merge /etc/network/interfaces settings for *iface* into self.ifaces.

        Runs at the end of the scan chain; once all Console jobs are drained
        it loads the nameservers, flags config_ready and fires *callback*.
        """
        interfaces = []
        # parse the interfaces-file
        try:
            fp = file('/etc/network/interfaces', 'r')
            interfaces = fp.readlines()
            fp.close()
        except:
            print "[Network.py] interfaces - opening failed"
        ifaces = {}
        currif = ""
        for i in interfaces:
            split = i.strip().split(' ')
            if split[0] == "iface":
                currif = split[1]
                ifaces[currif] = {}
                if len(split) == 4 and split[3] == "dhcp":
                    ifaces[currif]["dhcp"] = True
                else:
                    ifaces[currif]["dhcp"] = False
            if currif == iface: #read information only for available interfaces
                if split[0] == "address":
                    ifaces[currif]["address"] = map(int, split[1].split('.'))
                    # Static file values override the scanned values for non-DHCP ifaces.
                    if self.ifaces[currif].has_key("ip"):
                        if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
                            self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
                if split[0] == "netmask":
                    ifaces[currif]["netmask"] = map(int, split[1].split('.'))
                    if self.ifaces[currif].has_key("netmask"):
                        if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
                            self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
                if split[0] == "gateway":
                    ifaces[currif]["gateway"] = map(int, split[1].split('.'))
                    if self.ifaces[currif].has_key("gateway"):
                        if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
                            self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
                if split[0] == "pre-up":
                    if self.ifaces[currif].has_key("preup"):
                        self.ifaces[currif]["preup"] = i
                if split[0] in ("pre-down","post-down"):
                    if self.ifaces[currif].has_key("predown"):
                        self.ifaces[currif]["predown"] = i
        for ifacename, iface in ifaces.items():
            if self.ifaces.has_key(ifacename):
                self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
        if self.Console:
            if len(self.Console.appContainers) == 0:
                # save configured interfacelist
                self.configuredNetworkAdapters = self.configuredInterfaces
                # load ns only once
                self.loadNameserverConfig()
                # print "read configured interface:", ifaces
                # print "self.ifaces after loading:", self.ifaces
                self.config_ready = True
                self.msgPlugins()
                if callback is not None:
                    callback(True)
    def loadNameserverConfig(self):
        """Re-read /etc/resolv.conf into self.nameservers (lists of ints)."""
        ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
        nameserverPattern = re.compile("nameserver +" + ipRegexp)
        ipPattern = re.compile(ipRegexp)
        resolv = []
        try:
            fp = file('/etc/resolv.conf', 'r')
            resolv = fp.readlines()
            fp.close()
            # Only clear the current list once the file was actually read.
            self.nameservers = []
        except:
            print "[Network.py] resolv.conf - opening failed"
        for line in resolv:
            if self.regExpMatch(nameserverPattern, line) is not None:
                ip = self.regExpMatch(ipPattern, line)
                if ip:
                    self.nameservers.append(self.convertIP(ip))
        # print "nameservers:", self.nameservers
    def getInstalledAdapters(self):
        """List every /sys/class/net device that is not blacklisted."""
        return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)]
    def getConfiguredAdapters(self):
        # Adapters that ended up in /etc/network/interfaces.
        return self.configuredNetworkAdapters
    def getNumberOfAdapters(self):
        return len(self.ifaces)
    def getFriendlyAdapterName(self, x):
        """Return (and cache) a human-readable name for adapter *x*."""
        if x in self.friendlyNames.keys():
            return self.friendlyNames.get(x, x)
        self.friendlyNames[x] = self.getFriendlyAdapterNaming(x)
        return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name
    def getFriendlyAdapterNaming(self, iface):
        """Build a numbered 'LAN/WLAN connection N' label for a new interface.

        Returns None for an interface that was already named (already present
        in the lan/wlan tracking lists).
        """
        name = None
        if self.isWirelessInterface(iface):
            if iface not in self.wlan_interfaces:
                name = _("WLAN connection")
                if len(self.wlan_interfaces):
                    name += " " + str(len(self.wlan_interfaces)+1)
                self.wlan_interfaces.append(iface)
        else:
            if iface not in self.lan_interfaces:
                # et10000 boxes expose eth1 as a VLAN port.
                if getBoxType() == "et10000" and iface == "eth1":
                    name = _("VLAN connection")
                else:
                    name = _("LAN connection")
                if len(self.lan_interfaces) and not getBoxType() == "et10000" and not iface == "eth1":
                    name += " " + str(len(self.lan_interfaces)+1)
                self.lan_interfaces.append(iface)
        return name
    def getFriendlyAdapterDescription(self, iface):
        """Describe the interface by its driver vendor (wired ifaces are generic)."""
        if not self.isWirelessInterface(iface):
            return _('Ethernet network interface')
        moduledir = self.getWlanModuleDir(iface)
        if moduledir:
            # The module directory's real name is the kernel driver name.
            name = os.path.basename(os.path.realpath(moduledir))
            if name in ('ath_pci','ath5k'):
                name = 'Atheros'
            elif name in ('rt73','rt73usb','rt3070sta'):
                name = 'Ralink'
            elif name == 'zd1211b':
                name = 'Zydas'
            elif name == 'r871x_usb_drv':
                name = 'Realtek'
        else:
            name = _('Unknown')
        return name + ' ' + _('wireless network interface')
    def getAdapterName(self, iface):
        # Kernel interface names are used as-is.
        return iface
    def getAdapterList(self):
        return self.ifaces.keys()
    def getAdapterAttribute(self, iface, attribute):
        """Read one value from the iface state dict; None when absent."""
        if self.ifaces.has_key(iface):
            if self.ifaces[iface].has_key(attribute):
                return self.ifaces[iface][attribute]
        return None
    def setAdapterAttribute(self, iface, attribute, value):
        # print "setting for adapter", iface, "attribute", attribute, " to value", value
        if self.ifaces.has_key(iface):
            self.ifaces[iface][attribute] = value
    def removeAdapterAttribute(self, iface, attribute):
        if self.ifaces.has_key(iface):
            if self.ifaces[iface].has_key(attribute):
                del self.ifaces[iface][attribute]
    def getNameserverList(self):
        """Return the configured nameservers; two zeroed slots when empty."""
        if len(self.nameservers) == 0:
            return [[0, 0, 0, 0], [0, 0, 0, 0]]
        else:
            return self.nameservers
    def clearNameservers(self):
        self.nameservers = []
    def addNameserver(self, nameserver):
        if nameserver not in self.nameservers:
            self.nameservers.append(nameserver)
    def removeNameserver(self, nameserver):
        if nameserver in self.nameservers:
            self.nameservers.remove(nameserver)
    def changeNameserver(self, oldnameserver, newnameserver):
        """Replace *oldnameserver* in place, preserving its list position."""
        if oldnameserver in self.nameservers:
            for i in range(len(self.nameservers)):
                if self.nameservers[i] == oldnameserver:
                    self.nameservers[i] = newnameserver
    def resetNetworkConfig(self, mode='lan', callback = None):
        """Tear down all interfaces, then rewrite a default config for *mode*."""
        self.resetNetworkConsole = Console()
        self.commands = []
        self.commands.append("/etc/init.d/avahi-daemon stop")
        for iface in self.ifaces.keys():
            # Never flush eth0 while the rootfs is mounted over it (NFS boot).
            if iface != 'eth0' or not self.onRemoteRootFS():
                self.commands.append("ip addr flush dev " + iface)
        self.commands.append("/etc/init.d/networking stop")
        self.commands.append("killall -9 udhcpc")
        self.commands.append("rm /var/run/udhcpc*")
        self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True)
    def resetNetworkFinishedCB(self, extra_args):
        """Batch callback: continue once every teardown command has exited."""
        (mode, callback) = extra_args
        if len(self.resetNetworkConsole.appContainers) == 0:
            self.writeDefaultNetworkConfig(mode, callback)
    def writeDefaultNetworkConfig(self,mode='lan', callback = None):
        """Write a minimal DHCP interfaces file for lan/wlan/wlan-mpci mode."""
        fp = file('/etc/network/interfaces', 'w')
        fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
        fp.write("auto lo\n")
        fp.write("iface lo inet loopback\n\n")
        if mode == 'wlan':
            fp.write("auto wlan0\n")
            fp.write("iface wlan0 inet dhcp\n")
        if mode == 'wlan-mpci':
            fp.write("auto ath0\n")
            fp.write("iface ath0 inet dhcp\n")
        if mode == 'lan':
            fp.write("auto eth0\n")
            fp.write("iface eth0 inet dhcp\n")
        fp.write("\n")
        fp.close()
        self.resetNetworkConsole = Console()
        self.commands = []
        # Bring up only the selected interface; force the others down.
        if mode == 'wlan':
            self.commands.append("ifconfig eth0 down")
            self.commands.append("ifconfig ath0 down")
            self.commands.append("ifconfig wlan0 up")
        if mode == 'wlan-mpci':
            self.commands.append("ifconfig eth0 down")
            self.commands.append("ifconfig wlan0 down")
            self.commands.append("ifconfig ath0 up")
        if mode == 'lan':
            self.commands.append("ifconfig eth0 up")
            self.commands.append("ifconfig wlan0 down")
            self.commands.append("ifconfig ath0 down")
        self.commands.append("/etc/init.d/avahi-daemon start")
        self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode,callback], debug=True)
    def resetNetworkFinished(self,extra_args):
        """Final batch callback for resetNetworkConfig; reports success."""
        (mode, callback) = extra_args
        if len(self.resetNetworkConsole.appContainers) == 0:
            if callback is not None:
                callback(True,mode)
    def checkNetworkState(self,statecallback):
        """Ping three hosts in parallel; statecallback gets the failure count
        (0 means at least one host answered)."""
        self.NetworkState = 0
        cmd1 = "ping -c 1 www.google.de"
        cmd2 = "ping -c 1 www.google.com"
        cmd3 = "ping -c 1 www.google.nl"
        self.PingConsole = Console()
        self.PingConsole.ePopen(cmd1, self.checkNetworkStateFinished,statecallback)
        self.PingConsole.ePopen(cmd2, self.checkNetworkStateFinished,statecallback)
        self.PingConsole.ePopen(cmd3, self.checkNetworkStateFinished,statecallback)
    def checkNetworkStateFinished(self, result, retval,extra_args):
        """Per-ping callback: first success wins, otherwise count failures."""
        (statecallback) = extra_args
        if self.PingConsole is not None:
            if retval == 0:
                # One host answered: drop the console so later pings are ignored.
                self.PingConsole = None
                statecallback(self.NetworkState)
            else:
                self.NetworkState += 1
                if len(self.PingConsole.appContainers) == 0:
                    statecallback(self.NetworkState)
    def restartNetwork(self,callback = None):
        """Restart the whole networking stack via a batched command list."""
        self.restartConsole = Console()
        self.config_ready = False
        self.msgPlugins()
        self.commands = []
        self.commands.append("/etc/init.d/avahi-daemon stop")
        for iface in self.ifaces.keys():
            # Keep eth0 alive when the rootfs is NFS-mounted over it.
            if iface != 'eth0' or not self.onRemoteRootFS():
                self.commands.append("ifdown " + iface)
                self.commands.append("ip addr flush dev " + iface)
        self.commands.append("/etc/init.d/networking stop")
        self.commands.append("killall -9 udhcpc")
        self.commands.append("rm /var/run/udhcpc*")
        self.commands.append("/etc/init.d/networking start")
        self.commands.append("/etc/init.d/avahi-daemon start")
        self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True)
    def restartNetworkFinished(self,extra_args):
        """Batch callback: signal completion to the caller."""
        ( callback ) = extra_args
        if callback is not None:
            callback(True)
    def getLinkState(self,iface,callback):
        """Run ethtool on *iface*; raw output is handed to *callback*."""
        cmd = self.ethtool_bin + " " + iface
        self.LinkConsole = Console()
        self.LinkConsole.ePopen(cmd, self.getLinkStateFinished,callback)
    def getLinkStateFinished(self, result, retval,extra_args):
        (callback) = extra_args
        if self.LinkConsole is not None:
            if len(self.LinkConsole.appContainers) == 0:
                callback(result)
    # The stop* helpers below abort any still-running shell jobs on their
    # respective Console so a new operation can start cleanly.
    def stopPingConsole(self):
        if self.PingConsole is not None:
            if len(self.PingConsole.appContainers):
                for name in self.PingConsole.appContainers.keys():
                    self.PingConsole.kill(name)
    def stopLinkStateConsole(self):
        if self.LinkConsole is not None:
            if len(self.LinkConsole.appContainers):
                for name in self.LinkConsole.appContainers.keys():
                    self.LinkConsole.kill(name)
    def stopDNSConsole(self):
        if self.DnsConsole is not None:
            if len(self.DnsConsole.appContainers):
                for name in self.DnsConsole.appContainers.keys():
                    self.DnsConsole.kill(name)
    def stopRestartConsole(self):
        if self.restartConsole is not None:
            if len(self.restartConsole.appContainers):
                for name in self.restartConsole.appContainers.keys():
                    self.restartConsole.kill(name)
    def stopGetInterfacesConsole(self):
        if self.Console is not None:
            if len(self.Console.appContainers):
                for name in self.Console.appContainers.keys():
                    self.Console.kill(name)
    def stopDeactivateInterfaceConsole(self):
        if self.deactivateInterfaceConsole is not None:
            self.deactivateInterfaceConsole.killAll()
            self.deactivateInterfaceConsole = None
    def stopActivateInterfaceConsole(self):
        if self.activateInterfaceConsole is not None:
            self.activateInterfaceConsole.killAll()
            self.activateInterfaceConsole = None
    def checkforInterface(self,iface):
        """Return True when *iface* is up, or can at least be brought up.

        NOTE(review): the probe brings the interface up and then back down
        unconditionally -- intentional side effect of the probe.
        """
        if self.getAdapterAttribute(iface, 'up') is True:
            return True
        else:
            ret=os.system("ifconfig " + iface + " up")
            os.system("ifconfig " + iface + " down")
            if ret == 0:
                return True
            else:
                return False
    def checkDNSLookup(self,statecallback):
        """Resolve three hostnames in parallel; statecallback gets the
        failure count (0 means at least one lookup succeeded)."""
        cmd1 = "nslookup www.dream-multimedia-tv.de"
        cmd2 = "nslookup www.heise.de"
        cmd3 = "nslookup www.google.de"
        self.DnsConsole = Console()
        self.DnsConsole.ePopen(cmd1, self.checkDNSLookupFinished,statecallback)
        self.DnsConsole.ePopen(cmd2, self.checkDNSLookupFinished,statecallback)
        self.DnsConsole.ePopen(cmd3, self.checkDNSLookupFinished,statecallback)
    def checkDNSLookupFinished(self, result, retval,extra_args):
        """Per-lookup callback; mirrors checkNetworkStateFinished."""
        (statecallback) = extra_args
        if self.DnsConsole is not None:
            if retval == 0:
                self.DnsConsole = None
                statecallback(self.DnsState)
            else:
                self.DnsState += 1
                if len(self.DnsConsole.appContainers) == 0:
                    statecallback(self.DnsState)
    def deactivateInterface(self,ifaces,callback = None):
        """Bring one interface (or a list of them) down via batched ifdown."""
        self.config_ready = False
        self.msgPlugins()
        commands = []
        def buildCommands(iface):
            commands.append("ifdown " + iface)
            commands.append("ip addr flush dev " + iface)
            #wpa_supplicant sometimes doesn't quit properly on SIGTERM
            if os.path.exists('/var/run/wpa_supplicant/'+ iface):
                commands.append("wpa_cli -i" + iface + " terminate")
        if not self.deactivateInterfaceConsole:
            self.deactivateInterfaceConsole = Console()
        if isinstance(ifaces, (list, tuple)):
            for iface in ifaces:
                # Never take eth0 down when the rootfs is NFS-mounted over it.
                if iface != 'eth0' or not self.onRemoteRootFS():
                    buildCommands(iface)
        else:
            if ifaces == 'eth0' and self.onRemoteRootFS():
                if callback is not None:
                    callback(True)
                return
            buildCommands(ifaces)
        self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, [ifaces,callback], debug=True)
    def deactivateInterfaceFinished(self,extra_args):
        """Verify each ifdown actually worked; force ifconfig down otherwise."""
        (ifaces, callback) = extra_args
        def checkCommandResult(iface):
            # Returns False only when ifdown reported 'not configured'.
            if self.deactivateInterfaceConsole and self.deactivateInterfaceConsole.appResults.has_key("ifdown " + iface):
                result = str(self.deactivateInterfaceConsole.appResults.get("ifdown " + iface)).strip("\n")
                if result == "ifdown: interface " + iface + " not configured":
                    return False
                else:
                    return True
        #ifdown sometimes can't get the interface down.
        if isinstance(ifaces, (list, tuple)):
            for iface in ifaces:
                if checkCommandResult(iface) is False:
                    Console().ePopen(("ifconfig " + iface + " down" ))
        else:
            if checkCommandResult(ifaces) is False:
                Console().ePopen(("ifconfig " + ifaces + " down" ))
        if self.deactivateInterfaceConsole:
            if len(self.deactivateInterfaceConsole.appContainers) == 0:
                if callback is not None:
                    callback(True)
    def activateInterface(self,iface,callback = None):
        """Bring *iface* up with ifup (skipped for an NFS-root eth0)."""
        if self.config_ready:
            self.config_ready = False
            self.msgPlugins()
        if iface == 'eth0' and self.onRemoteRootFS():
            if callback is not None:
                callback(True)
            return
        if not self.activateInterfaceConsole:
            self.activateInterfaceConsole = Console()
        commands = ["ifup " + iface]
        self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True)
    def activateInterfaceFinished(self,extra_args):
        callback = extra_args
        if self.activateInterfaceConsole:
            if len(self.activateInterfaceConsole.appContainers) == 0:
                if callback is not None:
                    callback(True)
def sysfsPath(self, iface):
return '/sys/class/net/' + iface
    def isWirelessInterface(self, iface):
        """True when *iface* is a WLAN device (cached list, sysfs, or /proc)."""
        if iface in self.wlan_interfaces:
            return True
        if os.path.isdir(self.sysfsPath(iface) + '/wireless'):
            return True
        # r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless
        device = re.compile('[a-z]{2,}[0-9]*:')
        ifnames = []
        fp = open('/proc/net/wireless', 'r')
        for line in fp:
            try:
                ifnames.append(device.search(line).group()[:-1])
            except AttributeError:
                pass
        fp.close()
        if iface in ifnames:
            return True
        return False
def getWlanModuleDir(self, iface = None):
	"""Locate the sysfs directory of the kernel module driving *iface*.

	Returns the first existing candidate below
	/sys/class/net/<iface>/device, or None when no driver directory
	can be identified.
	"""
	devicedir = self.sysfsPath(iface) + '/device'
	candidate = devicedir + '/driver/module'
	if os.path.isdir(candidate):
		return candidate
	# rt3070 on kernel 2.6.18 registers wireless devices as usb_device
	# (e.g. 1-1.3:1.0), so identification is only possible over
	# /sys/class/net/<iface>/device/1-xxx
	for entry in os.listdir(devicedir):
		if not entry.startswith("1-"):
			continue
		candidate = devicedir + '/' + entry + '/driver/module'
		if os.path.isdir(candidate):
			return candidate
	# rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over
	# /sys/class/net/<iface>/device/driver, so look also here
	candidate = devicedir + '/driver'
	if os.path.isdir(candidate):
		return candidate
	return None
def detectWlanModule(self, iface = None):
	"""Guess the wireless configuration backend for *iface*.

	Returns one of 'nl80211', 'madwifi', 'ralink', 'zydas' or the
	generic 'wext' fallback; None when *iface* is not wireless at all.
	"""
	if not self.isWirelessInterface(iface):
		return None
	devicedir = self.sysfsPath(iface) + '/device'
	if os.path.isdir(devicedir + '/ieee80211'):
		return 'nl80211'
	moduledir = self.getWlanModuleDir(iface)
	if moduledir:
		module = os.path.basename(os.path.realpath(moduledir))
		if module in ('ath_pci', 'ath5k'):
			return 'madwifi'
		# The original tested "module in ('rt73', 'rt73')" -- the
		# duplicated element made it a plain equality test.  A second
		# ralink module name may have been intended here; verify.
		if module == 'rt73':
			return 'ralink'
		if module == 'zd1211b':
			return 'zydas'
	return 'wext'
def calc_netmask(self,nmask):
	# Convert a CIDR prefix length (e.g. 24) into a dotted-quad netmask
	# string (e.g. '255.255.255.0').  Returns None for prefixes outside
	# 0..31.  NOTE(review): /32 is rejected too because range(0, 32)
	# excludes 32 -- possibly unintended, verify against callers.
	from struct import pack, unpack
	from socket import inet_ntoa, inet_aton
	# mask and xnet are computed but never used below.
	mask = 1L<<31
	xnet = (1L<<32)-1
	cidr_range = range(0, 32)
	cidr = long(nmask)
	if cidr not in cidr_range:
		print 'cidr invalid: %d' % cidr
		return None
	else:
		# ((1<<cidr)-1) gives cidr one-bits; shifting left by (32-cidr)
		# moves them to the most-significant end of the 32-bit mask.
		nm = ((1L<<cidr)-1)<<(32-cidr)
		# '>L' packs as big-endian unsigned 32-bit for inet_ntoa.
		netmask = str(inet_ntoa(pack('>L', nm)))
		return netmask
def msgPlugins(self):
	"""Notify WHERE_NETWORKCONFIG_READ plugins of the current config state."""
	if self.config_ready is None:
		return
	for plugin in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
		plugin(reason=self.config_ready)
def hotplug(self, event):
	# Kernel hotplug notification: keep self.ifaces in sync as network
	# interfaces come and go.  *event* is a dict with at least
	# 'INTERFACE' and 'ACTION' keys (udev-style environment).
	interface = event['INTERFACE']
	if self.isBlacklisted(interface):
		return
	action = event['ACTION']
	if action == "add":
		print "[Network] Add new interface:", interface
		self.getAddrInet(interface, None)
	elif action == "remove":
		print "[Network] Removed interface:", interface
		try:
			del self.ifaces[interface]
		except KeyError:
			# Interface was never registered (e.g. appeared while
			# blacklist state differed); nothing to clean up.
			pass
# Module-level singleton used throughout the codebase.
iNetwork = Network()
def InitNetwork():
	# Kept for compatibility with the startup/plugin API; the actual
	# initialisation happens when the Network() singleton is created above.
	pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Perform uploader operations."""
import argparse
from invenio.ext.script import Manager
manager = Manager(usage=__doc__)
@manager.option('-f', '--filename', dest='blobs', nargs='+',
                type=argparse.FileType('r'))
def insert(blobs):
    """Upload new records.

    :param blobs: open file objects (supplied by argparse) whose contents
        are MARCXML blobs; each is handed to the uploader ``run`` API.
    """
    from .api import run
    for blob in blobs:
        # Real files carry a .name; fall back to None for objects
        # (e.g. stdin) that do not.
        filename = getattr(blob, 'name', None)
        run('insert', blob.read(), master_format='marc',
            reader_info=dict(schema='xml'), filename=filename)
def main():
    """Create the Invenio application and hand control to the manager."""
    from invenio.base.factory import create_app
    manager.app = create_app()
    manager.run()
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import { teardown } from '../../../reactivity/effects.js';
import { get_descriptor } from '../../../../shared/utils.js';
/**
 * Makes an `export`ed (non-prop) variable available on the `$$props` object
 * so that consumers can do `bind:x` on the component.
 * @template V
 * @param {Record<string, unknown>} props
 * @param {string} prop
 * @param {V} value
 * @returns {void}
 */
export function bind_prop(props, prop, value) {
	const descriptor = get_descriptor(props, prop);
	// Only bindable when the consumer supplied a settable accessor for it.
	if (!descriptor || !descriptor.set) return;
	props[prop] = value;
	// Reset on teardown so the consumer's bound variable does not keep a
	// stale reference after the component is destroyed.
	teardown(() => {
		props[prop] = null;
	});
}
|
javascript
|
github
|
https://github.com/sveltejs/svelte
|
packages/svelte/src/internal/client/dom/elements/bindings/props.js
|
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Utilities for building Huffman decoding tables. */
#include "huffman.h"
#include "../common/constants.h"
#include "../common/platform.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Number of bits reversed by one BrotliReverseBits() step; this bounds the
   table depths that the builders below may use with reversed keys. */
#define BROTLI_REVERSE_BITS_MAX 8
#if defined(BROTLI_RBIT)
/* With a hardware bit-reverse instruction the whole register is reversed,
   so unreversed keys must live in the top BROTLI_REVERSE_BITS_MAX bits. */
#define BROTLI_REVERSE_BITS_BASE \
  ((sizeof(brotli_reg_t) << 3) - BROTLI_REVERSE_BITS_MAX)
#else
#define BROTLI_REVERSE_BITS_BASE 0
/* Lookup fallback: kReverseBits[i] is i with its 8 low bits reversed. */
static BROTLI_MODEL("small")
uint8_t kReverseBits[1 << BROTLI_REVERSE_BITS_MAX] = {
  0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0,
  0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
  0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
  0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
  0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4,
  0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
  0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC,
  0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
  0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
  0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
  0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA,
  0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
  0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6,
  0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
  0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
  0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
  0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1,
  0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
  0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9,
  0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
  0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
  0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
  0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED,
  0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
  0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3,
  0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
  0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
  0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
  0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7,
  0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
  0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF,
  0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
#endif /* BROTLI_RBIT */
/* The highest bit of an unreversed key: adding this repeatedly and reversing
   enumerates keys in canonical (reversed-increment) order. */
#define BROTLI_REVERSE_BITS_LOWEST \
  ((brotli_reg_t)1 << (BROTLI_REVERSE_BITS_MAX - 1 + BROTLI_REVERSE_BITS_BASE))
/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX),
   where reverse(value, len) is the bit-wise reversal of the len least
   significant bits of value. */
static BROTLI_INLINE brotli_reg_t BrotliReverseBits(brotli_reg_t num) {
#if defined(BROTLI_RBIT)
  return BROTLI_RBIT(num);
#else
  return kReverseBits[num];
#endif
}
/* Stores |code| in table[0], table[step], table[2*step], ..., table[end].
   Assumes that |end| is a positive integer multiple of |step|. */
static BROTLI_INLINE void ReplicateValue(HuffmanCode* table,
                                         int step, int end,
                                         HuffmanCode code) {
  int pos = end;
  /* Walk downwards so the final write lands on table[0]. */
  do {
    pos -= step;
    table[pos] = code;
  } while (pos > 0);
}
/* Returns the table width of the next 2nd level table. |count| is the
   histogram of bit lengths for the remaining symbols, |len| is the code
   length of the next processed symbol. */
static BROTLI_INLINE int NextTableBitSize(const uint16_t* const count,
                                          int len, int root_bits) {
  int left = 1 << (len - root_bits);
  /* Extend the table until every remaining code fits in it. */
  for (; len < BROTLI_HUFFMAN_MAX_CODE_LENGTH; ++len) {
    left -= count[len];
    if (left <= 0) break;
    left <<= 1;
  }
  return len - root_bits;
}
/* Builds the fixed-depth decoding table for the code-length code itself.
   |code_lengths| holds one length per code-length symbol and |count| is the
   histogram of those lengths. */
void BrotliBuildCodeLengthsHuffmanTable(HuffmanCode* table,
                                        const uint8_t* const code_lengths,
                                        uint16_t* count) {
  HuffmanCode code;       /* current table entry */
  int symbol;             /* symbol index in original or sorted table */
  brotli_reg_t key;       /* prefix code */
  brotli_reg_t key_step;  /* prefix code addend */
  int step;               /* step size to replicate values in current table */
  int table_size;         /* size of current table */
  int sorted[BROTLI_CODE_LENGTH_CODES];  /* symbols sorted by code length */
  /* offsets in sorted table for each length */
  int offset[BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH + 1];
  int bits;
  int bits_count;
  BROTLI_DCHECK(BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH <=
                BROTLI_REVERSE_BITS_MAX);
  BROTLI_DCHECK(BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH == 5);
  /* Generate offsets into sorted symbol table by code length. */
  symbol = -1;
  bits = 1;
  /* BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH == 5 */
  BROTLI_REPEAT_5({
    symbol += count[bits];
    offset[bits] = symbol;
    bits++;
  });
  /* Symbols with code length 0 are placed after all other symbols. */
  offset[0] = BROTLI_CODE_LENGTH_CODES - 1;
  /* Sort symbols by length, by symbol order within each length. */
  symbol = BROTLI_CODE_LENGTH_CODES;
  do {
    BROTLI_REPEAT_6({
      symbol--;
      sorted[offset[code_lengths[symbol]]--] = symbol;
    });
  } while (symbol != 0);
  table_size = 1 << BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH;
  /* Special case: all symbols but one have 0 code length. */
  if (offset[0] == 0) {
    code = ConstructHuffmanCode(0, (uint16_t)sorted[0]);
    for (key = 0; key < (brotli_reg_t)table_size; ++key) {
      table[key] = code;
    }
    return;
  }
  /* Fill in table.  Keys are assigned in canonical order and mapped to
     bit-reversed table slots via BrotliReverseBits(). */
  key = 0;
  key_step = BROTLI_REVERSE_BITS_LOWEST;
  symbol = 0;
  bits = 1;
  step = 2;
  do {
    for (bits_count = count[bits]; bits_count != 0; --bits_count) {
      code = ConstructHuffmanCode((uint8_t)bits, (uint16_t)sorted[symbol++]);
      ReplicateValue(&table[BrotliReverseBits(key)], step, table_size, code);
      key += key_step;
    }
    /* Codes one bit longer occupy half as many replicated slots. */
    step <<= 1;
    key_step >>= 1;
  } while (++bits <= BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH);
}
/* Builds a two-level (root table + 2nd level tables) Huffman decoding table
   and returns the total number of HuffmanCode entries used.  |symbol_lists|
   chains symbols per code length; |count| is the length histogram.
   NOTE(review): |symbol_lists| is indexed with negative offsets
   (symbol_lists[-1] initially), so the pointer must reference the middle of
   its backing array -- confirm against the caller's layout. */
uint32_t BrotliBuildHuffmanTable(HuffmanCode* root_table,
                                 int root_bits,
                                 const uint16_t* const symbol_lists,
                                 uint16_t* count) {
  HuffmanCode code;       /* current table entry */
  HuffmanCode* table;     /* next available space in table */
  int len;                /* current code length */
  int symbol;             /* symbol index in original or sorted table */
  brotli_reg_t key;       /* prefix code */
  brotli_reg_t key_step;  /* prefix code addend */
  brotli_reg_t sub_key;   /* 2nd level table prefix code */
  brotli_reg_t sub_key_step;  /* 2nd level table prefix code addend */
  int step;               /* step size to replicate values in current table */
  int table_bits;         /* key length of current table */
  int table_size;         /* size of current table */
  int total_size;         /* sum of root table size and 2nd level table sizes */
  int max_length = -1;
  int bits;
  int bits_count;
  BROTLI_DCHECK(root_bits <= BROTLI_REVERSE_BITS_MAX);
  BROTLI_DCHECK(BROTLI_HUFFMAN_MAX_CODE_LENGTH - root_bits <=
                BROTLI_REVERSE_BITS_MAX);
  /* 0xFFFF terminates a per-length chain; skip lengths with no symbols. */
  while (symbol_lists[max_length] == 0xFFFF) max_length--;
  max_length += BROTLI_HUFFMAN_MAX_CODE_LENGTH + 1;
  table = root_table;
  table_bits = root_bits;
  table_size = 1 << table_bits;
  total_size = table_size;
  /* Fill in the root table. Reduce the table size to if possible,
     and create the repetitions by memcpy. */
  if (table_bits > max_length) {
    table_bits = max_length;
    table_size = 1 << table_bits;
  }
  key = 0;
  key_step = BROTLI_REVERSE_BITS_LOWEST;
  bits = 1;
  step = 2;
  do {
    symbol = bits - (BROTLI_HUFFMAN_MAX_CODE_LENGTH + 1);
    for (bits_count = count[bits]; bits_count != 0; --bits_count) {
      symbol = symbol_lists[symbol];
      code = ConstructHuffmanCode((uint8_t)bits, (uint16_t)symbol);
      ReplicateValue(&table[BrotliReverseBits(key)], step, table_size, code);
      key += key_step;
    }
    step <<= 1;
    key_step >>= 1;
  } while (++bits <= table_bits);
  /* If root_bits != table_bits then replicate to fill the remaining slots. */
  while (total_size != table_size) {
    memcpy(&table[table_size], &table[0],
           (size_t)table_size * sizeof(table[0]));
    table_size <<= 1;
  }
  /* Fill in 2nd level tables and add pointers to root table. */
  key_step = BROTLI_REVERSE_BITS_LOWEST >> (root_bits - 1);
  sub_key = (BROTLI_REVERSE_BITS_LOWEST << 1);
  sub_key_step = BROTLI_REVERSE_BITS_LOWEST;
  for (len = root_bits + 1, step = 2; len <= max_length; ++len) {
    symbol = len - (BROTLI_HUFFMAN_MAX_CODE_LENGTH + 1);
    for (; count[len] != 0; --count[len]) {
      /* sub_key overflowed its range: start a new 2nd level table and
         link it from the root slot it shadows. */
      if (sub_key == (BROTLI_REVERSE_BITS_LOWEST << 1U)) {
        table += table_size;
        table_bits = NextTableBitSize(count, len, root_bits);
        table_size = 1 << table_bits;
        total_size += table_size;
        sub_key = BrotliReverseBits(key);
        key += key_step;
        root_table[sub_key] = ConstructHuffmanCode(
            (uint8_t)(table_bits + root_bits),
            (uint16_t)(((size_t)(table - root_table)) - sub_key));
        sub_key = 0;
      }
      symbol = symbol_lists[symbol];
      code = ConstructHuffmanCode((uint8_t)(len - root_bits), (uint16_t)symbol);
      ReplicateValue(
          &table[BrotliReverseBits(sub_key)], step, table_size, code);
      sub_key += sub_key_step;
    }
    step <<= 1;
    sub_key_step >>= 1;
  }
  return (uint32_t)total_size;
}
/* Builds the decoding table for a "simple" tree with at most four symbols
   and returns the table size (always 1 << root_bits).  |num_symbols| is the
   tree-select value: 0..2 mean that many + 1 symbols; 3 and 4 both mean four
   symbols, with flat depths (2,2,2,2) vs skewed depths (1,2,3,3)
   respectively, as the case bodies below show. */
uint32_t BrotliBuildSimpleHuffmanTable(HuffmanCode* table,
                                       int root_bits,
                                       uint16_t* val,
                                       uint32_t num_symbols) {
  uint32_t table_size = 1;
  const uint32_t goal_size = 1U << root_bits;
  switch (num_symbols) {
    case 0:
      /* Single symbol: zero-length code. */
      table[0] = ConstructHuffmanCode(0, val[0]);
      break;
    case 1:
      /* Two symbols, one bit each; order entries by symbol value. */
      if (val[1] > val[0]) {
        table[0] = ConstructHuffmanCode(1, val[0]);
        table[1] = ConstructHuffmanCode(1, val[1]);
      } else {
        table[0] = ConstructHuffmanCode(1, val[1]);
        table[1] = ConstructHuffmanCode(1, val[0]);
      }
      table_size = 2;
      break;
    case 2:
      /* Three symbols: val[0] gets depth 1, the others depth 2. */
      table[0] = ConstructHuffmanCode(1, val[0]);
      table[2] = ConstructHuffmanCode(1, val[0]);
      if (val[2] > val[1]) {
        table[1] = ConstructHuffmanCode(2, val[1]);
        table[3] = ConstructHuffmanCode(2, val[2]);
      } else {
        table[1] = ConstructHuffmanCode(2, val[2]);
        table[3] = ConstructHuffmanCode(2, val[1]);
      }
      table_size = 4;
      break;
    case 3: {
      /* Four symbols, all depth 2; sort the four values first. */
      int i, k;
      for (i = 0; i < 3; ++i) {
        for (k = i + 1; k < 4; ++k) {
          if (val[k] < val[i]) {
            uint16_t t = val[k];
            val[k] = val[i];
            val[i] = t;
          }
        }
      }
      /* Slots are in bit-reversed code order: 0, 2, 1, 3. */
      table[0] = ConstructHuffmanCode(2, val[0]);
      table[2] = ConstructHuffmanCode(2, val[1]);
      table[1] = ConstructHuffmanCode(2, val[2]);
      table[3] = ConstructHuffmanCode(2, val[3]);
      table_size = 4;
      break;
    }
    case 4: {
      /* Four symbols with depths 1,2,3,3; only the two depth-3 symbols
         need ordering. */
      if (val[3] < val[2]) {
        uint16_t t = val[3];
        val[3] = val[2];
        val[2] = t;
      }
      table[0] = ConstructHuffmanCode(1, val[0]);
      table[1] = ConstructHuffmanCode(2, val[1]);
      table[2] = ConstructHuffmanCode(1, val[0]);
      table[3] = ConstructHuffmanCode(3, val[2]);
      table[4] = ConstructHuffmanCode(1, val[0]);
      table[5] = ConstructHuffmanCode(2, val[1]);
      table[6] = ConstructHuffmanCode(1, val[0]);
      table[7] = ConstructHuffmanCode(3, val[3]);
      table_size = 8;
      break;
    }
  }
  /* Replicate the filled prefix until the table reaches goal_size. */
  while (table_size != goal_size) {
    memcpy(&table[table_size], &table[0],
           (size_t)table_size * sizeof(table[0]));
    table_size <<= 1;
  }
  return goal_size;
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
|
c
|
github
|
https://github.com/nodejs/node
|
deps/brotli/c/dec/huffman.c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.