code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
"""Tests for the Heos config flow module.""" from urllib.parse import urlparse from pyheos import HeosError from homeassistant import data_entry_flow from homeassistant.components import ssdp from homeassistant.components.heos.config_flow import HeosFlowHandler from homeassistant.components.heos.const import DATA_DISCOVERED_HOSTS, DOMAIN from homeassistant.const import CONF_HOST async def test_flow_aborts_already_setup(hass, config_entry): """Test flow aborts when entry already setup.""" config_entry.add_to_hass(hass) flow = HeosFlowHandler() flow.hass = hass result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup" async def test_no_host_shows_form(hass): """Test form is shown when host not provided.""" flow = HeosFlowHandler() flow.hass = hass result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" assert result["errors"] == {} async def test_cannot_connect_shows_error_form(hass, controller): """Test form is shown with error when cannot connect.""" flow = HeosFlowHandler() flow.hass = hass controller.connect.side_effect = HeosError() result = await flow.async_step_user({CONF_HOST: "127.0.0.1"}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" assert result["errors"][CONF_HOST] == "connection_failure" assert controller.connect.call_count == 1 assert controller.disconnect.call_count == 1 controller.connect.reset_mock() controller.disconnect.reset_mock() async def test_create_entry_when_host_valid(hass, controller): """Test result type is create entry when host is valid.""" flow = HeosFlowHandler() flow.hass = hass data = {CONF_HOST: "127.0.0.1"} result = await flow.async_step_user(data) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "Controller (127.0.0.1)" assert result["data"] == data assert controller.connect.call_count 
== 1 assert controller.disconnect.call_count == 1 async def test_create_entry_when_friendly_name_valid(hass, controller): """Test result type is create entry when friendly name is valid.""" hass.data[DATA_DISCOVERED_HOSTS] = {"Office (127.0.0.1)": "127.0.0.1"} flow = HeosFlowHandler() flow.hass = hass data = {CONF_HOST: "Office (127.0.0.1)"} result = await flow.async_step_user(data) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "Controller (127.0.0.1)" assert result["data"] == {CONF_HOST: "127.0.0.1"} assert controller.connect.call_count == 1 assert controller.disconnect.call_count == 1 assert DATA_DISCOVERED_HOSTS not in hass.data async def test_discovery_shows_create_form(hass, controller, discovery_data): """Test discovery shows form to confirm setup and subsequent abort.""" await hass.config_entries.flow.async_init( DOMAIN, context={"source": "ssdp"}, data=discovery_data ) await hass.async_block_till_done() assert len(hass.config_entries.flow.async_progress()) == 1 assert hass.data[DATA_DISCOVERED_HOSTS] == {"Office (127.0.0.1)": "127.0.0.1"} port = urlparse(discovery_data[ssdp.ATTR_SSDP_LOCATION]).port discovery_data[ssdp.ATTR_SSDP_LOCATION] = f"http://127.0.0.2:{port}/" discovery_data[ssdp.ATTR_UPNP_FRIENDLY_NAME] = "Bedroom" await hass.config_entries.flow.async_init( DOMAIN, context={"source": "ssdp"}, data=discovery_data ) await hass.async_block_till_done() assert len(hass.config_entries.flow.async_progress()) == 1 assert hass.data[DATA_DISCOVERED_HOSTS] == { "Office (127.0.0.1)": "127.0.0.1", "Bedroom (127.0.0.2)": "127.0.0.2", } async def test_disovery_flow_aborts_already_setup( hass, controller, discovery_data, config_entry ): """Test discovery flow aborts when entry already setup.""" config_entry.add_to_hass(hass) flow = HeosFlowHandler() flow.hass = hass result = await flow.async_step_ssdp(discovery_data) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup"
unknown
codeparrot/codeparrot-clean
from LogAnalyzer import Test,TestResult import DataflashLog # import scipy # import pylab #### TEMP!!! only for dev # from scipy import signal class TestDualGyroDrift(Test): '''test for gyro drift between dual IMU data''' def __init__(self): Test.__init__(self) self.name = "Gyro Drift" self.enable = False def run(self, logdata, verbose): self.result = TestResult() self.result.status = TestResult.StatusType.GOOD # if "IMU" not in logdata.channels or "IMU2" not in logdata.channels: # self.result.status = TestResult.StatusType.NA # return # imuX = logdata.channels["IMU"]["GyrX"].listData # imu2X = logdata.channels["IMU2"]["GyrX"].listData # # NOTE: weird thing about Holger's log is that the counts of IMU+IMU2 are different # print "length 1: %.2f, length 2: %.2f" % (len(imuX),len(imu2X)) # #assert(len(imuX) == len(imu2X)) # # divide the curve into segments and get the average of each segment # # we will get the diff between those averages, rather than a per-sample diff as the IMU+IMU2 arrays are often not the same length # diffThresholdWARN = 0.03 # diffThresholdFAIL = 0.05 # nSamples = 10 # imu1XAverages, imu1YAverages, imu1ZAverages, imu2XAverages, imu2YAverages, imu2ZAverages = ([],[],[],[],[],[]) # imuXDiffAverages, imuYDiffAverages, imuZDiffAverages = ([],[],[]) # maxDiffX, maxDiffY, maxDiffZ = (0,0,0) # sliceLength1 = len(logdata.channels["IMU"]["GyrX"].dictData.values()) / nSamples # sliceLength2 = len(logdata.channels["IMU2"]["GyrX"].dictData.values()) / nSamples # for i in range(0,nSamples): # imu1XAverages.append(numpy.mean(logdata.channels["IMU"]["GyrX"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1])) # imu1YAverages.append(numpy.mean(logdata.channels["IMU"]["GyrY"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1])) # imu1ZAverages.append(numpy.mean(logdata.channels["IMU"]["GyrZ"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1])) # 
imu2XAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrX"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2])) # imu2YAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrY"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2])) # imu2ZAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrZ"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2])) # imuXDiffAverages.append(imu2XAverages[-1]-imu1XAverages[-1]) # imuYDiffAverages.append(imu2YAverages[-1]-imu1YAverages[-1]) # imuZDiffAverages.append(imu2ZAverages[-1]-imu1ZAverages[-1]) # if abs(imuXDiffAverages[-1]) > maxDiffX: # maxDiffX = imuXDiffAverages[-1] # if abs(imuYDiffAverages[-1]) > maxDiffY: # maxDiffY = imuYDiffAverages[-1] # if abs(imuZDiffAverages[-1]) > maxDiffZ: # maxDiffZ = imuZDiffAverages[-1] # if max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdFAIL: # self.result.status = TestResult.StatusType.FAIL # self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdFAIL # elif max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdWARN: # self.result.status = TestResult.StatusType.WARN # self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdWARN # # pylab.plot(zip(*imuX)[0], zip(*imuX)[1], 'g') # # pylab.plot(zip(*imu2X)[0], zip(*imu2X)[1], 'r') # #pylab.plot(range(0,(nSamples*sliceLength1),sliceLength1), imu1ZAverages, 'b') # print "Gyro averages1X: " + `imu1XAverages` # print "Gyro averages1Y: " + `imu1YAverages` # print "Gyro averages1Z: " + `imu1ZAverages` + "\n" # print "Gyro averages2X: " + `imu2XAverages` # print "Gyro averages2Y: " + `imu2YAverages` # print "Gyro averages2Z: " + `imu2ZAverages` + "\n" # print "Gyro averages diff X: " + `imuXDiffAverages` # print "Gyro averages diff Y: " + `imuYDiffAverages` # print "Gyro averages diff Z: " + `imuZDiffAverages` # # lowpass filter using numpy # # cutoff = 100 # # fs = 10000.0 # # b,a = 
scipy.signal.filter_design.butter(5,cutoff/(fs/2)) # # imuXFiltered = scipy.signal.filtfilt(b,a,zip(*imuX)[1]) # # imu2XFiltered = scipy.signal.filtfilt(b,a,zip(*imu2X)[1]) # #pylab.plot(imuXFiltered, 'r') # # TMP: DISPLAY BEFORE+AFTER plots # pylab.show() # # print "imuX average before lowpass filter: %.8f" % logdata.channels["IMU"]["GyrX"].avg() # # print "imuX average after lowpass filter: %.8f" % numpy.mean(imuXFiltered) # # print "imu2X average before lowpass filter: %.8f" % logdata.channels["IMU2"]["GyrX"].avg() # # print "imu2X average after lowpass filter: %.8f" % numpy.mean(imu2XFiltered) # avg1X = logdata.channels["IMU"]["GyrX"].avg() # avg1Y = logdata.channels["IMU"]["GyrY"].avg() # avg1Z = logdata.channels["IMU"]["GyrZ"].avg() # avg2X = logdata.channels["IMU2"]["GyrX"].avg() # avg2Y = logdata.channels["IMU2"]["GyrY"].avg() # avg2Z = logdata.channels["IMU2"]["GyrZ"].avg() # avgRatioX = (max(avg1X,avg2X) - min(avg1X,avg2X)) / #abs(max(avg1X,avg2X) / min(avg1X,avg2X)) # avgRatioY = abs(max(avg1Y,avg2Y) / min(avg1Y,avg2Y)) # avgRatioZ = abs(max(avg1Z,avg2Z) / min(avg1Z,avg2Z)) # self.result.statusMessage = "IMU gyro avg: %.4f,%.4f,%.4f\nIMU2 gyro avg: %.4f,%.4f,%.4f\nAvg ratio: %.4f,%.4f,%.4f" % (avg1X,avg1Y,avg1Z, avg2X,avg2Y,avg2Z, avgRatioX,avgRatioY,avgRatioZ)
unknown
codeparrot/codeparrot-clean
package convert import ( gogotypes "github.com/gogo/protobuf/types" swarmtypes "github.com/moby/moby/api/types/swarm" swarmapi "github.com/moby/swarmkit/v2/api" ) // SecretFromGRPC converts a grpc Secret to a Secret. func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { secret := swarmtypes.Secret{ ID: s.ID, Spec: swarmtypes.SecretSpec{ Annotations: annotationsFromGRPC(s.Spec.Annotations), Data: s.Spec.Data, Driver: driverFromGRPC(s.Spec.Driver), }, } secret.Version.Index = s.Meta.Version.Index // Meta secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) if s.Spec.Templating != nil { secret.Spec.Templating = &swarmtypes.Driver{ Name: s.Spec.Templating.Name, Options: s.Spec.Templating.Options, } } return secret } // SecretSpecToGRPC converts Secret to a grpc Secret. func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { spec := swarmapi.SecretSpec{ Annotations: swarmapi.Annotations{ Name: s.Name, Labels: s.Labels, }, Data: s.Data, Driver: driverToGRPC(s.Driver), } if s.Templating != nil { spec.Templating = &swarmapi.Driver{ Name: s.Templating.Name, Options: s.Templating.Options, } } return spec } // SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { refs := []*swarmtypes.SecretReference{} for _, r := range s { ref := &swarmtypes.SecretReference{ SecretID: r.SecretID, SecretName: r.SecretName, } if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { ref.File = &swarmtypes.SecretReferenceFileTarget{ Name: t.File.Name, UID: t.File.UID, GID: t.File.GID, Mode: t.File.Mode, } } refs = append(refs, ref) } return refs }
go
github
https://github.com/moby/moby
daemon/cluster/convert/secret.go
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = """ module: podman_image_info author: - Sam Doran (@samdoran) version_added: '2.8' short_description: Gather info about images using podman notes: - Podman may required elevated privileges in order to run properly. description: - Gather info about images using C(podman) options: executable: description: - Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman) default: 'podman' type: str name: description: - List of tags or UID to gather info about. If no name is given return info about all images. """ EXAMPLES = """ - name: Gather info for all images podman_image_info: - name: Gather info on a specific image podman_image_info: name: nginx - name: Gather info on several images podman_image_info: name: - redis - quay.io/bitnami/wildfly """ RETURN = """ images: description: info from all or specified images returned: always type: dict sample: [ { "Annotations": {}, "Architecture": "amd64", "Author": "", "Comment": "from Bitnami with love", "ContainerConfig": { "Cmd": [ "nami", "start", "--foreground", "wildfly" ], "Entrypoint": [ "/app-entrypoint.sh" ], "Env": [ "PATH=/opt/bitnami/java/bin:/opt/bitnami/wildfly/bin:/opt/bitnami/nami/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "IMAGE_OS=debian-9", "NAMI_VERSION=0.0.9-0", "GPG_KEY_SERVERS_LIST=ha.pool.sks-keyservers.net \ hkp://p80.pool.sks-keyservers.net:80 keyserver.ubuntu.com hkp://keyserver.ubuntu.com:80 pgp.mit.edu", "TINI_VERSION=v0.13.2", "TINI_GPG_KEY=595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7", "GOSU_VERSION=1.10", "GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4", 
"BITNAMI_IMAGE_VERSION=14.0.1-debian-9-r12", "BITNAMI_APP_NAME=wildfly", "WILDFLY_JAVA_HOME=", "WILDFLY_JAVA_OPTS=", "WILDFLY_MANAGEMENT_HTTP_PORT_NUMBER=9990", "WILDFLY_PASSWORD=bitnami", "WILDFLY_PUBLIC_CONSOLE=true", "WILDFLY_SERVER_AJP_PORT_NUMBER=8009", "WILDFLY_SERVER_HTTP_PORT_NUMBER=8080", "WILDFLY_SERVER_INTERFACE=0.0.0.0", "WILDFLY_USERNAME=user", "WILDFLY_WILDFLY_HOME=/home/wildfly", "WILDFLY_WILDFLY_OPTS=-Dwildfly.as.deployment.ondemand=false" ], "ExposedPorts": { "8080/tcp": {}, "9990/tcp": {} }, "Labels": { "maintainer": "Bitnami <containers@bitnami.com>" } }, "Created": "2018-09-25T04:07:45.934395523Z", "Digest": "sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b", "GraphDriver": { "Data": { "LowerDir": "/var/lib/containers/storage/overlay/a9dbf5616cc16919a8ac0dfc60aff87a72b5be52994c4649fcc91a089a12931\ f/diff:/var/lib/containers/storage/overlay/67129bd46022122a7d8b7acb490092af6c7ce244ce4fbd7d9e2d2b7f5979e090/diff:/var/lib/containers/storage/overlay/7c51242c\ 4c5db5c74afda76d7fdbeab6965d8b21804bb3fc597dee09c770b0ca/diff:/var/lib/containers/storage/overlay/f97315dc58a9c002ba0cabccb9933d4b0d2113733d204188c88d72f75569b57b/diff:/var/lib/containers/storage/overlay/1dbde2dd497ddde2b467727125b900958a051a72561e58d29abe3d660dcaa9a7/diff:/var/lib/containers/storage/overlay/4aad9d80f30c3f0608f58173558b7554d84dee4dc4479672926eca29f75e6e33/diff:/var/lib/containers/storage/overlay/6751fc9b6868254870c062d75a511543fc8cfda2ce6262f4945f107449219632/diff:/var/lib/containers/storage/overlay/a27034d79081347421dd24d7e9e776c18271cd9a6e51053cb39af4d3d9c400e8/diff:/var/lib/containers/storage/overlay/537cf0045ed9cd7989f7944e7393019c81b16c1799a2198d8348cd182665397f/diff:/var/lib/containers/storage/overlay/27578615c5ae352af4e8449862d61aaf5c11b105a7d5905af55bd01b0c656d6e/diff:/var/lib/containers/storage/overlay/566542742840fe3034b3596f7cb9e62a6274c95a69f368f9e713746f8712c0b6/diff", "MergedDir": "/var/lib/containers/storage/overlay/72bb96d6\ 
c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/merged", "UpperDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/diff", "WorkDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/work" }, "Name": "overlay" }, "Id": "bcacbdf7a119c0fa934661ca8af839e625ce6540d9ceb6827cdd389f823d49e0", "Labels": { "maintainer": "Bitnami <containers@bitnami.com>" }, "ManifestType": "application/vnd.docker.distribution.manifest.v1+prettyjws", "Os": "linux", "Parent": "", "RepoDigests": [ "quay.io/bitnami/wildfly@sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b" ], "RepoTags": [ "quay.io/bitnami/wildfly:latest" ], "RootFS": { "Layers": [ "sha256:75391df2c87e076b0c2f72d20c95c57dc8be7ee684cc07273416cce622b43367", "sha256:7dd303f041039bfe8f0833092673ac35f93137d10e0fbc4302021ea65ad57731", "sha256:720d9edf0cd2a9bb56b88b80be9070dbfaad359514c70094c65066963fed485d", "sha256:6a567ecbf97725501a634fcb486271999aa4591b633b4ae9932a46b40f5aaf47", "sha256:59e9a6db8f178f3da868614564faabb2820cdfb69be32e63a4405d6f7772f68c", "sha256:310a82ccb092cd650215ab375da8943d235a263af9a029b8ac26a281446c04db", "sha256:36cb91cf4513543a8f0953fed785747ea18b675bc2677f3839889cfca0aac79e" ], "Type": "layers" }, "Size": 569919342, "User": "", "Version": "17.06.0-ce", "VirtualSize": 569919342 } ] """ import json from ansible.module_utils.basic import AnsibleModule def image_exists(module, executable, name): command = [executable, 'image', 'exists', name] rc, out, err = module.run_command(command) if rc == 1: return False elif 'Command "exists" not found' in err: # The 'exists' test is available in podman >= 0.12.1 command = [executable, 'image', 'ls', '-q', name] rc2, out2, err2 = module.run_command(command) if rc2 != 0: return False return True def filter_invalid_names(module, executable, name): valid_names = [] names = name if not isinstance(name, list): names = [name] for name in 
names: if image_exists(module, executable, name): valid_names.append(name) return valid_names def get_image_info(module, executable, name): names = name if not isinstance(name, list): names = [name] if len(names) > 0: command = [executable, 'image', 'inspect'] command.extend(names) rc, out, err = module.run_command(command) if rc != 0: module.fail_json(msg="Unable to gather info for '{0}': {1}".format(', '.join(names), err)) return out else: return json.dumps([]) def get_all_image_info(module, executable): command = [executable, 'image', 'ls', '-q'] rc, out, err = module.run_command(command) name = out.strip().split('\n') out = get_image_info(module, executable, name) return out def main(): module = AnsibleModule( argument_spec=dict( executable=dict(type='str', default='podman'), name=dict(type='list') ), supports_check_mode=True, ) executable = module.params['executable'] name = module.params.get('name') executable = module.get_bin_path(executable, required=True) if name: valid_names = filter_invalid_names(module, executable, name) results = json.loads(get_image_info(module, executable, valid_names)) else: results = json.loads(get_all_image_info(module, executable)) results = dict( changed=False, images=results ) module.exit_json(**results) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package releaseauth import ( "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "log" ) // SHA256Hash represents a 256-bit SHA hash type SHA256Hash [sha256.Size]byte // ErrInvalidSHA256Hash is returned when the hash is invalid var ErrInvalidSHA256Hash = errors.New("the value was not a valid SHA-256 hash") // SHA256FromHex decodes a SHA256Hash from a hex string dump func SHA256FromHex(hashHex string) (SHA256Hash, error) { var result [sha256.Size]byte hash, err := hex.DecodeString(hashHex) if err != nil || len(hash) != sha256.Size { return result, ErrInvalidSHA256Hash } if copy(result[:], hash) != sha256.Size { panic("could not copy hash value") } return result, nil } // SHA256Checksums decodes a file generated by the sha256sum program type SHA256Checksums map[string]SHA256Hash func ParseChecksums(data []byte) (SHA256Checksums, error) { items := bytes.Split(data, []byte("\n")) result := make(map[string]SHA256Hash, len(items)) for _, line := range items { parts := bytes.SplitN(line, []byte(" "), 2) if len(parts) != 2 { break } log.Printf("[TRACE] parsing SHA256SUMS %q = %q", parts[0], parts[1]) hash, err := SHA256FromHex(string(parts[0])) if err != nil { return result, fmt.Errorf("failed to parse checksums: %w", err) } result[string(parts[1])] = hash } return result, nil } // Validate retrieves a SHA256Hash for the a filename and compares it // to the specified hash. Validate returns an error if the hash is not found // or if it does not match. func (c SHA256Checksums) Validate(filename string, hash SHA256Hash) error { sum, ok := c[filename] if !ok { return fmt.Errorf("no checksum found for filename %q", filename) } if sum != hash { return fmt.Errorf("checksums do not match") } return nil }
go
github
https://github.com/hashicorp/terraform
internal/releaseauth/hash.go
# $Id: states.py 6141 2009-09-25 18:50:30Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ This is the ``docutils.parsers.restructuredtext.states`` module, the core of the reStructuredText parser. It defines the following: :Classes: - `RSTStateMachine`: reStructuredText parser's entry point. - `NestedStateMachine`: recursive StateMachine. - `RSTState`: reStructuredText State superclass. - `Inliner`: For parsing inline markup. - `Body`: Generic classifier of the first line of a block. - `SpecializedBody`: Superclass for compound element members. - `BulletList`: Second and subsequent bullet_list list_items - `DefinitionList`: Second+ definition_list_items. - `EnumeratedList`: Second+ enumerated_list list_items. - `FieldList`: Second+ fields. - `OptionList`: Second+ option_list_items. - `RFC2822List`: Second+ RFC2822-style fields. - `ExtensionOptions`: Parses directive option fields. - `Explicit`: Second+ explicit markup constructs. - `SubstitutionDef`: For embedded directives in substitution definitions. - `Text`: Classifier of second line of a text block. - `SpecializedText`: Superclass for continuation lines of Text-variants. - `Definition`: Second line of potential definition_list_item. - `Line`: Second line of overlined section title or transition marker. - `Struct`: An auxiliary collection class. :Exception classes: - `MarkupError` - `ParserError` - `MarkupMismatch` :Functions: - `escape2null()`: Return a string, escape-backslashes converted to nulls. - `unescape()`: Return a string, nulls removed or restored to backslashes. :Attributes: - `state_classes`: set of State classes used with `RSTStateMachine`. Parser Overview =============== The reStructuredText parser is implemented as a recursive state machine, examining its input one line at a time. To understand how the parser works, please first become familiar with the `docutils.statemachine` module. 
In the description below, references are made to classes defined in this module; please see the individual classes for details. Parsing proceeds as follows: 1. The state machine examines each line of input, checking each of the transition patterns of the state `Body`, in order, looking for a match. The implicit transitions (blank lines and indentation) are checked before any others. The 'text' transition is a catch-all (matches anything). 2. The method associated with the matched transition pattern is called. A. Some transition methods are self-contained, appending elements to the document tree (`Body.doctest` parses a doctest block). The parser's current line index is advanced to the end of the element, and parsing continues with step 1. B. Other transition methods trigger the creation of a nested state machine, whose job is to parse a compound construct ('indent' does a block quote, 'bullet' does a bullet list, 'overline' does a section [first checking for a valid section header], etc.). - In the case of lists and explicit markup, a one-off state machine is created and run to parse contents of the first item. - A new state machine is created and its initial state is set to the appropriate specialized state (`BulletList` in the case of the 'bullet' transition; see `SpecializedBody` for more detail). This state machine is run to parse the compound element (or series of explicit markup elements), and returns as soon as a non-member element is encountered. For example, the `BulletList` state machine ends as soon as it encounters an element which is not a list item of that bullet list. The optional omission of inter-element blank lines is enabled by this nested state machine. - The current line index is advanced to the end of the elements parsed, and parsing continues with step 1. C. The result of the 'text' transition depends on the next line of text. The current state is changed to `Text`, under which the second line is examined. 
If the second line is: - Indented: The element is a definition list item, and parsing proceeds similarly to step 2.B, using the `DefinitionList` state. - A line of uniform punctuation characters: The element is a section header; again, parsing proceeds as in step 2.B, and `Body` is still used. - Anything else: The element is a paragraph, which is examined for inline markup and appended to the parent element. Processing continues with step 1. """ __docformat__ = 'reStructuredText' import sys import re import roman from types import FunctionType, MethodType from docutils import nodes, statemachine, utils, urischemes from docutils import ApplicationError, DataError from docutils.statemachine import StateMachineWS, StateWS from docutils.nodes import fully_normalize_name as normalize_name from docutils.nodes import whitespace_normalize_name from docutils.utils import escape2null, unescape, column_width import docutils.parsers.rst from docutils.parsers.rst import directives, languages, tableparser, roles from docutils.parsers.rst.languages import en as _fallback_language_module class MarkupError(DataError): pass class UnknownInterpretedRoleError(DataError): pass class InterpretedRoleNotImplementedError(DataError): pass class ParserError(ApplicationError): pass class MarkupMismatch(Exception): pass class Struct: """Stores data attributes for dotted-attribute access.""" def __init__(self, **keywordargs): self.__dict__.update(keywordargs) class RSTStateMachine(StateMachineWS): """ reStructuredText's master StateMachine. The entry point to reStructuredText parsing is the `run()` method. """ def run(self, input_lines, document, input_offset=0, match_titles=1, inliner=None): """ Parse `input_lines` and modify the `document` node in place. Extend `StateMachineWS.run()`: set up parse-global data and run the StateMachine. 
""" self.language = languages.get_language( document.settings.language_code) self.match_titles = match_titles if inliner is None: inliner = Inliner() inliner.init_customizations(document.settings) self.memo = Struct(document=document, reporter=document.reporter, language=self.language, title_styles=[], section_level=0, section_bubble_up_kludge=0, inliner=inliner) self.document = document self.attach_observer(document.note_source) self.reporter = self.memo.reporter self.node = document results = StateMachineWS.run(self, input_lines, input_offset, input_source=document['source']) assert results == [], 'RSTStateMachine.run() results should be empty!' self.node = self.memo = None # remove unneeded references class NestedStateMachine(StateMachineWS): """ StateMachine run from within other StateMachine runs, to parse nested document structures. """ def run(self, input_lines, input_offset, memo, node, match_titles=1): """ Parse `input_lines` and populate a `docutils.nodes.document` instance. Extend `StateMachineWS.run()`: set up document-wide data. """ self.match_titles = match_titles self.memo = memo self.document = memo.document self.attach_observer(self.document.note_source) self.reporter = memo.reporter self.language = memo.language self.node = node results = StateMachineWS.run(self, input_lines, input_offset) assert results == [], ('NestedStateMachine.run() results should be ' 'empty!') return results class RSTState(StateWS): """ reStructuredText State superclass. Contains methods used by all State subclasses. 
""" nested_sm = NestedStateMachine nested_sm_cache = [] def __init__(self, state_machine, debug=0): self.nested_sm_kwargs = {'state_classes': state_classes, 'initial_state': 'Body'} StateWS.__init__(self, state_machine, debug) def runtime_init(self): StateWS.runtime_init(self) memo = self.state_machine.memo self.memo = memo self.reporter = memo.reporter self.inliner = memo.inliner self.document = memo.document self.parent = self.state_machine.node def goto_line(self, abs_line_offset): """ Jump to input line `abs_line_offset`, ignoring jumps past the end. """ try: self.state_machine.goto_line(abs_line_offset) except EOFError: pass def no_match(self, context, transitions): """ Override `StateWS.no_match` to generate a system message. This code should never be run. """ self.reporter.severe( 'Internal error: no transition pattern match. State: "%s"; ' 'transitions: %s; context: %s; current line: %r.' % (self.__class__.__name__, transitions, context, self.state_machine.line), line=self.state_machine.abs_line_number()) return context, None, [] def bof(self, context): """Called at beginning of file.""" return [], [] def nested_parse(self, block, input_offset, node, match_titles=0, state_machine_class=None, state_machine_kwargs=None): """ Create a new StateMachine rooted at `node` and run it over the input `block`. 
""" use_default = 0 if state_machine_class is None: state_machine_class = self.nested_sm use_default += 1 if state_machine_kwargs is None: state_machine_kwargs = self.nested_sm_kwargs use_default += 1 block_length = len(block) state_machine = None if use_default == 2: try: state_machine = self.nested_sm_cache.pop() except IndexError: pass if not state_machine: state_machine = state_machine_class(debug=self.debug, **state_machine_kwargs) state_machine.run(block, input_offset, memo=self.memo, node=node, match_titles=match_titles) if use_default == 2: self.nested_sm_cache.append(state_machine) else: state_machine.unlink() new_offset = state_machine.abs_line_offset() # No `block.parent` implies disconnected -- lines aren't in sync: if block.parent and (len(block) - block_length) != 0: # Adjustment for block if modified in nested parse: self.state_machine.next_line(len(block) - block_length) return new_offset def nested_list_parse(self, block, input_offset, node, initial_state, blank_finish, blank_finish_state=None, extra_settings={}, match_titles=0, state_machine_class=None, state_machine_kwargs=None): """ Create a new StateMachine rooted at `node` and run it over the input `block`. Also keep track of optional intermediate blank lines and the required final one. 
""" if state_machine_class is None: state_machine_class = self.nested_sm if state_machine_kwargs is None: state_machine_kwargs = self.nested_sm_kwargs.copy() state_machine_kwargs['initial_state'] = initial_state state_machine = state_machine_class(debug=self.debug, **state_machine_kwargs) if blank_finish_state is None: blank_finish_state = initial_state state_machine.states[blank_finish_state].blank_finish = blank_finish for key, value in extra_settings.items(): setattr(state_machine.states[initial_state], key, value) state_machine.run(block, input_offset, memo=self.memo, node=node, match_titles=match_titles) blank_finish = state_machine.states[blank_finish_state].blank_finish state_machine.unlink() return state_machine.abs_line_offset(), blank_finish def section(self, title, source, style, lineno, messages): """Check for a valid subsection and create one if it checks out.""" if self.check_subsection(source, style, lineno): self.new_subsection(title, lineno, messages) def check_subsection(self, source, style, lineno): """ Check for a valid subsection header. Return 1 (true) or None (false). When a new section is reached that isn't a subsection of the current section, back up the line count (use ``previous_line(-x)``), then ``raise EOFError``. The current StateMachine will finish, then the calling StateMachine can re-examine the title. This will work its way back up the calling chain until the correct section level isreached. @@@ Alternative: Evaluate the title, store the title info & level, and back up the chain until that level is reached. Store in memo? Or return in results? :Exception: `EOFError` when a sibling or supersection encountered. 
""" memo = self.memo title_styles = memo.title_styles mylevel = memo.section_level try: # check for existing title style level = title_styles.index(style) + 1 except ValueError: # new title style if len(title_styles) == memo.section_level: # new subsection title_styles.append(style) return 1 else: # not at lowest level self.parent += self.title_inconsistent(source, lineno) return None if level <= mylevel: # sibling or supersection memo.section_level = level # bubble up to parent section if len(style) == 2: memo.section_bubble_up_kludge = 1 # back up 2 lines for underline title, 3 for overline title self.state_machine.previous_line(len(style) + 1) raise EOFError # let parent section re-evaluate if level == mylevel + 1: # immediate subsection return 1 else: # invalid subsection self.parent += self.title_inconsistent(source, lineno) return None def title_inconsistent(self, sourcetext, lineno): error = self.reporter.severe( 'Title level inconsistent:', nodes.literal_block('', sourcetext), line=lineno) return error def new_subsection(self, title, lineno, messages): """Append new subsection to document tree. On return, check level.""" memo = self.memo mylevel = memo.section_level memo.section_level += 1 section_node = nodes.section() self.parent += section_node textnodes, title_messages = self.inline_text(title, lineno) titlenode = nodes.title(title, '', *textnodes) name = normalize_name(titlenode.astext()) section_node['names'].append(name) section_node += titlenode section_node += messages section_node += title_messages self.document.note_implicit_target(section_node, section_node) offset = self.state_machine.line_offset + 1 absoffset = self.state_machine.abs_line_offset() + 1 newabsoffset = self.nested_parse( self.state_machine.input_lines[offset:], input_offset=absoffset, node=section_node, match_titles=1) self.goto_line(newabsoffset) if memo.section_level <= mylevel: # can't handle next section? 
raise EOFError # bubble up to supersection # reset section_level; next pass will detect it properly memo.section_level = mylevel def paragraph(self, lines, lineno): """ Return a list (paragraph & messages) & a boolean: literal_block next? """ data = '\n'.join(lines).rstrip() if re.search(r'(?<!\\)(\\\\)*::$', data): if len(data) == 2: return [], 1 elif data[-3] in ' \n': text = data[:-3].rstrip() else: text = data[:-1] literalnext = 1 else: text = data literalnext = 0 textnodes, messages = self.inline_text(text, lineno) p = nodes.paragraph(data, '', *textnodes) p.line = lineno return [p] + messages, literalnext def inline_text(self, text, lineno): """ Return 2 lists: nodes (text and inline elements), and system_messages. """ return self.inliner.parse(text, lineno, self.memo, self.parent) def unindent_warning(self, node_name): return self.reporter.warning( '%s ends without a blank line; unexpected unindent.' % node_name, line=(self.state_machine.abs_line_number() + 1)) def build_regexp(definition, compile=1): """ Build, compile and return a regular expression based on `definition`. :Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts), where "parts" is a list of regular expressions and/or regular expression definitions to be joined into an or-group. """ name, prefix, suffix, parts = definition part_strings = [] for part in parts: if type(part) is tuple: part_strings.append(build_regexp(part, None)) else: part_strings.append(part) or_group = '|'.join(part_strings) regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals() if compile: return re.compile(regexp, re.UNICODE) else: return regexp class Inliner: """ Parse inline markup; call the `parse()` method. 
""" def __init__(self): self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),] """List of (pattern, bound method) tuples, used by `self.implicit_inline`.""" def init_customizations(self, settings): """Setting-based customizations; run when parsing begins.""" if settings.pep_references: self.implicit_dispatch.append((self.patterns.pep, self.pep_reference)) if settings.rfc_references: self.implicit_dispatch.append((self.patterns.rfc, self.rfc_reference)) def parse(self, text, lineno, memo, parent): # Needs to be refactored for nested inline markup. # Add nested_parse() method? """ Return 2 lists: nodes (text and inline elements), and system_messages. Using `self.patterns.initial`, a pattern which matches start-strings (emphasis, strong, interpreted, phrase reference, literal, substitution reference, and inline target) and complete constructs (simple reference, footnote reference), search for a candidate. When one is found, check for validity (e.g., not a quoted '*' character). If valid, search for the corresponding end string if applicable, and check it for validity. If not found or invalid, generate a warning and ignore the start-string. Implicit inline markup (e.g. standalone URIs) is found last. 
""" self.reporter = memo.reporter self.document = memo.document self.language = memo.language self.parent = parent pattern_search = self.patterns.initial.search dispatch = self.dispatch remaining = escape2null(text) processed = [] unprocessed = [] messages = [] while remaining: match = pattern_search(remaining) if match: groups = match.groupdict() method = dispatch[groups['start'] or groups['backquote'] or groups['refend'] or groups['fnend']] before, inlines, remaining, sysmessages = method(self, match, lineno) unprocessed.append(before) messages += sysmessages if inlines: processed += self.implicit_inline(''.join(unprocessed), lineno) processed += inlines unprocessed = [] else: break remaining = ''.join(unprocessed) + remaining if remaining: processed += self.implicit_inline(remaining, lineno) return processed, messages openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below closers = u'\'")]}>\u2019\u201d\xbb!?' unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0' start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))' % (re.escape(unicode_delimiters), re.escape(openers))) end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' % (re.escape(unicode_delimiters), re.escape(closers))) non_whitespace_before = r'(?<![ \n])' non_whitespace_escape_before = r'(?<![ \n\x00])' non_whitespace_after = r'(?![ \n])' # Alphanumerics with isolated internal [-._+:] chars (i.e. 
not 2 together): simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*' # Valid URI characters (see RFC 2396 & RFC 2732); # final \x00 allows backslash escapes in URIs: uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]""" # Delimiter indicating the end of a URI (not part of the URI): uri_end_delim = r"""[>]""" # Last URI character; same as uric but no punctuation: urilast = r"""[_~*/=+a-zA-Z0-9]""" # End of a URI (either 'urilast' or 'uric followed by a # uri_end_delim'): uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals() emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]""" email_pattern = r""" %(emailc)s+(?:\.%(emailc)s+)* # name (?<!\x00)@ # at %(emailc)s+(?:\.%(emailc)s*)* # host %(uri_end)s # final URI char """ parts = ('initial_inline', start_string_prefix, '', [('start', '', non_whitespace_after, # simple start-strings [r'\*\*', # strong r'\*(?!\*)', # emphasis but not strong r'``', # literal r'_`', # inline internal target r'\|(?!\|)'] # substitution reference ), ('whole', '', end_string_suffix, # whole constructs [# reference name & end-string r'(?P<refname>%s)(?P<refend>__?)' % simplename, ('footnotelabel', r'\[', r'(?P<fnend>\]_)', [r'[0-9]+', # manually numbered r'\#(%s)?' % simplename, # auto-numbered (w/ label?) r'\*', # auto-symbol r'(?P<citationlabel>%s)' % simplename] # citation reference ) ] ), ('backquote', # interpreted text or phrase reference '(?P<role>(:%s:)?)' % simplename, # optional role non_whitespace_after, ['`(?!`)'] # but not literal ) ] ) patterns = Struct( initial=build_regexp(parts), emphasis=re.compile(non_whitespace_escape_before + r'(\*)' + end_string_suffix), strong=re.compile(non_whitespace_escape_before + r'(\*\*)' + end_string_suffix), interpreted_or_phrase_ref=re.compile( r""" %(non_whitespace_escape_before)s ( ` (?P<suffix> (?P<role>:%(simplename)s:)? (?P<refend>__?)? 
) ) %(end_string_suffix)s """ % locals(), re.VERBOSE | re.UNICODE), embedded_uri=re.compile( r""" ( (?:[ \n]+|^) # spaces or beginning of line/string < # open bracket %(non_whitespace_after)s ([^<>\x00]+) # anything but angle brackets & nulls %(non_whitespace_before)s > # close bracket w/o whitespace before ) $ # end of string """ % locals(), re.VERBOSE), literal=re.compile(non_whitespace_before + '(``)' + end_string_suffix), target=re.compile(non_whitespace_escape_before + r'(`)' + end_string_suffix), substitution_ref=re.compile(non_whitespace_escape_before + r'(\|_{0,2})' + end_string_suffix), email=re.compile(email_pattern % locals() + '$', re.VERBOSE), uri=re.compile( (r""" %(start_string_prefix)s (?P<whole> (?P<absolute> # absolute URI (?P<scheme> # scheme (http, ftp, mailto) [a-zA-Z][a-zA-Z0-9.+-]* ) : ( ( # either: (//?)? # hierarchical URI %(uric)s* # URI characters %(uri_end)s # final URI char ) ( # optional query \?%(uric)s* %(uri_end)s )? ( # optional fragment \#%(uric)s* %(uri_end)s )? ) ) | # *OR* (?P<email> # email address """ + email_pattern + r""" ) ) %(end_string_suffix)s """) % locals(), re.VERBOSE), pep=re.compile( r""" %(start_string_prefix)s ( (pep-(?P<pepnum1>\d+)(.txt)?) 
# reference to source file | (PEP\s+(?P<pepnum2>\d+)) # reference by name ) %(end_string_suffix)s""" % locals(), re.VERBOSE), rfc=re.compile( r""" %(start_string_prefix)s (RFC(-|\s+)?(?P<rfcnum>\d+)) %(end_string_suffix)s""" % locals(), re.VERBOSE)) def quoted_start(self, match): """Return 1 if inline markup start-string is 'quoted', 0 if not.""" string = match.string start = match.start() end = match.end() if start == 0: # start-string at beginning of text return 0 prestart = string[start - 1] try: poststart = string[end] if self.openers.index(prestart) \ == self.closers.index(poststart): # quoted return 1 except IndexError: # start-string at end of text return 1 except ValueError: # not quoted pass return 0 def inline_obj(self, match, lineno, end_pattern, nodeclass, restore_backslashes=0): string = match.string matchstart = match.start('start') matchend = match.end('start') if self.quoted_start(match): return (string[:matchend], [], string[matchend:], [], '') endmatch = end_pattern.search(string[matchend:]) if endmatch and endmatch.start(1): # 1 or more chars text = unescape(endmatch.string[:endmatch.start(1)], restore_backslashes) textend = matchend + endmatch.end(1) rawsource = unescape(string[matchstart:textend], 1) return (string[:matchstart], [nodeclass(rawsource, text)], string[textend:], [], endmatch.group(1)) msg = self.reporter.warning( 'Inline %s start-string without end-string.' 
% nodeclass.__name__, line=lineno) text = unescape(string[matchstart:matchend], 1) rawsource = unescape(string[matchstart:matchend], 1) prb = self.problematic(text, rawsource, msg) return string[:matchstart], [prb], string[matchend:], [msg], '' def problematic(self, text, rawsource, message): msgid = self.document.set_id(message, self.parent) problematic = nodes.problematic(rawsource, text, refid=msgid) prbid = self.document.set_id(problematic) message.add_backref(prbid) return problematic def emphasis(self, match, lineno): before, inlines, remaining, sysmessages, endstring = self.inline_obj( match, lineno, self.patterns.emphasis, nodes.emphasis) return before, inlines, remaining, sysmessages def strong(self, match, lineno): before, inlines, remaining, sysmessages, endstring = self.inline_obj( match, lineno, self.patterns.strong, nodes.strong) return before, inlines, remaining, sysmessages def interpreted_or_phrase_ref(self, match, lineno): end_pattern = self.patterns.interpreted_or_phrase_ref string = match.string matchstart = match.start('backquote') matchend = match.end('backquote') rolestart = match.start('role') role = match.group('role') position = '' if role: role = role[1:-1] position = 'prefix' elif self.quoted_start(match): return (string[:matchend], [], string[matchend:], []) endmatch = end_pattern.search(string[matchend:]) if endmatch and endmatch.start(1): # 1 or more chars textend = matchend + endmatch.end() if endmatch.group('role'): if role: msg = self.reporter.warning( 'Multiple roles in interpreted text (both ' 'prefix and suffix present; only one allowed).', line=lineno) text = unescape(string[rolestart:textend], 1) prb = self.problematic(text, text, msg) return string[:rolestart], [prb], string[textend:], [msg] role = endmatch.group('suffix')[1:-1] position = 'suffix' escaped = endmatch.string[:endmatch.start(1)] rawsource = unescape(string[matchstart:textend], 1) if rawsource[-1:] == '_': if role: msg = self.reporter.warning( 'Mismatch: both 
interpreted text role %s and ' 'reference suffix.' % position, line=lineno) text = unescape(string[rolestart:textend], 1) prb = self.problematic(text, text, msg) return string[:rolestart], [prb], string[textend:], [msg] return self.phrase_ref(string[:matchstart], string[textend:], rawsource, escaped, unescape(escaped)) else: rawsource = unescape(string[rolestart:textend], 1) nodelist, messages = self.interpreted(rawsource, escaped, role, lineno) return (string[:rolestart], nodelist, string[textend:], messages) msg = self.reporter.warning( 'Inline interpreted text or phrase reference start-string ' 'without end-string.', line=lineno) text = unescape(string[matchstart:matchend], 1) prb = self.problematic(text, text, msg) return string[:matchstart], [prb], string[matchend:], [msg] def phrase_ref(self, before, after, rawsource, escaped, text): match = self.patterns.embedded_uri.search(escaped) if match: text = unescape(escaped[:match.start(0)]) uri_text = match.group(2) uri = ''.join(uri_text.split()) uri = self.adjust_uri(uri) if uri: target = nodes.target(match.group(1), refuri=uri) else: raise ApplicationError('problem with URI: %r' % uri_text) if not text: text = uri else: target = None refname = normalize_name(text) reference = nodes.reference(rawsource, text, name=whitespace_normalize_name(text)) node_list = [reference] if rawsource[-2:] == '__': if target: reference['refuri'] = uri else: reference['anonymous'] = 1 else: if target: reference['refuri'] = uri target['names'].append(refname) self.document.note_explicit_target(target, self.parent) node_list.append(target) else: reference['refname'] = refname self.document.note_refname(reference) return before, node_list, after, [] def adjust_uri(self, uri): match = self.patterns.email.match(uri) if match: return 'mailto:' + uri else: return uri def interpreted(self, rawsource, text, role, lineno): role_fn, messages = roles.role(role, self.language, lineno, self.reporter) if role_fn: nodes, messages2 = role_fn(role, 
rawsource, text, lineno, self) return nodes, messages + messages2 else: msg = self.reporter.error( 'Unknown interpreted text role "%s".' % role, line=lineno) return ([self.problematic(rawsource, rawsource, msg)], messages + [msg]) def literal(self, match, lineno): before, inlines, remaining, sysmessages, endstring = self.inline_obj( match, lineno, self.patterns.literal, nodes.literal, restore_backslashes=1) return before, inlines, remaining, sysmessages def inline_internal_target(self, match, lineno): before, inlines, remaining, sysmessages, endstring = self.inline_obj( match, lineno, self.patterns.target, nodes.target) if inlines and isinstance(inlines[0], nodes.target): assert len(inlines) == 1 target = inlines[0] name = normalize_name(target.astext()) target['names'].append(name) self.document.note_explicit_target(target, self.parent) return before, inlines, remaining, sysmessages def substitution_reference(self, match, lineno): before, inlines, remaining, sysmessages, endstring = self.inline_obj( match, lineno, self.patterns.substitution_ref, nodes.substitution_reference) if len(inlines) == 1: subref_node = inlines[0] if isinstance(subref_node, nodes.substitution_reference): subref_text = subref_node.astext() self.document.note_substitution_ref(subref_node, subref_text) if endstring[-1:] == '_': reference_node = nodes.reference( '|%s%s' % (subref_text, endstring), '') if endstring[-2:] == '__': reference_node['anonymous'] = 1 else: reference_node['refname'] = normalize_name(subref_text) self.document.note_refname(reference_node) reference_node += subref_node inlines = [reference_node] return before, inlines, remaining, sysmessages def footnote_reference(self, match, lineno): """ Handles `nodes.footnote_reference` and `nodes.citation_reference` elements. 
""" label = match.group('footnotelabel') refname = normalize_name(label) string = match.string before = string[:match.start('whole')] remaining = string[match.end('whole'):] if match.group('citationlabel'): refnode = nodes.citation_reference('[%s]_' % label, refname=refname) refnode += nodes.Text(label) self.document.note_citation_ref(refnode) else: refnode = nodes.footnote_reference('[%s]_' % label) if refname[0] == '#': refname = refname[1:] refnode['auto'] = 1 self.document.note_autofootnote_ref(refnode) elif refname == '*': refname = '' refnode['auto'] = '*' self.document.note_symbol_footnote_ref( refnode) else: refnode += nodes.Text(label) if refname: refnode['refname'] = refname self.document.note_footnote_ref(refnode) if utils.get_trim_footnote_ref_space(self.document.settings): before = before.rstrip() return (before, [refnode], remaining, []) def reference(self, match, lineno, anonymous=None): referencename = match.group('refname') refname = normalize_name(referencename) referencenode = nodes.reference( referencename + match.group('refend'), referencename, name=whitespace_normalize_name(referencename)) if anonymous: referencenode['anonymous'] = 1 else: referencenode['refname'] = refname self.document.note_refname(referencenode) string = match.string matchstart = match.start('whole') matchend = match.end('whole') return (string[:matchstart], [referencenode], string[matchend:], []) def anonymous_reference(self, match, lineno): return self.reference(match, lineno, anonymous=1) def standalone_uri(self, match, lineno): if (not match.group('scheme') or match.group('scheme').lower() in urischemes.schemes): if match.group('email'): addscheme = 'mailto:' else: addscheme = '' text = match.group('whole') unescaped = unescape(text, 0) return [nodes.reference(unescape(text, 1), unescaped, refuri=addscheme + unescaped)] else: # not a valid scheme raise MarkupMismatch def pep_reference(self, match, lineno): text = match.group(0) if text.startswith('pep-'): pepnum = 
int(match.group('pepnum1')) elif text.startswith('PEP'): pepnum = int(match.group('pepnum2')) else: raise MarkupMismatch ref = (self.document.settings.pep_base_url + self.document.settings.pep_file_url_template % pepnum) unescaped = unescape(text, 0) return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)] rfc_url = 'rfc%d.html' def rfc_reference(self, match, lineno): text = match.group(0) if text.startswith('RFC'): rfcnum = int(match.group('rfcnum')) ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum else: raise MarkupMismatch unescaped = unescape(text, 0) return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)] def implicit_inline(self, text, lineno): """ Check each of the patterns in `self.implicit_dispatch` for a match, and dispatch to the stored method for the pattern. Recursively check the text before and after the match. Return a list of `nodes.Text` and inline element nodes. """ if not text: return [] for pattern, method in self.implicit_dispatch: match = pattern.search(text) if match: try: # Must recurse on strings before *and* after the match; # there may be multiple patterns. return (self.implicit_inline(text[:match.start()], lineno) + method(match, lineno) + self.implicit_inline(text[match.end():], lineno)) except MarkupMismatch: pass return [nodes.Text(unescape(text), rawsource=unescape(text, 1))] dispatch = {'*': emphasis, '**': strong, '`': interpreted_or_phrase_ref, '``': literal, '_`': inline_internal_target, ']_': footnote_reference, '|': substitution_reference, '_': reference, '__': anonymous_reference} def _loweralpha_to_int(s, _zero=(ord('a')-1)): return ord(s) - _zero def _upperalpha_to_int(s, _zero=(ord('A')-1)): return ord(s) - _zero def _lowerroman_to_int(s): return roman.fromRoman(s.upper()) class Body(RSTState): """ Generic classifier of the first line of a block. 
""" double_width_pad_char = tableparser.TableParser.double_width_pad_char """Padding character for East Asian double-width text.""" enum = Struct() """Enumerated list parsing information.""" enum.formatinfo = { 'parens': Struct(prefix='(', suffix=')', start=1, end=-1), 'rparen': Struct(prefix='', suffix=')', start=0, end=-1), 'period': Struct(prefix='', suffix='.', start=0, end=-1)} enum.formats = enum.formatinfo.keys() enum.sequences = ['arabic', 'loweralpha', 'upperalpha', 'lowerroman', 'upperroman'] # ORDERED! enum.sequencepats = {'arabic': '[0-9]+', 'loweralpha': '[a-z]', 'upperalpha': '[A-Z]', 'lowerroman': '[ivxlcdm]+', 'upperroman': '[IVXLCDM]+',} enum.converters = {'arabic': int, 'loweralpha': _loweralpha_to_int, 'upperalpha': _upperalpha_to_int, 'lowerroman': _lowerroman_to_int, 'upperroman': roman.fromRoman} enum.sequenceregexps = {} for sequence in enum.sequences: enum.sequenceregexps[sequence] = re.compile( enum.sequencepats[sequence] + '$') grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$') """Matches the top (& bottom) of a full table).""" simple_table_top_pat = re.compile('=+( +=+)+ *$') """Matches the top of a simple table.""" simple_table_border_pat = re.compile('=+[ =]*$') """Matches the bottom & header bottom of a simple table.""" pats = {} """Fragments of patterns used by transitions.""" pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]' pats['alpha'] = '[a-zA-Z]' pats['alphanum'] = '[a-zA-Z0-9]' pats['alphanumplus'] = '[a-zA-Z0-9_-]' pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s' '|%(upperroman)s|#)' % enum.sequencepats) pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats # @@@ Loosen up the pattern? Allow Unicode? pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' 
% pats pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats for format in enum.formats: pats[format] = '(?P<%s>%s%s%s)' % ( format, re.escape(enum.formatinfo[format].prefix), pats['enum'], re.escape(enum.formatinfo[format].suffix)) patterns = { 'bullet': u'[-+*\u2022\u2023\u2043]( +|$)', 'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats, 'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)', 'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats, 'doctest': r'>>>( +|$)', 'line_block': r'\|( +|$)', 'grid_table_top': grid_table_top_pat, 'simple_table_top': simple_table_top_pat, 'explicit_markup': r'\.\.( +|$)', 'anonymous': r'__( +|$)', 'line': r'(%(nonalphanum7bit)s)\1* *$' % pats, 'text': r''} initial_transitions = ( 'bullet', 'enumerator', 'field_marker', 'option_marker', 'doctest', 'line_block', 'grid_table_top', 'simple_table_top', 'explicit_markup', 'anonymous', 'line', 'text') def indent(self, match, context, next_state): """Block quote.""" indented, indent, line_offset, blank_finish = \ self.state_machine.get_indented() elements = self.block_quote(indented, line_offset) self.parent += elements if not blank_finish: self.parent += self.unindent_warning('Block quote') return context, next_state, [] def block_quote(self, indented, line_offset): elements = [] while indented: (blockquote_lines, attribution_lines, attribution_offset, indented, new_line_offset) = self.split_attribution(indented, line_offset) blockquote = nodes.block_quote() self.nested_parse(blockquote_lines, line_offset, blockquote) elements.append(blockquote) if attribution_lines: attribution, messages = self.parse_attribution( attribution_lines, attribution_offset) blockquote += attribution elements += messages line_offset = new_line_offset while indented and not indented[0]: indented = indented[1:] line_offset += 1 return elements # U+2014 is an em-dash: attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])') def split_attribution(self, indented, line_offset): 
""" Check for a block quote attribution and split it off: * First line after a blank line must begin with a dash ("--", "---", em-dash; matches `self.attribution_pattern`). * Every line after that must have consistent indentation. * Attributions must be preceded by block quote content. Return a tuple of: (block quote content lines, content offset, attribution lines, attribution offset, remaining indented lines). """ blank = None nonblank_seen = False for i in range(len(indented)): line = indented[i].rstrip() if line: if nonblank_seen and blank == i - 1: # last line blank match = self.attribution_pattern.match(line) if match: attribution_end, indent = self.check_attribution( indented, i) if attribution_end: a_lines = indented[i:attribution_end] a_lines.trim_left(match.end(), end=1) a_lines.trim_left(indent, start=1) return (indented[:i], a_lines, i, indented[attribution_end:], line_offset + attribution_end) nonblank_seen = True else: blank = i else: return (indented, None, None, None, None) def check_attribution(self, indented, attribution_start): """ Check attribution shape. Return the index past the end of the attribution, and the indent. 
""" indent = None i = attribution_start + 1 for i in range(attribution_start + 1, len(indented)): line = indented[i].rstrip() if not line: break if indent is None: indent = len(line) - len(line.lstrip()) elif len(line) - len(line.lstrip()) != indent: return None, None # bad shape; not an attribution else: # return index of line after last attribution line: i += 1 return i, (indent or 0) def parse_attribution(self, indented, line_offset): text = '\n'.join(indented).rstrip() lineno = self.state_machine.abs_line_number() + line_offset textnodes, messages = self.inline_text(text, lineno) node = nodes.attribution(text, '', *textnodes) node.line = lineno return node, messages def bullet(self, match, context, next_state): """Bullet list item.""" bulletlist = nodes.bullet_list() self.parent += bulletlist bulletlist['bullet'] = match.string[0] i, blank_finish = self.list_item(match.end()) bulletlist += i offset = self.state_machine.line_offset + 1 # next line new_line_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=bulletlist, initial_state='BulletList', blank_finish=blank_finish) self.goto_line(new_line_offset) if not blank_finish: self.parent += self.unindent_warning('Bullet list') return [], next_state, [] def list_item(self, indent): if self.state_machine.line[indent:]: indented, line_offset, blank_finish = ( self.state_machine.get_known_indented(indent)) else: indented, indent, line_offset, blank_finish = ( self.state_machine.get_first_known_indented(indent)) listitem = nodes.list_item('\n'.join(indented)) if indented: self.nested_parse(indented, input_offset=line_offset, node=listitem) return listitem, blank_finish def enumerator(self, match, context, next_state): """Enumerated List Item""" format, sequence, text, ordinal = self.parse_enumerator(match) if not self.is_enumerated_list_item(ordinal, sequence, format): raise statemachine.TransitionCorrection('text') 
enumlist = nodes.enumerated_list() self.parent += enumlist if sequence == '#': enumlist['enumtype'] = 'arabic' else: enumlist['enumtype'] = sequence enumlist['prefix'] = self.enum.formatinfo[format].prefix enumlist['suffix'] = self.enum.formatinfo[format].suffix if ordinal != 1: enumlist['start'] = ordinal msg = self.reporter.info( 'Enumerated list start value not ordinal-1: "%s" (ordinal %s)' % (text, ordinal), line=self.state_machine.abs_line_number()) self.parent += msg listitem, blank_finish = self.list_item(match.end()) enumlist += listitem offset = self.state_machine.line_offset + 1 # next line newline_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=enumlist, initial_state='EnumeratedList', blank_finish=blank_finish, extra_settings={'lastordinal': ordinal, 'format': format, 'auto': sequence == '#'}) self.goto_line(newline_offset) if not blank_finish: self.parent += self.unindent_warning('Enumerated list') return [], next_state, [] def parse_enumerator(self, match, expected_sequence=None): """ Analyze an enumerator and return the results. :Return: - the enumerator format ('period', 'parens', or 'rparen'), - the sequence used ('arabic', 'loweralpha', 'upperroman', etc.), - the text of the enumerator, stripped of formatting, and - the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.; ``None`` is returned for invalid enumerator text). The enumerator format has already been determined by the regular expression match. If `expected_sequence` is given, that sequence is tried first. If not, we check for Roman numeral 1. This way, single-character Roman numerals (which are also alphabetical) can be matched. If no sequence has been matched, all sequences are checked in order. """ groupdict = match.groupdict() sequence = '' for format in self.enum.formats: if groupdict[format]: # was this the format matched? 
                break # yes; keep `format`
        else:                       # shouldn't happen; pattern guarantees it
            raise ParserError('enumerator format not matched')
        # Strip the format's prefix/suffix, leaving the bare enumerator text.
        text = groupdict[format][self.enum.formatinfo[format].start
                                 :self.enum.formatinfo[format].end]
        if text == '#':
            sequence = '#'          # auto-enumerator
        elif expected_sequence:
            # Continuing an existing list: try its sequence first.
            try:
                if self.enum.sequenceregexps[expected_sequence].match(text):
                    sequence = expected_sequence
            except KeyError:        # shouldn't happen
                # NOTE(review): this message formats `sequence`, which is
                # still '' here; `expected_sequence` was probably intended.
                raise ParserError('unknown enumerator sequence: %s'
                                  % sequence)
        # Single-character Roman numerals are also alphabetical; give
        # Roman numeral 1 priority so 'i'/'I' start a roman sequence.
        elif text == 'i':
            sequence = 'lowerroman'
        elif text == 'I':
            sequence = 'upperroman'
        if not sequence:
            # Check all sequences, in the (significant) order of
            # `self.enum.sequences`.
            for sequence in self.enum.sequences:
                if self.enum.sequenceregexps[sequence].match(text):
                    break
            else:                   # shouldn't happen
                raise ParserError('enumerator sequence not matched')
        if sequence == '#':
            ordinal = 1
        else:
            try:
                ordinal = self.enum.converters[sequence](text)
            except roman.InvalidRomanNumeralError:
                ordinal = None      # invalid enumerator text
        return format, sequence, text, ordinal

    def is_enumerated_list_item(self, ordinal, sequence, format):
        """
        Check validity based on the ordinal value and the second line.

        Return true if the ordinal is valid and the second line is blank,
        indented, or starts with the next enumerator or an auto-enumerator.
        """
        if ordinal is None:
            return None
        # Peek at the next input line without consuming it.
        try:
            next_line = self.state_machine.next_line()
        except EOFError:            # end of input lines
            self.state_machine.previous_line()
            return 1
        else:
            self.state_machine.previous_line()
        if not next_line[:1].strip():   # blank or indented
            return 1
        result = self.make_enumerator(ordinal + 1, sequence, format)
        if result:
            next_enumerator, auto_enumerator = result
            try:
                if ( next_line.startswith(next_enumerator) or
                     next_line.startswith(auto_enumerator) ):
                    return 1
            except TypeError:
                pass
        return None

    def make_enumerator(self, ordinal, sequence, format):
        """
        Construct and return the next enumerated list item marker, and an
        auto-enumerator ("#" instead of the regular enumerator).

        Return ``None`` for invalid (out of range) ordinals.
""" #" if sequence == '#': enumerator = '#' elif sequence == 'arabic': enumerator = str(ordinal) else: if sequence.endswith('alpha'): if ordinal > 26: return None enumerator = chr(ordinal + ord('a') - 1) elif sequence.endswith('roman'): try: enumerator = roman.toRoman(ordinal) except roman.RomanError: return None else: # shouldn't happen raise ParserError('unknown enumerator sequence: "%s"' % sequence) if sequence.startswith('lower'): enumerator = enumerator.lower() elif sequence.startswith('upper'): enumerator = enumerator.upper() else: # shouldn't happen raise ParserError('unknown enumerator sequence: "%s"' % sequence) formatinfo = self.enum.formatinfo[format] next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix + ' ') auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' ' return next_enumerator, auto_enumerator def field_marker(self, match, context, next_state): """Field list item.""" field_list = nodes.field_list() self.parent += field_list field, blank_finish = self.field(match) field_list += field offset = self.state_machine.line_offset + 1 # next line newline_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=field_list, initial_state='FieldList', blank_finish=blank_finish) self.goto_line(newline_offset) if not blank_finish: self.parent += self.unindent_warning('Field list') return [], next_state, [] def field(self, match): name = self.parse_field_marker(match) lineno = self.state_machine.abs_line_number() indented, indent, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) field_node = nodes.field() field_node.line = lineno name_nodes, name_messages = self.inline_text(name, lineno) field_node += nodes.field_name(name, '', *name_nodes) field_body = nodes.field_body('\n'.join(indented), *name_messages) field_node += field_body if indented: self.parse_field_body(indented, line_offset, field_body) 
return field_node, blank_finish def parse_field_marker(self, match): """Extract & return field name from a field marker match.""" field = match.group()[1:] # strip off leading ':' field = field[:field.rfind(':')] # strip off trailing ':' etc. return field def parse_field_body(self, indented, offset, node): self.nested_parse(indented, input_offset=offset, node=node) def option_marker(self, match, context, next_state): """Option list item.""" optionlist = nodes.option_list() try: listitem, blank_finish = self.option_list_item(match) except MarkupError, (message, lineno): # This shouldn't happen; pattern won't match. msg = self.reporter.error( 'Invalid option list marker: %s' % message, line=lineno) self.parent += msg indented, indent, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) elements = self.block_quote(indented, line_offset) self.parent += elements if not blank_finish: self.parent += self.unindent_warning('Option list') return [], next_state, [] self.parent += optionlist optionlist += listitem offset = self.state_machine.line_offset + 1 # next line newline_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=optionlist, initial_state='OptionList', blank_finish=blank_finish) self.goto_line(newline_offset) if not blank_finish: self.parent += self.unindent_warning('Option list') return [], next_state, [] def option_list_item(self, match): offset = self.state_machine.abs_line_offset() options = self.parse_option_marker(match) indented, indent, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) if not indented: # not an option list item self.goto_line(offset) raise statemachine.TransitionCorrection('text') option_group = nodes.option_group('', *options) description = nodes.description('\n'.join(indented)) option_list_item = nodes.option_list_item('', option_group, description) if indented: 
            self.nested_parse(indented, input_offset=line_offset,
                              node=description)
        return option_list_item, blank_finish

    def parse_option_marker(self, match):
        """
        Return a list of `node.option` and `node.option_argument` objects,
        parsed from an option marker match.

        :Exception: `MarkupError` for invalid option markers.
        """
        optlist = []
        # Multiple synonymous options are comma-separated: "-o, --output".
        optionstrings = match.group().rstrip().split(', ')
        for optionstring in optionstrings:
            tokens = optionstring.split()
            delimiter = ' '
            firstopt = tokens[0].split('=')
            if len(firstopt) > 1:
                # "--opt=value" form
                tokens[:1] = firstopt
                delimiter = '='
            elif (len(tokens[0]) > 2
                  and ((tokens[0].startswith('-')
                        and not tokens[0].startswith('--'))
                       or tokens[0].startswith('+'))):
                # "-ovalue" form
                tokens[:1] = [tokens[0][:2], tokens[0][2:]]
                delimiter = ''
            if len(tokens) > 1 and (tokens[1].startswith('<')
                                    and tokens[-1].endswith('>')):
                # "-o <value1 value2>" form; join all values into one token
                tokens[1:] = [' '.join(tokens[1:])]
            if 0 < len(tokens) <= 2:
                option = nodes.option(optionstring)
                option += nodes.option_string(tokens[0], tokens[0])
                if len(tokens) > 1:
                    option += nodes.option_argument(tokens[1], tokens[1],
                                                    delimiter=delimiter)
                optlist.append(option)
            else:
                raise MarkupError(
                    'wrong number of option tokens (=%s), should be 1 or 2: '
                    '"%s"' % (len(tokens), optionstring),
                    self.state_machine.abs_line_number() + 1)
        return optlist

    def doctest(self, match, context, next_state):
        """Doctest block: consume the contiguous text block verbatim."""
        data = '\n'.join(self.state_machine.get_text_block())
        self.parent += nodes.doctest_block(data, data)
        return [], next_state, []

    def line_block(self, match, context, next_state):
        """First line of a line block."""
        block = nodes.line_block()
        self.parent += block
        lineno = self.state_machine.abs_line_number()
        line, messages, blank_finish = self.line_block_line(match, lineno)
        block += line
        self.parent += messages
        if not blank_finish:
            # Collect subsequent "|" lines via a nested parse in state
            # 'LineBlock'.
            offset = self.state_machine.line_offset + 1   # next line
            new_line_offset, blank_finish = self.nested_list_parse(
                  self.state_machine.input_lines[offset:],
                  input_offset=self.state_machine.abs_line_offset() + 1,
                  node=block, initial_state='LineBlock',
                  blank_finish=0)
            self.goto_line(new_line_offset)
        if not blank_finish:
            self.parent += self.reporter.warning(
                'Line block ends without a blank line.',
                line=(self.state_machine.abs_line_number() + 1))
        if len(block):
            if block[0].indent is None:
                block[0].indent = 0
            self.nest_line_block_lines(block)
        return [], next_state, []

    def line_block_line(self, match, lineno):
        """Return one line element of a line_block."""
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        text = u'\n'.join(indented)
        text_nodes, messages = self.inline_text(text, lineno)
        line = nodes.line(text, '', *text_nodes)
        if match.string.rstrip() != '|': # not empty
            # Record indentation past the "| " marker for later nesting.
            line.indent = len(match.group(1)) - 1
        return line, messages, blank_finish

    def nest_line_block_lines(self, block):
        """Propagate indents to empty lines, then nest by indentation."""
        for index in range(1, len(block)):
            if block[index].indent is None:
                block[index].indent = block[index - 1].indent
        self.nest_line_block_segment(block)

    def nest_line_block_segment(self, block):
        # Recursively wrap runs of deeper-indented lines in nested
        # line_block nodes; lines at the minimum indent stay at this level.
        indents = [item.indent for item in block]
        least = min(indents)
        new_items = []
        new_block = nodes.line_block()
        for item in block:
            if item.indent > least:
                new_block.append(item)
            else:
                if len(new_block):
                    self.nest_line_block_segment(new_block)
                    new_items.append(new_block)
                    new_block = nodes.line_block()
                new_items.append(item)
        if len(new_block):
            self.nest_line_block_segment(new_block)
            new_items.append(new_block)
        block[:] = new_items

    def grid_table_top(self, match, context, next_state):
        """Top border of a full table."""
        return self.table_top(match, context, next_state,
                              self.isolate_grid_table,
                              tableparser.GridTableParser)

    def simple_table_top(self, match, context, next_state):
        """Top border of a simple table."""
        return self.table_top(match, context, next_state,
                              self.isolate_simple_table,
                              tableparser.SimpleTableParser)

    def table_top(self, match, context, next_state,
                  isolate_function, parser_class):
"""Top border of a generic table.""" nodelist, blank_finish = self.table(isolate_function, parser_class) self.parent += nodelist if not blank_finish: msg = self.reporter.warning( 'Blank line required after table.', line=self.state_machine.abs_line_number() + 1) self.parent += msg return [], next_state, [] def table(self, isolate_function, parser_class): """Parse a table.""" block, messages, blank_finish = isolate_function() if block: try: parser = parser_class() tabledata = parser.parse(block) tableline = (self.state_machine.abs_line_number() - len(block) + 1) table = self.build_table(tabledata, tableline) nodelist = [table] + messages except tableparser.TableMarkupError, detail: nodelist = self.malformed_table( block, ' '.join(detail.args)) + messages else: nodelist = messages return nodelist, blank_finish def isolate_grid_table(self): messages = [] blank_finish = 1 try: block = self.state_machine.get_text_block(flush_left=1) except statemachine.UnexpectedIndentationError, instance: block, source, lineno = instance.args messages.append(self.reporter.error('Unexpected indentation.', source=source, line=lineno)) blank_finish = 0 block.disconnect() # for East Asian chars: block.pad_double_width(self.double_width_pad_char) width = len(block[0].strip()) for i in range(len(block)): block[i] = block[i].strip() if block[i][0] not in '+|': # check left edge blank_finish = 0 self.state_machine.previous_line(len(block) - i) del block[i:] break if not self.grid_table_top_pat.match(block[-1]): # find bottom blank_finish = 0 # from second-last to third line of table: for i in range(len(block) - 2, 1, -1): if self.grid_table_top_pat.match(block[i]): self.state_machine.previous_line(len(block) - i + 1) del block[i+1:] break else: messages.extend(self.malformed_table(block)) return [], messages, blank_finish for i in range(len(block)): # check right edge if len(block[i]) != width or block[i][-1] not in '+|': messages.extend(self.malformed_table(block)) return [], messages, 
blank_finish return block, messages, blank_finish def isolate_simple_table(self): start = self.state_machine.line_offset lines = self.state_machine.input_lines limit = len(lines) - 1 toplen = len(lines[start].strip()) pattern_match = self.simple_table_border_pat.match found = 0 found_at = None i = start + 1 while i <= limit: line = lines[i] match = pattern_match(line) if match: if len(line.strip()) != toplen: self.state_machine.next_line(i - start) messages = self.malformed_table( lines[start:i+1], 'Bottom/header table border does ' 'not match top border.') return [], messages, i == limit or not lines[i+1].strip() found += 1 found_at = i if found == 2 or i == limit or not lines[i+1].strip(): end = i break i += 1 else: # reached end of input_lines if found: extra = ' or no blank line after table bottom' self.state_machine.next_line(found_at - start) block = lines[start:found_at+1] else: extra = '' self.state_machine.next_line(i - start - 1) block = lines[start:] messages = self.malformed_table( block, 'No bottom table border found%s.' % extra) return [], messages, not extra self.state_machine.next_line(end - start) block = lines[start:end+1] # for East Asian chars: block.pad_double_width(self.double_width_pad_char) return block, [], end == limit or not lines[end+1].strip() def malformed_table(self, block, detail=''): block.replace(self.double_width_pad_char, '') data = '\n'.join(block) message = 'Malformed table.' 
        lineno = self.state_machine.abs_line_number() - len(block) + 1
        if detail:
            message += '\n' + detail
        error = self.reporter.error(message, nodes.literal_block(data, data),
                                    line=lineno)
        return [error]

    def build_table(self, tabledata, tableline, stub_columns=0):
        """Build and return a doctree table from parsed table data.

        `tabledata` is (colwidths, headrows, bodyrows) as produced by the
        table parsers; `tableline` is the table's absolute starting line.
        """
        colwidths, headrows, bodyrows = tabledata
        table = nodes.table()
        tgroup = nodes.tgroup(cols=len(colwidths))
        table += tgroup
        for colwidth in colwidths:
            colspec = nodes.colspec(colwidth=colwidth)
            # The first `stub_columns` columns are marked as row stubs.
            if stub_columns:
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        if headrows:
            thead = nodes.thead()
            tgroup += thead
            for row in headrows:
                thead += self.build_table_row(row, tableline)
        tbody = nodes.tbody()
        tgroup += tbody
        for row in bodyrows:
            tbody += self.build_table_row(row, tableline)
        return table

    def build_table_row(self, rowdata, tableline):
        """Build one table row; None cells are spanned-over placeholders."""
        row = nodes.row()
        for cell in rowdata:
            if cell is None:
                continue
            morerows, morecols, offset, cellblock = cell
            attributes = {}
            if morerows:
                attributes['morerows'] = morerows
            if morecols:
                attributes['morecols'] = morecols
            entry = nodes.entry(**attributes)
            row += entry
            if ''.join(cellblock):
                # Cell contents are full body elements: nested parse.
                self.nested_parse(cellblock, input_offset=tableline+offset,
                                  node=entry)
        return row

    explicit = Struct()
    """Patterns and constants used for explicit markup recognition."""

    explicit.patterns = Struct(
          target=re.compile(r"""
                            (
                              _               # anonymous target
                            |               # *OR*
                              (?!_)           # no underscore at the beginning
                              (?P<quote>`?)   # optional open quote
                              (?![ `])        # first char. not space or
                                              # backquote
                              (?P<name>       # reference name
                                .+?
                              )
                              %(non_whitespace_escape_before)s
                              (?P=quote)      # close quote if open quote used
                            )
                            (?<!(?<!\x00):) # no unescaped colon at end
                            %(non_whitespace_escape_before)s
                            [ ]?            # optional space
                            :               # end of reference name
                            ([ ]+|$)        # followed by whitespace
                            """ % vars(Inliner), re.VERBOSE),
          reference=re.compile(r"""
                               (
                                 (?P<simple>%(simplename)s)_
                               |                # *OR*
                                 `              # open backquote
                                 (?![ ])        # not space
                                 (?P<phrase>.+?) # hyperlink phrase
                                 %(non_whitespace_escape_before)s
                                 `_             # close backquote,
                                                # reference mark
                               )
                               $                # end of string
                               """ % vars(Inliner),
                               re.VERBOSE | re.UNICODE),
          substitution=re.compile(r"""
                                  (
                                    (?![ ])        # first char. not space
                                    (?P<name>.+?)  # substitution text
                                    %(non_whitespace_escape_before)s
                                    \|             # close delimiter
                                  )
                                  ([ ]+|$)         # followed by whitespace
                                  """ % vars(Inliner), re.VERBOSE),)

    def footnote(self, match):
        """Parse a footnote ([1]_, [#]_, [#name]_ or [*]_ style)."""
        lineno = self.state_machine.abs_line_number()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        label = match.group(1)
        name = normalize_name(label)
        footnote = nodes.footnote('\n'.join(indented))
        footnote.line = lineno
        if name[0] == '#':              # auto-numbered
            name = name[1:]             # autonumber label
            footnote['auto'] = 1
            if name:
                footnote['names'].append(name)
            self.document.note_autofootnote(footnote)
        elif name == '*':               # auto-symbol
            name = ''
            footnote['auto'] = '*'
            self.document.note_symbol_footnote(footnote)
        else:                           # manually numbered
            footnote += nodes.label('', label)
            footnote['names'].append(name)
            self.document.note_footnote(footnote)
        if name:
            self.document.note_explicit_target(footnote, footnote)
        else:
            self.document.set_id(footnote, footnote)
        if indented:
            self.nested_parse(indented, input_offset=offset, node=footnote)
        return [footnote], blank_finish

    def citation(self, match):
        """Parse a citation ([label]_ style); return ([node], blank_finish)."""
        lineno = self.state_machine.abs_line_number()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        label = match.group(1)
        name = normalize_name(label)
        citation = nodes.citation('\n'.join(indented))
        citation.line = lineno
        citation += nodes.label('', label)
        citation['names'].append(name)
        self.document.note_citation(citation)
        self.document.note_explicit_target(citation, citation)
        if indented:
            self.nested_parse(indented, input_offset=offset, node=citation)
        return [citation], blank_finish

    def hyperlink_target(self, match):
        """Parse an explicit hyperlink target (.. _name: URI)."""
        pattern = self.explicit.patterns.target
        lineno = self.state_machine.abs_line_number()
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(
              match.end(), until_blank=1, strip_indent=0)
        blocktext = match.string[:match.end()] + '\n'.join(block)
        block = [escape2null(line) for line in block]
        escaped = block[0]
        blockindex = 0
        # The target name may wrap over several lines; keep appending lines
        # until the target pattern matches.
        while 1:
            targetmatch = pattern.match(escaped)
            if targetmatch:
                break
            blockindex += 1
            try:
                escaped += block[blockindex]
            except IndexError:
                raise MarkupError('malformed hyperlink target.', lineno)
        del block[:blockindex]
        block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
        target = self.make_target(block, blocktext, lineno,
                                  targetmatch.group('name'))
        return [target], blank_finish

    def make_target(self, block, block_text, lineno, target_name):
        """Build a target node (indirect or URI); may return an error node."""
        target_type, data = self.parse_target(block, block_text, lineno)
        if target_type == 'refname':
            target = nodes.target(block_text, '',
                                  refname=normalize_name(data))
            target.indirect_reference_name = data
            self.add_target(target_name, '', target, lineno)
            self.document.note_indirect_target(target)
            return target
        elif target_type == 'refuri':
            target = nodes.target(block_text, '')
            self.add_target(target_name, data, target, lineno)
            return target
        else:
            return data

    def parse_target(self, block, block_text, lineno):
        """
        Determine the type of reference of a target.
        :Return: A 2-tuple, one of:

            - 'refname' and the indirect reference name
            - 'refuri' and the URI
            - 'malformed' and a system_message node
        """
        # NOTE(review): the visible code never returns 'malformed'; only the
        # 'refname' and 'refuri' branches are reachable here.
        if block and block[-1].strip()[-1:] == '_': # possible indirect target
            reference = ' '.join([line.strip() for line in block])
            refname = self.is_reference(reference)
            if refname:
                return 'refname', refname
        # Not an indirect target: join all lines, stripping internal
        # whitespace, and treat the result as a URI.
        reference = ''.join([''.join(line.split()) for line in block])
        return 'refuri', unescape(reference)

    def is_reference(self, reference):
        """Return the reference name if `reference` is one, else None."""
        match = self.explicit.patterns.reference.match(
            whitespace_normalize_name(reference))
        if not match:
            return None
        return unescape(match.group('simple') or match.group('phrase'))

    def add_target(self, targetname, refuri, target, lineno):
        """Register `target` with the document (named or anonymous)."""
        target.line = lineno
        if targetname:
            name = normalize_name(unescape(targetname))
            target['names'].append(name)
            if refuri:
                uri = self.inliner.adjust_uri(refuri)
                if uri:
                    target['refuri'] = uri
                else:
                    raise ApplicationError('problem with URI: %r' % refuri)
            self.document.note_explicit_target(target, self.parent)
        else:                       # anonymous target
            if refuri:
                target['refuri'] = refuri
            target['anonymous'] = 1
            self.document.note_anonymous_target(target)

    def substitution_def(self, match):
        """Parse a substitution definition (.. |name| directive:: ...)."""
        pattern = self.explicit.patterns.substitution
        lineno = self.state_machine.abs_line_number()
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          strip_indent=0)
        blocktext = (match.string[:match.end()] + '\n'.join(block))
        block.disconnect()
        escaped = escape2null(block[0].rstrip())
        blockindex = 0
        # The |name| marker may wrap over several lines; keep appending
        # until the substitution pattern matches.
        while 1:
            subdefmatch = pattern.match(escaped)
            if subdefmatch:
                break
            blockindex += 1
            try:
                escaped = escaped + ' ' + escape2null(
                    block[blockindex].strip())
            except IndexError:
                raise MarkupError('malformed substitution definition.',
                                  lineno)
        del block[:blockindex]  # strip out the substitution marker
        block[0] = (block[0].strip() + ' ')[subdefmatch.end()
                                            - len(escaped) - 1:-1]
        if not block[0]:
            del block[0]
            offset += 1
        while block and not block[-1].strip():
            block.pop()
        subname = subdefmatch.group('name')
        substitution_node = nodes.substitution_definition(blocktext)
        substitution_node.line = lineno
        if not block:
            msg = self.reporter.warning(
                'Substitution definition "%s" missing contents.' % subname,
                nodes.literal_block(blocktext, blocktext), line=lineno)
            return [msg], blank_finish
        block[0] = block[0].strip()
        substitution_node['names'].append(
            nodes.whitespace_normalize_name(subname))
        new_abs_offset, blank_finish = self.nested_list_parse(
              block, input_offset=offset, node=substitution_node,
              initial_state='SubstitutionDef', blank_finish=blank_finish)
        # Move any non-inline children (e.g. system messages) out of the
        # substitution definition and into the parent.
        i = 0
        for node in substitution_node[:]:
            if not (isinstance(node, nodes.Inline) or
                    isinstance(node, nodes.Text)):
                self.parent += substitution_node[i]
                del substitution_node[i]
            else:
                i += 1
        for node in substitution_node.traverse(nodes.Element):
            if self.disallowed_inside_substitution_definitions(node):
                pformat = nodes.literal_block('', node.pformat().rstrip())
                msg = self.reporter.error(
                    'Substitution definition contains illegal element:',
                    pformat, nodes.literal_block(blocktext, blocktext),
                    line=lineno)
                return [msg], blank_finish
        if len(substitution_node) == 0:
            msg = self.reporter.warning(
                'Substitution definition "%s" empty or invalid.' % subname,
                nodes.literal_block(blocktext, blocktext), line=lineno)
            return [msg], blank_finish
        self.document.note_substitution_def(
            substitution_node, subname, self.parent)
        return [substitution_node], blank_finish

    def disallowed_inside_substitution_definitions(self, node):
        """True for nodes that may not appear in substitution definitions."""
        if (node['ids'] or
            isinstance(node, nodes.reference) and node.get('anonymous') or
            isinstance(node, nodes.footnote_reference) and node.get('auto')):
            return 1
        else:
            return 0

    def directive(self, match, **option_presets):
        """Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
        type_name = match.group(1)
        directive_class, messages = directives.directive(
            type_name, self.memo.language, self.document)
        self.parent += messages
        if directive_class:
            return self.run_directive(
                directive_class, match, type_name, option_presets)
        else:
            return self.unknown_directive(type_name)

    def run_directive(self, directive, match, type_name, option_presets):
        """
        Parse a directive then run its directive function.

        Parameters:

        - `directive`: The class implementing the directive.  Must be
          a subclass of `rst.Directive`.

        - `match`: A regular expression match object which matched the first
          line of the directive.

        - `type_name`: The directive name, as used in the source text.

        - `option_presets`: A dictionary of preset options, defaults for the
          directive options.  Currently, only an "alt" option is passed by
          substitution definitions (value: the substitution name), which may
          be used by an embedded image directive.

        Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
""" if isinstance(directive, (FunctionType, MethodType)): from docutils.parsers.rst import convert_directive_function directive = convert_directive_function(directive) lineno = self.state_machine.abs_line_number() initial_line_offset = self.state_machine.line_offset indented, indent, line_offset, blank_finish \ = self.state_machine.get_first_known_indented(match.end(), strip_top=0) block_text = '\n'.join(self.state_machine.input_lines[ initial_line_offset : self.state_machine.line_offset + 1]) try: arguments, options, content, content_offset = ( self.parse_directive_block(indented, line_offset, directive, option_presets)) except MarkupError, detail: error = self.reporter.error( 'Error in "%s" directive:\n%s.' % (type_name, ' '.join(detail.args)), nodes.literal_block(block_text, block_text), line=lineno) return [error], blank_finish directive_instance = directive( type_name, arguments, options, content, lineno, content_offset, block_text, self, self.state_machine) try: result = directive_instance.run() except docutils.parsers.rst.DirectiveError, error: msg_node = self.reporter.system_message(error.level, error.msg, source=error.source, line=error.line) msg_node += nodes.literal_block(block_text, block_text) msg_node['line'] = lineno result = [msg_node] assert isinstance(result, list), \ 'Directive "%s" must return a list of nodes.' 
% type_name for i in range(len(result)): assert isinstance(result[i], nodes.Node), \ ('Directive "%s" returned non-Node object (index %s): %r' % (type_name, i, result[i])) return (result, blank_finish or self.state_machine.is_next_line_blank()) def parse_directive_block(self, indented, line_offset, directive, option_presets): option_spec = directive.option_spec has_content = directive.has_content if indented and not indented[0].strip(): indented.trim_start() line_offset += 1 while indented and not indented[-1].strip(): indented.trim_end() if indented and (directive.required_arguments or directive.optional_arguments or option_spec): for i in range(len(indented)): if not indented[i].strip(): break else: i += 1 arg_block = indented[:i] content = indented[i+1:] content_offset = line_offset + i + 1 else: content = indented content_offset = line_offset arg_block = [] while content and not content[0].strip(): content.trim_start() content_offset += 1 if option_spec: options, arg_block = self.parse_directive_options( option_presets, option_spec, arg_block) if arg_block and not (directive.required_arguments or directive.optional_arguments): raise MarkupError('no arguments permitted; blank line ' 'required before content block') else: options = {} if directive.required_arguments or directive.optional_arguments: arguments = self.parse_directive_arguments( directive, arg_block) else: arguments = [] if content and not has_content: raise MarkupError('no content permitted') return (arguments, options, content, content_offset) def parse_directive_options(self, option_presets, option_spec, arg_block): options = option_presets.copy() for i in range(len(arg_block)): if arg_block[i][:1] == ':': opt_block = arg_block[i:] arg_block = arg_block[:i] break else: opt_block = [] if opt_block: success, data = self.parse_extension_options(option_spec, opt_block) if success: # data is a dict of options options.update(data) else: # data is an error string raise MarkupError(data) return options, 
arg_block def parse_directive_arguments(self, directive, arg_block): required = directive.required_arguments optional = directive.optional_arguments arg_text = '\n'.join(arg_block) arguments = arg_text.split() if len(arguments) < required: raise MarkupError('%s argument(s) required, %s supplied' % (required, len(arguments))) elif len(arguments) > required + optional: if directive.final_argument_whitespace: arguments = arg_text.split(None, required + optional - 1) else: raise MarkupError( 'maximum %s argument(s) allowed, %s supplied' % (required + optional, len(arguments))) return arguments def parse_extension_options(self, option_spec, datalines): """ Parse `datalines` for a field list containing extension options matching `option_spec`. :Parameters: - `option_spec`: a mapping of option name to conversion function, which should raise an exception on bad input. - `datalines`: a list of input strings. :Return: - Success value, 1 or 0. - An option dictionary on success, an error string on failure. """ node = nodes.field_list() newline_offset, blank_finish = self.nested_list_parse( datalines, 0, node, initial_state='ExtensionOptions', blank_finish=1) if newline_offset != len(datalines): # incomplete parse of block return 0, 'invalid option block' try: options = utils.extract_extension_options(node, option_spec) except KeyError, detail: return 0, ('unknown option: "%s"' % detail.args[0]) except (ValueError, TypeError), detail: return 0, ('invalid option value: %s' % ' '.join(detail.args)) except utils.ExtensionOptionError, detail: return 0, ('invalid option data: %s' % ' '.join(detail.args)) if blank_finish: return 1, options else: return 0, 'option data incompletely parsed' def unknown_directive(self, type_name): lineno = self.state_machine.abs_line_number() indented, indent, offset, blank_finish = \ self.state_machine.get_first_known_indented(0, strip_indent=0) text = '\n'.join(indented) error = self.reporter.error( 'Unknown directive type "%s".' 
              % type_name,
              nodes.literal_block(text, text), line=lineno)
        return [error], blank_finish

    def comment(self, match):
        """Parse a comment; return ([comment_node], blank_finish)."""
        if not match.string[match.end():].strip() \
              and self.state_machine.is_next_line_blank(): # an empty comment?
            return [nodes.comment()], 1 # "A tiny but practical wart."
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        while indented and not indented[-1].strip():
            indented.trim_end()
        text = '\n'.join(indented)
        return [nodes.comment(text, text)], blank_finish

    # Ordered (handler, pattern) pairs tried by `explicit_construct`; the
    # handlers are the plain functions above, called as method(self, match).
    explicit.constructs = [
          (footnote,
           re.compile(r"""
                      \.\.[ ]+          # explicit markup start
                      \[
                      (                 # footnote label:
                          [0-9]+          # manually numbered footnote
                        |               # *OR*
                          \#              # anonymous auto-numbered footnote
                        |               # *OR*
                          \#%s            # auto-number ed?) footnote label
                        |               # *OR*
                          \*              # auto-symbol footnote
                      )
                      \]
                      ([ ]+|$)          # whitespace or end of line
                      """ % Inliner.simplename,
                      re.VERBOSE | re.UNICODE)),
          (citation,
           re.compile(r"""
                      \.\.[ ]+          # explicit markup start
                      \[(%s)\]          # citation label
                      ([ ]+|$)          # whitespace or end of line
                      """ % Inliner.simplename,
                      re.VERBOSE | re.UNICODE)),
          (hyperlink_target,
           re.compile(r"""
                      \.\.[ ]+          # explicit markup start
                      _                 # target indicator
                      (?![ ]|$)         # first char. not space or EOL
                      """, re.VERBOSE)),
          (substitution_def,
           re.compile(r"""
                      \.\.[ ]+          # explicit markup start
                      \|                # substitution indicator
                      (?![ ]|$)         # first char. not space or EOL
                      """, re.VERBOSE)),
          (directive,
           re.compile(r"""
                      \.\.[ ]+          # explicit markup start
                      (%s)              # directive name
                      [ ]?              # optional space
                      ::                # directive delimiter
                      ([ ]+|$)          # whitespace or end of line
                      """ % Inliner.simplename,
                      re.VERBOSE | re.UNICODE))]

    def explicit_markup(self, match, context, next_state):
        """Footnotes, hyperlink targets, directives, comments."""
        nodelist, blank_finish = self.explicit_construct(match)
        self.parent += nodelist
        self.explicit_list(blank_finish)
        return [], next_state, []

    def explicit_construct(self, match):
        """Determine which explicit construct this is, parse & return it."""
        errors = []
        for method, pattern in self.explicit.constructs:
            expmatch = pattern.match(match.string)
            if expmatch:
                try:
                    return method(self, expmatch)
                except MarkupError, error: # never reached?
                    message, lineno = error.args
                    errors.append(self.reporter.warning(message,
                                                        line=lineno))
                    break
        # No construct matched (or one failed): fall back to a comment.
        nodelist, blank_finish = self.comment(match)
        return nodelist + errors, blank_finish

    def explicit_list(self, blank_finish):
        """
        Create a nested state machine for a series of explicit markup
        constructs (including anonymous hyperlink targets).
        """
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=self.parent, initial_state='Explicit',
              blank_finish=blank_finish,
              match_titles=self.state_machine.match_titles)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Explicit markup')

    def anonymous(self, match, context, next_state):
        """Anonymous hyperlink targets."""
        nodelist, blank_finish = self.anonymous_target(match)
        self.parent += nodelist
        self.explicit_list(blank_finish)
        return [], next_state, []

    def anonymous_target(self, match):
        """Parse an anonymous target (__ URI); return ([node], blank_finish)."""
        lineno = self.state_machine.abs_line_number()
        block, indent, offset, blank_finish \
              = self.state_machine.get_first_known_indented(match.end(),
                                                            until_blank=1)
        blocktext = match.string[:match.end()] + '\n'.join(block)
        block = [escape2null(line) for line in block]
        target = self.make_target(block, blocktext, lineno, '')
        return [target], blank_finish

    def line(self, match, context, next_state):
        """Section title overline or transition marker."""
        if self.state_machine.match_titles:
            return [match.string], 'Line', []
        elif match.string.strip() == '::':
            # A lone "::": reclassify as ordinary text (literal block intro).
            raise statemachine.TransitionCorrection('text')
        elif len(match.string.strip()) < 4:
            msg = self.reporter.info(
                'Unexpected possible title overline or transition.\n'
                "Treating it as ordinary text because it's so short.",
                line=self.state_machine.abs_line_number())
            self.parent += msg
            raise statemachine.TransitionCorrection('text')
        else:
            blocktext = self.state_machine.line
            msg = self.reporter.severe(
                  'Unexpected section title or transition.',
                  nodes.literal_block(blocktext, blocktext),
                  line=self.state_machine.abs_line_number())
            self.parent += msg
            return [], next_state, []

    def text(self, match, context, next_state):
        """Titles, definition lists, paragraphs."""
        return [match.string], 'Text', []


class RFC2822Body(Body):

    """
    RFC2822 headers are only valid as
    the first constructs in documents.  As soon as anything else appears,
    the `Body` state should take over.
    """

    patterns = Body.patterns.copy() # can't modify the original
    patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
    initial_transitions = [(name, 'Body')
                           for name in Body.initial_transitions]
    initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'

    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item."""
        fieldlist = nodes.field_list(classes=['rfc2822'])
        self.parent += fieldlist
        field, blank_finish = self.rfc2822_field(match)
        fieldlist += field
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=fieldlist, initial_state='RFC2822List',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning(
                  'RFC2822-style field list')
        return [], next_state, []

    def rfc2822_field(self, match):
        """Parse one RFC2822-style field; return (field_node, blank_finish)."""
        name = match.string[:match.string.find(':')]
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        fieldnode = nodes.field()
        fieldnode += nodes.field_name(name, name)
        fieldbody = nodes.field_body('\n'.join(indented))
        fieldnode += fieldbody
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=fieldbody)
        return fieldnode, blank_finish


class SpecializedBody(Body):

    """
    Superclass for second and subsequent compound element members.
    Compound elements are lists and list-like constructs.

    All transition methods are disabled (redefined as `invalid_input`).
    Override individual methods in subclasses to re-enable.

    For example, once an initial bullet list item, say, is recognized, the
    `BulletList` subclass takes over, with a "bullet_list" node as its
    container.  Upon encountering the initial bullet list item, `Body.bullet`
    calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`),
    which starts up a nested parsing session with `BulletList` as the initial
    state.  Only the ``bullet`` transition method is enabled in `BulletList`;
    as long as only bullet list items are encountered, they are parsed and
    inserted into the container.  The first construct which is *not* a bullet
    list item triggers the `invalid_input` method, which ends the nested
    parse and closes the container.  `BulletList` needs to recognize input
    that is invalid in the context of a bullet list, which means everything
    *other than* bullet list items, so it inherits the transition list
    created in `Body`.
    """

    def invalid_input(self, match=None, context=None, next_state=None):
        """Not a compound element member. Abort this state machine."""
        self.state_machine.previous_line() # back up so parent SM can reassess
        raise EOFError

    # Disable all inherited transitions; subclasses re-enable selectively.
    indent = invalid_input
    bullet = invalid_input
    enumerator = invalid_input
    field_marker = invalid_input
    option_marker = invalid_input
    doctest = invalid_input
    line_block = invalid_input
    grid_table_top = invalid_input
    simple_table_top = invalid_input
    explicit_markup = invalid_input
    anonymous = invalid_input
    line = invalid_input
    text = invalid_input


class BulletList(SpecializedBody):

    """Second and subsequent bullet_list list_items."""

    def bullet(self, match, context, next_state):
        """Bullet list item."""
        if match.string[0] != self.parent['bullet']:
            # different bullet: new list
            self.invalid_input()
        listitem, blank_finish = self.list_item(match.end())
        self.parent += listitem
        self.blank_finish = blank_finish
        return [], next_state, []


class DefinitionList(SpecializedBody):

    """Second and subsequent definition_list_items."""

    def text(self, match, context, next_state):
        """Definition lists."""
        return [match.string], 'Definition', []


class EnumeratedList(SpecializedBody):

    """Second and subsequent enumerated_list list_items."""

    def enumerator(self, match, context, next_state):
        """Enumerated list item."""
        format, sequence, text, ordinal = self.parse_enumerator(
              match, self.parent['enumtype'])
        # The item must continue the current enumeration (same format and
        # sequence, consecutive ordinal) or it starts a new list.
        if ( format != self.format
             or (sequence != '#' and (sequence != self.parent['enumtype']
                                      or self.auto
                                      or ordinal != (self.lastordinal + 1)))
             or not self.is_enumerated_list_item(ordinal, sequence, format)):
            # different enumeration: new list
            self.invalid_input()
        if sequence == '#':
            self.auto = 1
        listitem, blank_finish = self.list_item(match.end())
        self.parent += listitem
        self.blank_finish = blank_finish
        self.lastordinal = ordinal
        return [], next_state, []


class FieldList(SpecializedBody):

    """Second and subsequent field_list fields."""

    def field_marker(self, match, context, next_state):
        """Field list field."""
        field, blank_finish = self.field(match)
        self.parent += field
        self.blank_finish = blank_finish
        return [], next_state, []


class OptionList(SpecializedBody):

    """Second and subsequent option_list option_list_items."""

    def option_marker(self, match, context, next_state):
        """Option list item."""
        try:
            option_list_item, blank_finish = self.option_list_item(match)
        except MarkupError, (message, lineno):
            # Invalid marker: end this option list.
            self.invalid_input()
        self.parent += option_list_item
        self.blank_finish = blank_finish
        return [], next_state, []


class RFC2822List(SpecializedBody, RFC2822Body):

    """Second and subsequent RFC2822-style field_list fields."""

    patterns = RFC2822Body.patterns
    initial_transitions = RFC2822Body.initial_transitions

    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item."""
        field, blank_finish = self.rfc2822_field(match)
        self.parent += field
        self.blank_finish = blank_finish
        return [], 'RFC2822List', []

    blank = SpecializedBody.invalid_input


class ExtensionOptions(FieldList):

    """
    Parse field_list fields for extension options.

    No nested parsing is done (including inline markup parsing).
""" def parse_field_body(self, indented, offset, node): """Override `Body.parse_field_body` for simpler parsing.""" lines = [] for line in list(indented) + ['']: if line.strip(): lines.append(line) elif lines: text = '\n'.join(lines) node += nodes.paragraph(text, text) lines = [] class LineBlock(SpecializedBody): """Second and subsequent lines of a line_block.""" blank = SpecializedBody.invalid_input def line_block(self, match, context, next_state): """New line of line block.""" lineno = self.state_machine.abs_line_number() line, messages, blank_finish = self.line_block_line(match, lineno) self.parent += line self.parent.parent += messages self.blank_finish = blank_finish return [], next_state, [] class Explicit(SpecializedBody): """Second and subsequent explicit markup construct.""" def explicit_markup(self, match, context, next_state): """Footnotes, hyperlink targets, directives, comments.""" nodelist, blank_finish = self.explicit_construct(match) self.parent += nodelist self.blank_finish = blank_finish return [], next_state, [] def anonymous(self, match, context, next_state): """Anonymous hyperlink targets.""" nodelist, blank_finish = self.anonymous_target(match) self.parent += nodelist self.blank_finish = blank_finish return [], next_state, [] blank = SpecializedBody.invalid_input class SubstitutionDef(Body): """ Parser for the contents of a substitution_definition element. 
""" patterns = { 'embedded_directive': re.compile(r'(%s)::( +|$)' % Inliner.simplename, re.UNICODE), 'text': r''} initial_transitions = ['embedded_directive', 'text'] def embedded_directive(self, match, context, next_state): nodelist, blank_finish = self.directive(match, alt=self.parent['names'][0]) self.parent += nodelist if not self.state_machine.at_eof(): self.blank_finish = blank_finish raise EOFError def text(self, match, context, next_state): if not self.state_machine.at_eof(): self.blank_finish = self.state_machine.is_next_line_blank() raise EOFError class Text(RSTState): """ Classifier of second line of a text block. Could be a paragraph, a definition list item, or a title. """ patterns = {'underline': Body.patterns['line'], 'text': r''} initial_transitions = [('underline', 'Body'), ('text', 'Body')] def blank(self, match, context, next_state): """End of paragraph.""" paragraph, literalnext = self.paragraph( context, self.state_machine.abs_line_number() - 1) self.parent += paragraph if literalnext: self.parent += self.literal_block() return [], 'Body', [] def eof(self, context): if context: self.blank(None, context, None) return [] def indent(self, match, context, next_state): """Definition list item.""" definitionlist = nodes.definition_list() definitionlistitem, blank_finish = self.definition_list_item(context) definitionlist += definitionlistitem self.parent += definitionlist offset = self.state_machine.line_offset + 1 # next line newline_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=definitionlist, initial_state='DefinitionList', blank_finish=blank_finish, blank_finish_state='Definition') self.goto_line(newline_offset) if not blank_finish: self.parent += self.unindent_warning('Definition list') return [], 'Body', [] def underline(self, match, context, next_state): """Section title.""" lineno = self.state_machine.abs_line_number() title = 
context[0].rstrip() underline = match.string.rstrip() source = title + '\n' + underline messages = [] if column_width(title) > len(underline): if len(underline) < 4: if self.state_machine.match_titles: msg = self.reporter.info( 'Possible title underline, too short for the title.\n' "Treating it as ordinary text because it's so short.", line=lineno) self.parent += msg raise statemachine.TransitionCorrection('text') else: blocktext = context[0] + '\n' + self.state_machine.line msg = self.reporter.warning( 'Title underline too short.', nodes.literal_block(blocktext, blocktext), line=lineno) messages.append(msg) if not self.state_machine.match_titles: blocktext = context[0] + '\n' + self.state_machine.line msg = self.reporter.severe( 'Unexpected section title.', nodes.literal_block(blocktext, blocktext), line=lineno) self.parent += messages self.parent += msg return [], next_state, [] style = underline[0] context[:] = [] self.section(title, source, style, lineno - 1, messages) return [], next_state, [] def text(self, match, context, next_state): """Paragraph.""" startline = self.state_machine.abs_line_number() - 1 msg = None try: block = self.state_machine.get_text_block(flush_left=1) except statemachine.UnexpectedIndentationError, instance: block, source, lineno = instance.args msg = self.reporter.error('Unexpected indentation.', source=source, line=lineno) lines = context + list(block) paragraph, literalnext = self.paragraph(lines, startline) self.parent += paragraph self.parent += msg if literalnext: try: self.state_machine.next_line() except EOFError: pass self.parent += self.literal_block() return [], next_state, [] def literal_block(self): """Return a list of nodes.""" indented, indent, offset, blank_finish = \ self.state_machine.get_indented() while indented and not indented[-1].strip(): indented.trim_end() if not indented: return self.quoted_literal_block() data = '\n'.join(indented) literal_block = nodes.literal_block(data, data) literal_block.line = offset + 
1 nodelist = [literal_block] if not blank_finish: nodelist.append(self.unindent_warning('Literal block')) return nodelist def quoted_literal_block(self): abs_line_offset = self.state_machine.abs_line_offset() offset = self.state_machine.line_offset parent_node = nodes.Element() new_abs_offset = self.nested_parse( self.state_machine.input_lines[offset:], input_offset=abs_line_offset, node=parent_node, match_titles=0, state_machine_kwargs={'state_classes': (QuotedLiteralBlock,), 'initial_state': 'QuotedLiteralBlock'}) self.goto_line(new_abs_offset) return parent_node.children def definition_list_item(self, termline): indented, indent, line_offset, blank_finish = \ self.state_machine.get_indented() definitionlistitem = nodes.definition_list_item( '\n'.join(termline + list(indented))) lineno = self.state_machine.abs_line_number() - 1 definitionlistitem.line = lineno termlist, messages = self.term(termline, lineno) definitionlistitem += termlist definition = nodes.definition('', *messages) definitionlistitem += definition if termline[0][-2:] == '::': definition += self.reporter.info( 'Blank line missing before literal block (after the "::")? 
' 'Interpreted as a definition list item.', line=line_offset+1) self.nested_parse(indented, input_offset=line_offset, node=definition) return definitionlistitem, blank_finish classifier_delimiter = re.compile(' +: +') def term(self, lines, lineno): """Return a definition_list's term and optional classifiers.""" assert len(lines) == 1 text_nodes, messages = self.inline_text(lines[0], lineno) term_node = nodes.term() node_list = [term_node] for i in range(len(text_nodes)): node = text_nodes[i] if isinstance(node, nodes.Text): parts = self.classifier_delimiter.split(node.rawsource) if len(parts) == 1: node_list[-1] += node else: node_list[-1] += nodes.Text(parts[0].rstrip()) for part in parts[1:]: classifier_node = nodes.classifier('', part) node_list.append(classifier_node) else: node_list[-1] += node return node_list, messages class SpecializedText(Text): """ Superclass for second and subsequent lines of Text-variants. All transition methods are disabled. Override individual methods in subclasses to re-enable. """ def eof(self, context): """Incomplete construct.""" return [] def invalid_input(self, match=None, context=None, next_state=None): """Not a compound element member. Abort this state machine.""" raise EOFError blank = invalid_input indent = invalid_input underline = invalid_input text = invalid_input class Definition(SpecializedText): """Second line of potential definition_list_item.""" def eof(self, context): """Not a definition.""" self.state_machine.previous_line(2) # so parent SM can reassess return [] def indent(self, match, context, next_state): """Definition list item.""" definitionlistitem, blank_finish = self.definition_list_item(context) self.parent += definitionlistitem self.blank_finish = blank_finish return [], 'DefinitionList', [] class Line(SpecializedText): """ Second line of over- & underlined section title or transition marker. """ eofcheck = 1 # @@@ ??? 
"""Set to 0 while parsing sections, so that we don't catch the EOF.""" def eof(self, context): """Transition marker at end of section or document.""" marker = context[0].strip() if self.memo.section_bubble_up_kludge: self.memo.section_bubble_up_kludge = 0 elif len(marker) < 4: self.state_correction(context) if self.eofcheck: # ignore EOFError with sections lineno = self.state_machine.abs_line_number() - 1 transition = nodes.transition(rawsource=context[0]) transition.line = lineno self.parent += transition self.eofcheck = 1 return [] def blank(self, match, context, next_state): """Transition marker.""" lineno = self.state_machine.abs_line_number() - 1 marker = context[0].strip() if len(marker) < 4: self.state_correction(context) transition = nodes.transition(rawsource=marker) transition.line = lineno self.parent += transition return [], 'Body', [] def text(self, match, context, next_state): """Potential over- & underlined title.""" lineno = self.state_machine.abs_line_number() - 1 overline = context[0] title = match.string underline = '' try: underline = self.state_machine.next_line() except EOFError: blocktext = overline + '\n' + title if len(overline.rstrip()) < 4: self.short_overline(context, blocktext, lineno, 2) else: msg = self.reporter.severe( 'Incomplete section title.', nodes.literal_block(blocktext, blocktext), line=lineno) self.parent += msg return [], 'Body', [] source = '%s\n%s\n%s' % (overline, title, underline) overline = overline.rstrip() underline = underline.rstrip() if not self.transitions['underline'][0].match(underline): blocktext = overline + '\n' + title + '\n' + underline if len(overline.rstrip()) < 4: self.short_overline(context, blocktext, lineno, 2) else: msg = self.reporter.severe( 'Missing matching underline for section title overline.', nodes.literal_block(source, source), line=lineno) self.parent += msg return [], 'Body', [] elif overline != underline: blocktext = overline + '\n' + title + '\n' + underline if len(overline.rstrip()) < 
4: self.short_overline(context, blocktext, lineno, 2) else: msg = self.reporter.severe( 'Title overline & underline mismatch.', nodes.literal_block(source, source), line=lineno) self.parent += msg return [], 'Body', [] title = title.rstrip() messages = [] if column_width(title) > len(overline): blocktext = overline + '\n' + title + '\n' + underline if len(overline.rstrip()) < 4: self.short_overline(context, blocktext, lineno, 2) else: msg = self.reporter.warning( 'Title overline too short.', nodes.literal_block(source, source), line=lineno) messages.append(msg) style = (overline[0], underline[0]) self.eofcheck = 0 # @@@ not sure this is correct self.section(title.lstrip(), source, style, lineno + 1, messages) self.eofcheck = 1 return [], 'Body', [] indent = text # indented title def underline(self, match, context, next_state): overline = context[0] blocktext = overline + '\n' + self.state_machine.line lineno = self.state_machine.abs_line_number() - 1 if len(overline.rstrip()) < 4: self.short_overline(context, blocktext, lineno, 1) msg = self.reporter.error( 'Invalid section title or transition marker.', nodes.literal_block(blocktext, blocktext), line=lineno) self.parent += msg return [], 'Body', [] def short_overline(self, context, blocktext, lineno, lines=1): msg = self.reporter.info( 'Possible incomplete section title.\nTreating the overline as ' "ordinary text because it's so short.", line=lineno) self.parent += msg self.state_correction(context, lines) def state_correction(self, context, lines=1): self.state_machine.previous_line(lines) context[:] = [] raise statemachine.StateCorrection('Body', 'text') class QuotedLiteralBlock(RSTState): """ Nested parse handler for quoted (unindented) literal blocks. Special-purpose. Not for inclusion in `state_classes`. 
""" patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats, 'text': r''} initial_transitions = ('initial_quoted', 'text') def __init__(self, state_machine, debug=0): RSTState.__init__(self, state_machine, debug) self.messages = [] self.initial_lineno = None def blank(self, match, context, next_state): if context: raise EOFError else: return context, next_state, [] def eof(self, context): if context: text = '\n'.join(context) literal_block = nodes.literal_block(text, text) literal_block.line = self.initial_lineno self.parent += literal_block else: self.parent += self.reporter.warning( 'Literal block expected; none found.', line=self.state_machine.abs_line_number()) self.state_machine.previous_line() self.parent += self.messages return [] def indent(self, match, context, next_state): assert context, ('QuotedLiteralBlock.indent: context should not ' 'be empty!') self.messages.append( self.reporter.error('Unexpected indentation.', line=self.state_machine.abs_line_number())) self.state_machine.previous_line() raise EOFError def initial_quoted(self, match, context, next_state): """Match arbitrary quote character on the first line only.""" self.remove_transition('initial_quoted') quote = match.string[0] pattern = re.compile(re.escape(quote)) # New transition matches consistent quotes only: self.add_transition('quoted', (pattern, self.quoted, self.__class__.__name__)) self.initial_lineno = self.state_machine.abs_line_number() return [match.string], next_state, [] def quoted(self, match, context, next_state): """Match consistent quotes on subsequent lines.""" context.append(match.string) return context, next_state, [] def text(self, match, context, next_state): if context: self.messages.append( self.reporter.error('Inconsistent literal block quoting.', line=self.state_machine.abs_line_number())) self.state_machine.previous_line() raise EOFError state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList, OptionList, LineBlock, ExtensionOptions, 
Explicit, Text, Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List) """Standard set of State classes used to start `RSTStateMachine`."""
unknown
codeparrot/codeparrot-clean
# This file is part of VoltDB. # Copyright (C) 2008-2014 VoltDB Inc. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # All the commands supported by the Voter application. 
import os @VOLT.Command(description = 'Build the Voter application and catalog.', options = VOLT.BooleanOption('-C', '--conditional', 'conditional', 'only build when the catalog file is missing')) def build(runner): if not runner.opts.conditional or not os.path.exists('voter.jar'): runner.java.compile('obj', 'src/voter/*.java', 'src/voter/procedures/*.java') runner.call('volt.compile', '-c', 'obj', '-o', 'voter.jar', 'ddl.sql') @VOLT.Command(description = 'Clean the Voter build output.') def clean(runner): runner.shell('rm', '-rfv', 'obj', 'debugoutput', 'voter.jar', 'voltdbroot') @VOLT.Server('create', description = 'Start the Voter VoltDB server.', command_arguments = 'voter.jar', classpath = 'obj') def server(runner): runner.call('build', '-C') runner.go() @VOLT.Java('voter.AsyncBenchmark', classpath = 'obj', description = 'Run the Voter asynchronous benchmark.') def async(runner): runner.call('build', '-C') runner.go() @VOLT.Java('voter.SyncBenchmark', classpath = 'obj', description = 'Run the Voter synchronous benchmark.') def sync(runner): runner.call('build', '-C') runner.go() @VOLT.Java('voter.JDBCBenchmark', classpath = 'obj', description = 'Run the Voter JDBC benchmark.') def jdbc(runner): runner.call('build', '-C') runner.go() @VOLT.Java('voter.SimpleBenchmark', classpath = 'obj', description = 'Run the Voter simple benchmark.') def simple(runner): runner.call('build', '-C') runner.go()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # # Copyright (c) 2016 Simply Measured # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # # This method will be used by the mock to replace requests.get """ Unit test for the Salesforce contrib package """ from luigi.contrib.salesforce import SalesforceAPI, QuerySalesforce from helpers import unittest import mock from luigi.mock import MockTarget from luigi.six import PY3 import re def mocked_requests_get(*args, **kwargs): class MockResponse: def __init__(self, body, status_code): self.body = body self.status_code = status_code @property def text(self): return self.body def raise_for_status(self): return None result_list = ( '<result-list xmlns="http://www.force.com/2009/06/asyncapi/dataload">' '<result>1234</result><result>1235</result><result>1236</result>' '</result-list>' ) return MockResponse(result_list, 200) # Keep open around so we can use it in the mock responses old__open = open def mocked_open(*args, **kwargs): if re.match("job_data", args[0]): return MockTarget(args[0]).open(args[1]) else: return old__open(*args) class TestSalesforceAPI(unittest.TestCase): # We patch 'requests.get' with our own method. The mock object is passed in to our test case method. 
@mock.patch('requests.get', side_effect=mocked_requests_get) def test_deprecated_results(self, mock_get): sf = SalesforceAPI('xx', 'xx', 'xx') result_id = sf.get_batch_results('job_id', 'batch_id') self.assertEqual('1234', result_id) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_result_ids(self, mock_get): sf = SalesforceAPI('xx', 'xx', 'xx') result_ids = sf.get_batch_result_ids('job_id', 'batch_id') self.assertEqual(['1234', '1235', '1236'], result_ids) class TestQuerySalesforce(QuerySalesforce): def output(self): return MockTarget('job_data.csv') @property def object_name(self): return 'dual' @property def soql(self): return "SELECT * FROM %s" % self.object_name class TestSalesforceQuery(unittest.TestCase): patch_name = '__builtin__.open' if PY3: patch_name = 'builtins.open' @mock.patch(patch_name, side_effect=mocked_open) def setUp(self, mock_open): MockTarget.fs.clear() self.result_ids = ['a', 'b', 'c'] counter = 1 self.all_lines = "Lines\n" self.header = "Lines" for i, id in enumerate(self.result_ids): filename = "%s.%d" % ('job_data.csv', i) with MockTarget(filename).open('w') as f: line = "%d line\n%d line" % ((counter), (counter+1)) f.write(self.header + "\n" + line + "\n") self.all_lines += line+"\n" counter += 2 @mock.patch(patch_name, side_effect=mocked_open) def test_multi_csv_download(self, mock_open): qsf = TestQuerySalesforce() qsf.merge_batch_results(self.result_ids) self.assertEqual(MockTarget(qsf.output().path).open('r').read(), self.all_lines)
unknown
codeparrot/codeparrot-clean
// RUN: %check_clang_tidy %s altera-struct-pack-align %t -- -header-filter=.* // Struct needs both alignment and packing struct error { char a; double b; char c; }; // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'error' is inefficient due to padding; only needs 10 bytes but is using 24 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((packed))" to reduce the amount of padding applied to struct 'error' // CHECK-MESSAGES: :[[@LINE-7]]:8: warning: accessing fields in struct 'error' is inefficient due to poor alignment; currently aligned to 8 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-8]]:8: note: use "__attribute__((aligned(16)))" to align struct 'error' to 16 bytes // CHECK-FIXES: } __attribute__((packed)) __attribute__((aligned(16))); // Struct is explicitly packed, but needs alignment struct error_packed { char a; double b; char c; } __attribute__((packed)); // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'error_packed' is inefficient due to poor alignment; currently aligned to 1 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((aligned(16)))" to align struct 'error_packed' to 16 bytes // CHECK-FIXES: } __attribute__((aligned(16))) __attribute__((packed)); // Struct is properly packed, but needs alignment struct align_only { char a; char b; char c; char d; int e; double f; }; // CHECK-MESSAGES: :[[@LINE-8]]:8: warning: accessing fields in struct 'align_only' is inefficient due to poor alignment; currently aligned to 8 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-9]]:8: note: use "__attribute__((aligned(16)))" to align struct 'align_only' to 16 bytes // CHECK-FIXES: } __attribute__((aligned(16))); // Struct is perfectly packed but wrongly aligned struct bad_align { char a; double b; 
char c; } __attribute__((packed)) __attribute__((aligned(8))); // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'bad_align' is inefficient due to poor alignment; currently aligned to 8 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((aligned(16)))" to align struct 'bad_align' to 16 bytes // CHECK-FIXES: } __attribute__((packed)) __attribute__((aligned(16))); struct bad_align2 { char a; double b; char c; } __attribute__((packed)) __attribute__((aligned(32))); // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'bad_align2' is inefficient due to poor alignment; currently aligned to 32 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((aligned(16)))" to align struct 'bad_align2' to 16 bytes // CHECK-FIXES: } __attribute__((packed)) __attribute__((aligned(16))); struct bad_align3 { char a; double b; char c; } __attribute__((packed)) __attribute__((aligned(4))); // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'bad_align3' is inefficient due to poor alignment; currently aligned to 4 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((aligned(16)))" to align struct 'bad_align3' to 16 bytes // CHECK-FIXES: } __attribute__((packed)) __attribute__((aligned(16))); // Struct is both perfectly packed and aligned struct success { char a; double b; char c; } __attribute__((packed)) __attribute__((aligned(16))); //Should take 10 bytes and be aligned to 16 bytes // Struct is properly packed, and explicitly aligned struct success2 { int a; int b; int c; } __attribute__((aligned(16))); // If struct is properly aligned, packing not needed struct success3 { char a; double b; char c; } __attribute__((aligned(16))); // If struct is templated, warnings should not be triggered 
template <typename A, typename B> struct success4 { A a; B b; int c; }; // Warnings should not trigger on struct instantiations void no_trigger_on_instantiation() { struct bad_align3 instantiated { 'a', 0.001, 'b' }; } // Make sure that we don't recommend aligning an empty struct to zero bytes (PR#51620) struct StructWithNoFields {}; struct ContainsStructWithNoFields { StructWithNoFields s; }; // Make sure that an empty struct is treated like "char" for padding and alignment purposes struct ContainsStructWithNoFields2 { StructWithNoFields s; double d; StructWithNoFields t; }; // CHECK-MESSAGES: :[[@LINE-5]]:8: warning: accessing fields in struct 'ContainsStructWithNoFields2' is inefficient due to padding; only needs 10 bytes but is using 24 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-6]]:8: note: use "__attribute__((packed))" to reduce the amount of padding applied to struct 'ContainsStructWithNoFields2' // CHECK-MESSAGES: :[[@LINE-7]]:8: warning: accessing fields in struct 'ContainsStructWithNoFields2' is inefficient due to poor alignment; currently aligned to 8 bytes, but recommended alignment is 16 bytes [altera-struct-pack-align] // CHECK-MESSAGES: :[[@LINE-8]]:8: note: use "__attribute__((aligned(16)))" to align struct 'ContainsStructWithNoFields2' to 16 bytes // CHECK-FIXES: } __attribute__((packed)) __attribute__((aligned(16)));
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-tidy/checkers/altera/struct-pack-align.cpp
#!/usr/bin/env python '''Test that mouse cursor can be set to a platform-dependent image. Expected behaviour: One window will be opened. Press the left and right arrow keys to cycle through the system mouse cursors. The current cursor selected will be printed to the terminal. Note that not all cursors are unique on each platform; for example, if a platform doesn't define a cursor for a given name, a suitable replacement (e.g., a plain arrow) will be used instead. Close the window or press ESC to end the test. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: WINDOW_SET_MOUSE_VISIBLE.py 703 2007-02-28 14:18:00Z Alex.Holkner $' import unittest from pyglet import window from pyglet.window import key from pyglet.gl import * class WINDOW_SET_MOUSE_PLATFORM_CURSOR(unittest.TestCase): i = 0 def on_key_press(self, symbol, modifiers): names = [ self.w.CURSOR_DEFAULT, self.w.CURSOR_CROSSHAIR, self.w.CURSOR_HAND, self.w.CURSOR_HELP, self.w.CURSOR_NO, self.w.CURSOR_SIZE, self.w.CURSOR_SIZE_UP, self.w.CURSOR_SIZE_UP_RIGHT, self.w.CURSOR_SIZE_RIGHT, self.w.CURSOR_SIZE_DOWN_RIGHT, self.w.CURSOR_SIZE_DOWN, self.w.CURSOR_SIZE_DOWN_LEFT, self.w.CURSOR_SIZE_LEFT, self.w.CURSOR_SIZE_UP_LEFT, self.w.CURSOR_SIZE_UP_DOWN, self.w.CURSOR_SIZE_LEFT_RIGHT, self.w.CURSOR_TEXT, self.w.CURSOR_WAIT, self.w.CURSOR_WAIT_ARROW, ] if symbol == key.ESCAPE: self.w.on_close() if symbol == key.RIGHT: self.i = (self.i + 1) % len(names) elif symbol == key.LEFT: self.i = (self.i - 1) % len(names) cursor = self.w.get_system_mouse_cursor(names[self.i]) self.w.set_mouse_cursor(cursor) print 'Set cursor to "%s"' % names[self.i] return True def test_set_visible(self): self.width, self.height = 200, 200 self.w = w = window.Window(self.width, self.height) w.push_handlers(self) while not w.has_exit: glClear(GL_COLOR_BUFFER_BIT) w.dispatch_events() w.flip() w.close() if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
import copy import os import sys from importlib import import_module from importlib.util import find_spec as importlib_find def cached_import(module_path, class_name): # Check whether module is loaded and fully initialized. if not ( (module := sys.modules.get(module_path)) and (spec := getattr(module, "__spec__", None)) and getattr(spec, "_initializing", False) is False ): module = import_module(module_path) return getattr(module, class_name) def import_string(dotted_path): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit(".", 1) except ValueError as err: raise ImportError("%s doesn't look like a module path" % dotted_path) from err try: return cached_import(module_path, class_name) except AttributeError as err: raise ImportError( 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name) ) from err def autodiscover_modules(*args, **kwargs): """ Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. """ from django.apps import apps register_to = kwargs.get("register_to") for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module("%s.%s" % (app_config.name, module_to_search)) except Exception: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. 
If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" try: package_name = package.__name__ package_path = package.__path__ except AttributeError: # package isn't a package. return False full_module_name = package_name + "." + module_name try: return importlib_find(full_module_name, package_path) is not None except ModuleNotFoundError: # When module_name is an invalid dotted path, Python raises # ModuleNotFoundError. return False def module_dir(module): """ Find the name of the directory that contains a module, if possible. Raise ValueError otherwise, e.g. for namespace packages that are split over several directories. """ # Convert to list because __path__ may not support indexing. paths = list(getattr(module, "__path__", [])) if len(paths) == 1: return paths[0] else: filename = getattr(module, "__file__", None) if filename is not None: return os.path.dirname(filename) raise ValueError("Cannot determine directory containing %s" % module)
python
github
https://github.com/django/django
django/utils/module_loading.py
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * * SPDX-License-Identifier: curl * ***************************************************************************/ /* <DESC> * simple HTTP POST using the easy interface * </DESC> */ #include <stdio.h> #include <curl/curl.h> int main(void) { CURL *curl; CURLcode result; /* In Windows, this inits the Winsock stuff */ result = curl_global_init(CURL_GLOBAL_ALL); if(result != CURLE_OK) return (int)result; /* get a curl handle */ curl = curl_easy_init(); if(curl) { /* First set the URL that is about to receive our POST. This URL can just as well be an https:// URL if that is what should receive the data. */ curl_easy_setopt(curl, CURLOPT_URL, "http://postit.example.com/moo.cgi"); /* Now specify the POST data */ curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel&project=curl"); /* Perform the request, result gets the return code */ result = curl_easy_perform(curl); /* Check for errors */ if(result != CURLE_OK) fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(result)); /* always cleanup */ curl_easy_cleanup(curl); } curl_global_cleanup(); return (int)result; }
c
github
https://github.com/curl/curl
docs/examples/http-post.c
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1

//go:build !enterprise

package vault

import (
	"context"

	"github.com/hashicorp/vault/sdk/physical"
)

//go:generate go run github.com/hashicorp/vault/tools/stubmaker

// isSealOldKeyError returns true if a value was decrypted using the
// old "unwrapSeal".
// In this non-enterprise build there is no old unwrap seal, so the
// answer is always false.
func isSealOldKeyError(err error) bool {
	return false
}

// startPartialSealRewrapping is a no-op in this non-enterprise build;
// the enterprise build presumably performs the actual rewrap work
// (see the stubmaker directive above).
func startPartialSealRewrapping(c *Core) {
	// nothing to do
}

// GetPartiallySealWrappedPaths always returns (nil, nil) in this
// non-enterprise build: no partially seal-wrapped paths are tracked.
func GetPartiallySealWrappedPaths(ctx context.Context, backend physical.Backend) ([]string, error) {
	return nil, nil
}
go
github
https://github.com/hashicorp/vault
vault/seal_stubs_oss.go
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012, BMW AG
#
# This file is part of GENIVI Project AudioManager.
#
# Contributions are licensed to the GENIVI Alliance under one or more
# Contribution License Agreements.
#
# \copyright
# This Source Code Form is subject to the terms of the
# Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with
# this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
#
# \author Christian Linke, christian.linke@bmw.de BMW 2012
#
# For further information see http://www.genivi.org/.
#

import sys
import traceback
import gobject
import math
import dbus
import dbus.service
import dbus.mainloop.glib

loop = gobject.MainLoop()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)


class NodeStateManager(dbus.service.Object):
    """Mock org.genivi.NodeStateManager session-bus service used for testing.

    Exposes the Consumer and Control interfaces and hands out fixed,
    well-known values so a test driver can verify the AudioManager's
    reactions (the expected values are hard-coded in the checks below).

    Fix vs. original: the four Python-2-only ``print`` statements were
    made ``print()`` calls for consistency with the rest of the file,
    and ``Introspect`` no longer leaks its file handle.
    """

    def __init__(self, loop):
        busName = dbus.service.BusName('org.genivi.NodeStateManager', bus=dbus.SessionBus())
        dbus.service.Object.__init__(self, busName, '/org/genivi/NodeStateManager')
        # Property values served via org.freedesktop.DBus.Properties.
        self.properties = {'RestartReason': 1, 'ShutdownReason': 2, 'WakeUpReason': 3, 'BootMode': 4}
        # Bus name / object path of the registered shutdown client.
        self.ABus = ""
        self.APath = ""
        self.loop = loop

    @dbus.service.method(dbus_interface='org.freedesktop.DBus.Introspectable', out_signature='s')
    def Introspect(self):
        # Serve the bundled interface description; 'with' closes the file
        # (the original leaked the handle).
        with open('org.genivi.NodeStateManager.Consumer.xml', "r") as f:
            return f.read()

    @dbus.service.method(dbus_interface='org.genivi.NodeStateManager.Consumer', out_signature='ii')
    def GetNodeState(self):
        # Fixed reply the test driver expects.
        NodeStateId = 1
        ErrorCode = 1
        print('[----------] send out nodeState' + str(NodeStateId) + ' ErrorCode ' + str(1))
        return NodeStateId, ErrorCode

    @dbus.service.method('org.genivi.NodeStateManager.Consumer', out_signature='ii')
    def GetApplicationMode(self):
        # Fixed reply the test driver expects.
        ApplicationModeId = 5
        ErrorCode = 2
        print('[----------] send out ApplicationMode' + str(ApplicationModeId) + ' ErrorCode ' + str(2))
        return ApplicationModeId, ErrorCode

    @dbus.service.method('org.genivi.NodeStateManager.Consumer', in_signature='si', out_signature='ii')
    def GetSessionState(self, SessionName, seatID):
        # Only the session/seat combination the test uses is "known".
        SessionState = 0
        ErrorCode = 2
        if SessionName == "mySession" and seatID == 1:
            SessionState = 5
            ErrorCode = 1
        print('[----------] GetSessionState for session ' + SessionName + ' seatID ' + str(seatID) + ' returnState ' + str(SessionState))
        return SessionState, ErrorCode

    @dbus.service.method('org.genivi.NodeStateManager.Consumer', in_signature='ssuu', out_signature='i')
    def RegisterShutdownClient(self, BName, ObjName, ShutdownMode, TimeoutMs):
        # Validate the exact registration the AudioManager is expected to
        # send; each mismatch yields a distinct error code for diagnosis.
        print('[----------] Busname: ' + BName)
        print('[----------] ObjName: ' + ObjName)
        print('[----------] ShutdownMode: ' + str(ShutdownMode))
        print('[----------] TimeoutMs: ' + str(TimeoutMs))
        ErrorCode = 1
        if TimeoutMs != 100:
            ErrorCode = 3
        if BName != "org.genivi.audiomanager":
            ErrorCode = 4
        if ShutdownMode != 1:
            ErrorCode = 5
        if ObjName != "/org/genivi/audiomanager/LifeCycleConsumer":
            ErrorCode = 6
        # Remember the client so sendLifeCycleRequest can call back into it
        # (stored even on validation failure, as in the original).
        self.ABus = BName
        self.APath = ObjName
        return ErrorCode

    @dbus.service.method('org.genivi.NodeStateManager.Consumer', in_signature='ssu', out_signature='i')
    def UnRegisterShutdownClient(self, BusName, ObjName, ShutdownMode):
        # Must match the previously registered client exactly.
        print('[----------] Busname: ' + str(BusName))
        print('[----------] ObjName: ' + str(ObjName))
        print('[----------] ShutdownMode: ' + str(ShutdownMode))
        ErrorCode = 1
        if BusName != self.ABus:
            ErrorCode = 2
        if ObjName != self.APath:
            ErrorCode = 2
        if ShutdownMode != 1:
            ErrorCode = 2
        return ErrorCode

    @dbus.service.method(dbus_interface='org.genivi.NodeStateManager.Consumer', out_signature='u')
    def GetInterfaceVersion(self):
        # Fixed interface version expected by the test.
        version = 23
        return version

    @dbus.service.method('org.genivi.NodeStateManager.Consumer', in_signature='ui', out_signature='i')
    def LifecycleRequestComplete(self, RequestID, Status):
        print('[----------] RequestId: ' + str(RequestID))
        print('[----------] Status: ' + str(Status))
        ErrorCode = 1
        if RequestID != 22:
            ErrorCode = 2
        if Status != 4:
            ErrorCode = 2
        return ErrorCode

    @dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='ss', out_signature='v')
    def Get(self, interface, prop):
        # Known properties return their canned value; unknown ones return 0.
        if prop in self.properties:
            print('[----------] send out ' + str(self.properties[prop]) + ' for property ' + prop)
            return self.properties[prop]
        return 0

    @dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='ssv')
    def Set(self, interface, prop, value):
        # Properties are read-only in this mock; 3 signals the refusal.
        return 3

    @dbus.service.method(dbus.PROPERTIES_IFACE, in_signature='s', out_signature='a{sv}')
    def GetAll(self, interface):
        return self.properties

    @dbus.service.signal(dbus_interface='org.genivi.NodeStateManager.Consumer', signature='i')
    def NodeApplicationMode(self, ApplicationModeId):
        # Signal body runs when the signal is emitted; was a Py2 print statement.
        print("[----------] Send out application mode ID %d" % (ApplicationModeId))

    @dbus.service.signal(dbus_interface='org.genivi.NodeStateManager.Consumer', signature='i')
    def NodeState(self, NodeState):
        print("[----------] Send out NodeState %d" % (NodeState))

    @dbus.service.signal(dbus_interface='org.genivi.NodeStateManager.Consumer', signature='sii')
    def SessionStateChanged(self, SessionStateName, SeatID, SessionState):
        print("[----------] Send out SessionStateChanged " + SessionStateName)

    @dbus.service.method('org.genivi.NodeStateManager.Control', in_signature='i')
    def sendNodeApplicationMode(self, input):
        # NOTE: parameter name 'input' (shadowing the builtin) is kept, as
        # dbus-python derives introspection argument names from it.
        self.NodeApplicationMode(input)
        return input

    @dbus.service.method('org.genivi.NodeStateManager.Control', in_signature='i')
    def sendNodeState(self, input):
        self.NodeState(input)
        return input

    @dbus.service.method('org.genivi.NodeStateManager.Control', in_signature='sii')
    def sendSessionState(self, SessionStateName, SeatID, SessionState):
        self.SessionStateChanged(SessionStateName, SeatID, SessionState)
        return SeatID

    @dbus.service.method('org.genivi.NodeStateManager.Control', in_signature='uu', out_signature='i')
    def sendLifeCycleRequest(self, request, requestID):
        # Call back into the shutdown client registered earlier.
        bus = dbus.SessionBus()
        remote_object = bus.get_object(self.ABus, self.APath)
        iface = dbus.Interface(remote_object, 'org.genivi.NodeStateManager.LifeCycleConsumer')
        iface.LifecycleRequest(request, requestID)
        return 42

    @dbus.service.method('org.genivi.NodeStateManager.Control')
    def finish(self):
        # Test driver calls this to shut the mock service down.
        print('[----------] Going to exit now!')
        self.loop.quit()
        return 0

nsm = NodeStateManager(loop)
loop.run()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python

import quandl
import jhtalib as jhta
import matplotlib.pyplot as plt


def main():
    """Fetch daily Bitstamp BTC/USD data from Quandl and plot the price
    series together with the jhtalib ATH and LMC indicator lines."""
    # An auth token can be passed for higher rate limits, e.g.:
    # quandl.get('BCHARTS/BITSTAMPUSD', start_date='2011-01-01', end_date='2018-11-01', order='asc', collapse='daily', returns='numpy', authtoken='YOUR_AUTH_TOKEN')
    rows = quandl.get('BCHARTS/BITSTAMPUSD', start_date='2011-01-01',
                      end_date='2018-11-01', order='asc', collapse='daily',
                      returns='numpy')

    # Re-shape the record array into the dict-of-lists layout jhtalib expects.
    df = {
        'datetime': [str(v) for v in rows['Date']],
        'Open': [float(v) for v in rows['Open']],
        'High': [float(v) for v in rows['High']],
        'Low': [float(v) for v in rows['Low']],
        'Close': [float(v) for v in rows['Close']],
        'Volume': [int(v) for v in rows['Volume (BTC)']],
    }

    x = df['datetime']

    plt.figure(1)
    plt.subplot(211)
    plt.title('Time / Price')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.grid(True)
    plt.plot(x, df['Close'], color='blue')
    plt.plot(x, df['High'], color='grey')
    plt.plot(x, df['Low'], color='grey')
    plt.plot(x, jhta.ATH(df)['ath'], color='red')
    plt.plot(x, jhta.LMC(df)['lmc'], color='green')
    plt.legend(['Close', 'High', 'Low', 'ATH', 'LMC'], loc='upper left')
    plt.show()

if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from __future__ import with_statement import os import re import traceback from module.plugins.internal.Addon import Addon, threaded from module.utils import save_join as fs_join class MergeFiles(Addon): __name__ = "MergeFiles" __type__ = "hook" __version__ = "0.16" __status__ = "testing" __config__ = [("activated", "bool", "Activated", True)] __description__ = """Merges parts splitted with hjsplit""" __license__ = "GPLv3" __authors__ = [("and9000", "me@has-no-mail.com")] BUFFER_SIZE = 4096 @threaded def package_finished(self, pack): files = {} fid_dict = {} for fid, data in pack.getChildren().items(): if re.search("\.\d{3}$", data['name']): if data['name'][:-4] not in files: files[data['name'][:-4]] = [] files[data['name'][:-4]].append(data['name']) files[data['name'][:-4]].sort() fid_dict[data['name']] = fid download_folder = self.pyload.config.get("general", "download_folder") if self.pyload.config.get("general", "folder_per_package"): download_folder = fs_join(download_folder, pack.folder) for name, file_list in files.items(): self.log_info(_("Starting merging of"), name) with open(fs_join(download_folder, name), "wb") as final_file: for splitted_file in file_list: self.log_debug("Merging part", splitted_file) pyfile = self.pyload.files.getFile(fid_dict[splitted_file]) pyfile.setStatus("processing") try: with open(fs_join(download_folder, splitted_file), "rb") as s_file: size_written = 0 s_file_size = int(os.path.getsize(os.path.join(download_folder, splitted_file))) while True: f_buffer = s_file.read(self.BUFFER_SIZE) if f_buffer: final_file.write(f_buffer) size_written += self.BUFFER_SIZE pyfile.setProgress((size_written * 100) / s_file_size) else: break self.log_debug("Finished merging part", splitted_file) except Exception, e: traceback.print_exc() finally: pyfile.setProgress(100) pyfile.setStatus("finished") pyfile.release() self.log_info(_("Finished merging of"), name)
unknown
codeparrot/codeparrot-clean
import unittest

from cStringIO import StringIO

from ..backends import static

# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases


class TestStatic(unittest.TestCase):
    """Tests for the static (fully-evaluated) manifest backend.

    NOTE(review): the embedded ``data`` strings use the conditional manifest
    format (top-level ``key: value`` pairs, ``[Section]`` headings, indented
    ``if <expr>: <value>`` conditionals with an optional unconditional
    default); their exact newlines/indentation were reconstructed from a
    whitespace-mangled copy — confirm against the serializer tests.
    """

    def compile(self, input_text, input_data):
        # Thin wrapper: evaluate the manifest text against the given
        # run-info dict.
        return static.compile(input_text, input_data)

    def test_get_0(self):
        # A section inherits top-level keys, and a conditional key takes the
        # first matching branch (here a == 2 -> value_2).
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 2})

        self.assertEquals(manifest.get("key"), "value")
        children = list(item for item in manifest.iterchildren())
        self.assertEquals(len(children), 1)
        section = children[0]
        self.assertEquals(section.name, "Heading 1")

        self.assertEquals(section.get("other_key"), "value_2")
        self.assertEquals(section.get("key"), "value")

    def test_get_1(self):
        # No conditional matches (a == 3), so the unconditional default
        # value_3 is used.
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 3})

        children = list(item for item in manifest.iterchildren())
        section = children[0]
        self.assertEquals(section.get("other_key"), "value_3")

    def test_get_3(self):
        # Condition expressions support string comparison and indexing.
        data = """key:
  if a == "1": value_1
  if a[0] == "ab"[0]: value_2
"""
        manifest = self.compile(data, {"a": "1"})
        self.assertEquals(manifest.get("key"), "value_1")

        manifest = self.compile(data, {"a": "ac"})
        self.assertEquals(manifest.get("key"), "value_2")

    def test_get_4(self):
        # Boolean negation in conditions.
        data = """key:
  if not a: value_1
  value_2
"""
        manifest = self.compile(data, {"a": True})
        self.assertEquals(manifest.get("key"), "value_2")

        manifest = self.compile(data, {"a": False})
        self.assertEquals(manifest.get("key"), "value_1")

    def test_api(self):
        # Exercise the public manifest API: is_empty, root, has_key,
        # iterkeys, itervalues.
        data = """key:
  if a == 1.5: value_1
  value_2
key_1: other_value
"""
        manifest = self.compile(data, {"a": 1.5})

        self.assertFalse(manifest.is_empty)
        self.assertEquals(manifest.root, manifest)
        self.assertTrue(manifest.has_key("key_1"))
        self.assertFalse(manifest.has_key("key_2"))

        self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
        self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))

    def test_is_empty_1(self):
        # Sections with no keys at any level leave the manifest empty.
        data = """
[Section]
  [Subsection]
"""
        manifest = self.compile(data, {})

        self.assertTrue(manifest.is_empty)
unknown
codeparrot/codeparrot-clean
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization // +k8s:conversion-gen-external-types=k8s.io/api/authorization/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=k8s.io/api/authorization/v1 // +groupName=authorization.k8s.io package v1
go
github
https://github.com/kubernetes/kubernetes
pkg/apis/authorization/v1/doc.go
- Feature Name: Copysets - Status: draft - Start Date: 2018-12-04 - Authors: Vijay Karthik, Mohammed Hassan - RFC PR: (PR # after acceptance of initial draft) - Cockroach Issue: [#25194](https://github.com/cockroachdb/cockroach/issues/25194) # Table of Contents - [Table of Contents](#table-of-contents) - [Summary](#summary) - [Motivation](#motivation) - [Guide level explanation](#guide-level-explanation) - [Design](#design) - [Managing copysets](#managing-copysets) - [Rebalancing ranges](#rebalancing-ranges) - [Copyset score](#copyset-score) - [Drawbacks](#drawbacks) - [Rationale and Alternatives](#rationale-and-alternatives) - [Copyset allocation to minimize data movement](#copyset-allocation-to-minimize-data-movement) - [Chainsets](#chainsets) - [Testing scenarios](#testing-scenarios) # Summary Copysets reduce the probability of data loss in the presence of multi node failures in large clusters. This RFC will present a design for integrating copysets in cockroach and discuss its tradeoffs. Copysets have earlier been discussed in [RFC #6484](https://github.com/cockroachdb/cockroach/pull/6484). More details on copysets can be seen in the [academic literature](https://web.stanford.edu/~skatti/pubs/usenix13-copysets.pdf). # Motivation In large clusters simultaneous loss of multiple nodes have a very high probability of data loss. For example, consider a cluster of 100 nodes using a replication factor of 3 having ~10k ranges. The simultaneous loss of 2 or more nodes has a very high probability of data loss since there could be a range out of the 10k ranges which has 2 out of its 3 replicas on the 2 lost nodes. This probability can be reduced by adding locality to nodes since cockroach supports failures of all nodes in a locality, but the loss of two nodes in different localities again has a high probability of data loss. Copysets significantly reduces the probability of data loss in the presence of multi node failures. 
# Guide-level explanation Copysets divides the cluster into disjoint sets of nodes. The size of each set will be based on the used replication factors. Separate copysets are created for each replication factor. A range should prefer to allocate its replicas within a copyset rather than spread its replicas across copysets. So there are two major components 1. Managing copysets (which node belongs to which copyset) Copyset assignments should take into account locality of nodes so that locality fault tolerance is not lost. Addition / Removal / Crashed nodes should be taken into account when assigning nodes to copysets. 2. Rebalancing all replicas of a range to reside within a single copyset on a best effort basis. Rebalancing replicas into copysets is important, but some properties like constraints set by a user should take priority over copysets. Copysets will initially be an opt-in feature (based on a cluster setting) and implemented for a scatter width of `replication_factor - 1` (eventually it will be extended to support higher scatter width). **For simplicity, we will explain the design without considering scatter width in copysets.** ## Design ### Managing copysets The cluster will be divided into copysets. For each replication factor in the cluster, separate copysets will be generated. The requirements for copysets of a replication factor are 1. There should be no overlap of nodes between copysets for scatter width of rf -1 and minimize overlapping nodes for scatter width >= rf (where rf is the replication factor). 2. Copysets should be locality fault tolerant (each node in a copyset should preferably be from a different locality) 3. Copysets should rebalance on node additions / removal / failures. Copysets are generated for each replication factor used in the system. Better failure tolerance can be provided if copysets for different replication factors are aligned, but this is not the case in the presented strategies. 
Two possible strategies for copyset allocation is presented below. #### Optimal diversity copyset allocation Optimal allocation (for locality diversity) of copysets for a particular replication factor can be done as follows: ``` 1. Compute num_copysets = floor(num_stores/replication_factor) 2. Sort stores based on increasing order of locality. 3. Assign copysets to stores in a round robin fashion. ``` For example, consider the case where we have stores as follows: ``` Locality1: S1 S2 S3 Locality2: S4 S5 S6 Locality3: S7 S8 S9 S10 ``` Copysets for RF 3 would be created as ``` num_copysets = 10/3 = 3 CS1: S1 S4 S7 S10 CS2: S2 S5 S8 CS3: S3 S6 S9 ``` #### Minimize data movement copyset allocation In this strategy the goal is to minimize data movement when copysets are regenerated with a different store list (some stores added, some stores removed). This allocation tries to create a copyset-store mapping (with incremental changes over previously used copysets) which is diverse in locality. It tries to minimize the number of changes to previously used copysets and ensure that each store in a copyset belongs to a different locality when possible. The allocation 1. Computes the required number of copysets for the new store list. 2. Assign previously existing stores to the same copyset id they belonged to (if copyset id exists based on 1) if copyset size < replication factor 3. Adds the newly added stores (not present in previous copyset allocation) and remaining stores from (2) to empty spots in each copyset (if the copyset has < replication factor stores or if it is the last copyset). after assigning previously existing stores which have carried over). 4. Swaps stores between copysets to avoid duplicate localities in a single copyset till it converges (diversity cannot be improved further). 
#### Swaps Swaps are made between a source copyset and a target copyset which guarantee that the diversity of the source copyset increases while the diversity of the target copyset does not decrease (or if it decreases it still is > replication factor). Store swaps are made between a source copyset and a target copyset based on the localities present in the source and target copyset. The conditions required for a swap are: 1. The source copyset has diversity < replication factor. This means that the source copyset has two stores in a particular locality. One of these stores will be a source swap candidate. 2. The target copyset has a locality not present in the source copyset (let's call this target locality). A store from this locality will be a target swap candidate. 3. One of the following is true 1. Locality of the source swap candidate is not present in the target copyset. 2. Target copyset either 1. Has two stores in the target locality. 2. Has diversity > replication factor. By diversity above we mean the number of localities in a copyset. Point (3) above ensures that diversity of the target copyset does not decrease (or if it decreases it does not fall below replication factor). A single iteration doing swaps considers all `(n choose 2)` copyset combinations where `n` is the number of copysets. These iterations continue till sum of diversity of all copysets cannot be improved further (no swap candidates are found for a whole iteration). For example, consider the case where we have stores as follows: ``` Locality1: S1 S2 S3 Locality2: S4 S5 S6 Locality3: S7 S8 S9 Locality4: S10 S11 S12 S13 ``` And initial copyset allocation as ``` CS1: S1 S5 S9 CS2: S2 S6 S10 CS3: S3 S7 S11 CS4: S4 S8 S12 S13 ``` Say store `S6` is removed.
After step 2 (assign stores to same copyset ID till size reaches rf), we have ``` CS1: S1 S5 S9 CS2: S2 S10 CS3: S3 S7 S11 CS4: S4 S8 S12 ``` After filling empty spots by adding remaining stores (`S13` in this case) ``` CS1: S1 S5 S9 CS2: S2 S10 S13 CS3: S3 S7 S11 CS4: S4 S8 S12 ``` After swaps (between `CS1` and `CS2` since CS2 has 2 stores from `Locality4`) ``` CS1: S1 S5 S13 CS2: S2 S10 S9 CS3: S3 S7 S11 CS4: S4 S8 S12 ``` This strategy may not achieve optimal possible diversity but tries to ensure that each locality within a copyset is different. #### Copyset re-generation The store list considered for copyset allocation would be the current live stores. The way live stores are computed will be the same as the way allocator detects live stores (but throttled stores will not be excluded.) Copysets will be re-generated if the store list has been stable and not changed for 3 ticks (each tick has a 10s interval). Copyset allocation can be persisted as a proto in the distributed KV layer. The copysets strategy which minimizes data movement requires copysets to be persisted (it requires the previous state to be global and survive restarts). The lowest live node ID in the cluster would be managing (persisting) copysets. Other nodes will be periodically (every 10s) cache the persisted copysets and using it for re-balancing. Copysets will only be re-generated (and persisted) if the store list changes. In steady state all nodes will be periodically reading the persisted copysets and there will be no need to re-generate and persist new copysets. The cluster can tolerate failure of one node within each copyset for RF=3. For example a 100 node cluster can tolerate the simultaneous failure of 33 nodes in the best case (for RF=3) without suffering any data loss. ## Rebalancing ranges Ranges need to be rebalanced to be contained within a copyset. There are two range re-balancers currently being used in cockroach: 1. Replicate queue 2. 
Store rebalancer This RFC will explain the implementation for copyset rebalancing for the replicate queue which processes one replica at a time. Replica rebalancing by the store rebalancer will be disabled if copysets is enabled (at least for the initial version). The store rebalancer can still perform lease holder rebalancing. The allocator uses a scoring function to 1. Decide which store to use for a new replica for a range 2. Which replica to remove when a range has more than required replicas 3. Whether a replica has to move from one store to another where the resultant score for the range will be higher. The scoring function considers the following (given in order of priority) 1. Zone constraints (which are constraints on having certain tables in certain zones) 2. Disk fullness: checks whether the source or target is too full. 3. Diversity score difference: Diversity score is proportional to the number of different localities the range has a replica in. It looks at nC2 diversity score based on their localities where n is the number of replicas. 4. Convergence score difference: Convergence score is used to avoid moving ranges whose movement will cause the stats (range count) of a range to move away from the global mean. 5. Balance score difference: Balance score is the normalized utilization of a node. It currently considers number of ranges. Nodes with a low balance score are preferred. 6. Range count difference: Stores with a low range count are preferred. ### Copyset score For rebalancing ranges into copysets, a new "copyset score" will be added to the allocator. Priority wise it will be between (2) and (3) above. Zone constraints and disk fullness take a higher priority over copyset score. Since copyset allocation considers diversity, its priority can be placed above diversity score. If copysets are disabled in the cluster, this score will have no impact in rebalancing. Copyset score (higher score is better) of a range is high if: 1.
A range is completely contained within a copyset. 2. The copysets the range is in are under-utilized. We want each copyset to be equally loaded. If a range is completely contained in a copyset `x` we should move the range completely to a copyset `y` if the nodes in copyset `y` have a **significantly** lower load (for example nodes in `y` have a lot more free disk space). So the following replica transition for a range of RF 3 should be allowed in case (2): `x x x -> x x y -> x y y -> y y y` where `x x x` means that the 3 replicas of the range are in copyset `x`. Let's say `r` is the replication factor of a range. Each of its replicas belongs to a node with a particular copyset id. We can formally define the scores as: 1. Homogeneity score: `Number of pairwise same copyset id / (r choose 2)` 2. Idle score: This score is proportional to how "idle" a store is. For starters we can consider this to be % disk free (Available Capacity / Total Capacity of the store). We want ranges to migrate to copysets with significantly lower load. 1. The idle score of a store is proportional to the idleness of a store, like % disk free on the store. 2. The idle score of a copyset is the lowest idle score of the stores in the copyset. 3. The idle score of a range is the weighted average idle score of the copysets of the stores a range is present in. A range can be a part of multiple copysets when it is in flux (examples given below). Copyset score can be defined as `(k * homogeneity_score + idle_score) / (k + 1)`. It is normalized and lies between 0 and 1. #### Computation of k Let's say we want to migrate a range from a copyset `x` to a copyset `y` if the idle score of `y` differs by more than `d` (configurable). If `d` is too small, it could lead to thrashing of replicas, so we can use a value like 15%. 
Though the below calculations may seem a bit complex, to the end user we can just expose `d`, which is easy to understand - the max difference between idle scores of two copysets in the cluster. For example, if idle score of `x` is `a` and `y` is `a + d`, we require: ``` copyset_score(x x x) < copyset_score(x x y) k * homogeneityScore(x x x) + idleScore(x x x) < k * homogeneityScore(x x y) + idleScore(x x y) # Generalizing for replication factor r where r = 3 below homogeneityScore(x x x) = 1 idleScore(x x x) = ra/r = a homogeneityScore(x x y) = (r-1 choose 2) / (r choose 2) # since 1 copyset is different. idleScore(x x y) = ((r-1) * a + a + d)/r = (ra + d) / r # So we get k * 1 + a <= k * (r-1 choose 2) / (r choose 2) + (ra + d) / r => k <= d / 2 ``` For example, for `r = 3`, `d = 0.15`, and idle score of x being `0.2` and idle score of y being `0.36` ``` totalScore(x x x) = 0.075 * 1 + 0.2 = 0.275 totalScore(x x y) = 0.075 * 0.33 + (0.2 + 0.2 + 0.36)/3 = 0.278 ``` So a range will migrate from ``` (x x x) -> (x x y) -> (x y y) -> (y y y) ``` The above migration will not happen if `y` has an idle score of `0.34` (since `d = 0.15`). The first step `(x x x) -> (x x y)` is the hardest as homogeneity is broken. The proof for this is given above. For `(x x y) -> (x y y)` step, the homogeneity score remains the same, and idle score improves (since y has a better idle score). For `(x y y) -> (y y y)` step, both the homogeneity score and idle score improve. When a range actually migrates from `(x x x)` to `(x x y)`, it goes through an intermediate step `(x x x y)` after which one `x` is removed, but similar math applies. This scoring function will allow ranges to organically move into copysets and try to maintain approximately equal load among copysets. Thrashing will be avoided by choosing an appropriate value of `d`. ## Drawbacks 1. Copysets increase recovery time since only nodes within the copyset of a crashed node can up-replicate data. 
This can be mitigated by choosing a higher scatter width (description of scatter width is given in the [academic literature](https://web.stanford.edu/~skatti/pubs/usenix13-copysets.pdf)). 2. Zone constraints will not be supported in the initial version of copysets. Copyset allocation can later be tweaked to respect zone constraints. 3. Heterogeneous clusters. Copysets will work in heterogeneous clusters but each copyset will be limited by the weakest node in the copyset (since idle score of a copyset is the lowest node idle score). This may be something we can live with. 4. Doesn't play well with the store rebalancer. For the first cut store based replica rebalancing will be disabled with copysets enabled. A similar logic can be incorporated into the store rebalancer at a later point. Due to the above drawbacks, copysets will be disabled by default and there will be a cluster setting where users can enable copysets if they are ok with the above drawbacks. ## Rationale and Alternatives There can be multiple approaches for both copyset allocation and the scoring function. This design in this RFC is something simple and the respective algorithms can be tweaked independently later. ### Chainsets [Chainsets](http://hackingdistributed.com/2014/02/14/chainsets/) is one way to make incremental changes to copysets, but again potentially at the cost of reduced locality diversity. The length of the chain used in chainsets could be considered equivalent to replication factor in cockroach. ## Testing scenarios Apart from unit tests, roachtests can be added which verify copyset based rebalancing in the presence of 1. Node addition / removal 2. Node crashes (up to 1/3rd of the cluster) 3. Change of replication factors 4. Locality fault tolerance 5. Changes of constraints
unknown
github
https://github.com/cockroachdb/cockroach
docs/RFCS/20181204_copysets.md
'use strict';

// Benchmark: throughput (Mbit/s) of writing `size`-byte chunks for `dur`
// seconds through a local proxy.  With securing === 'TLSSocket' the proxy
// upgrades its inbound socket with tls.TLSSocket; with 'clear' it pipes
// plain TCP end to end.
const common = require('../common.js');
const bench = common.createBenchmark(main, {
  dur: [5],
  securing: ['TLSSocket', 'clear'],
  size: [100, 1024, 1024 * 1024],
}, {
  flags: ['--no-warnings'],
});

const fixtures = require('../../test/common/fixtures');
const tls = require('tls');
const net = require('net');

// Fixed port for the final (redirect) server that counts received bytes.
const REDIRECT_PORT = 28347;

function main({ dur, size, securing }) {
  const chunk = Buffer.alloc(size, 'b');

  // TLS server-side options backed by the repo's RSA test key material.
  const options = {
    key: fixtures.readKey('rsa_private.pem'),
    cert: fixtures.readKey('rsa_cert.crt'),
    ca: fixtures.readKey('rsa_ca.crt'),
    ciphers: 'AES256-GCM-SHA384',
    isServer: true,
    requestCert: true,
    rejectUnauthorized: true,
    maxVersion: 'TLSv1.2',
  };

  // Data path: client -> proxy (common.PORT) -> redirect server (REDIRECT_PORT).
  const server = net.createServer(onRedirectConnection);
  server.listen(REDIRECT_PORT, () => {
    const proxy = net.createServer(onProxyConnection);
    proxy.listen(common.PORT, () => {
      const clientOptions = {
        port: common.PORT,
        ca: options.ca,
        key: options.key,
        cert: options.cert,
        isServer: false,
        rejectUnauthorized: false,
        maxVersion: options.maxVersion,
      };
      // The client speaks TLS only when the proxy secures its side too.
      const network = securing === 'clear' ? net : tls;
      const conn = network.connect(clientOptions, () => {
        // Stop after `dur` seconds and report megabits received downstream.
        setTimeout(() => {
          const mbits = (received * 8) / (1024 * 1024);
          bench.end(mbits);
          if (conn)
            conn.destroy();
          server.close();
          proxy.close();
        }, dur * 1000);
        bench.start();
        conn.on('drain', write);
        write();
      });
      conn.on('error', (e) => {
        throw new Error(`Client error: ${e}`);
      });

      function write() {
        // Write until backpressure kicks in; resumed by the 'drain' handler.
        while (false !== conn.write(chunk));
      }
    });
  });

  function onProxyConnection(conn) {
    // Forward each inbound proxy connection to the redirect server,
    // optionally securing the inbound leg first.
    const client = net.connect(REDIRECT_PORT, () => {
      switch (securing) {
        case 'TLSSocket':
          secureTLSSocket(conn, client);
          break;
        case 'clear':
          conn.pipe(client);
          break;
        default:
          throw new Error('Invalid securing method');
      }
    });
  }

  function secureTLSSocket(conn, client) {
    // Upgrade the raw inbound socket to TLS, then pipe plaintext onward.
    const serverSocket = new tls.TLSSocket(conn, options);
    serverSocket.on('error', (e) => {
      throw new Error(`Socket error: ${e}`);
    });
    serverSocket.pipe(client);
  }

  // Total bytes that made it through the whole chain.
  let received = 0;
  function onRedirectConnection(conn) {
    conn.on('data', (chunk) => {
      received += chunk.length;
    });
  }
}
javascript
github
https://github.com/nodejs/node
benchmark/tls/secure-pair.js
import myhdl from myhdl import * from myhdl import Signal from myhdl._always_seq import AlwaysSeqError, _error, always_seq from helpers import raises_kind def test_clock(): """ check the edge parameter """ # should fail without a valid Signal clock = Signal(bool(0)) reset = ResetSignal(0, active=0, isasync=True) with raises_kind(AlwaysSeqError, _error.EdgeType): @always_seq(clock, reset=reset) def logic1(): pass # should work with a valid Signal clock = Signal(bool(0)) try: @always_seq(clock.posedge, reset=reset) def logic2(): pass except: assert False def test_reset(): """ check the reset parameter """ # should fail without a valid ResetSignal clock = Signal(bool(0)) reset = Signal(bool(0)) with raises_kind(AlwaysSeqError, _error.ResetType): @always_seq(clock.posedge, reset=reset) def logic(): pass # should work with a valid Signal reset = ResetSignal(0, active=0, isasync=True) try: @always_seq(clock.posedge, reset=reset) def logic2(): pass except: assert False
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations import pytest from airflow.assets.evaluation import AssetEvaluator from airflow.serialization.definitions.assets import ( SerializedAsset, SerializedAssetAlias, SerializedAssetAll, SerializedAssetAny, SerializedAssetUniqueKey, ) pytestmark = pytest.mark.db_test asset1 = SerializedAsset("asset-1", "s3://bucket1/data1", "asset", {}, []) asset2 = SerializedAsset("asset-2", "s3://bucket2/data2", "asset", {}, []) @pytest.fixture def evaluator(session): return AssetEvaluator(session) @pytest.mark.parametrize( ("statuses", "result"), [ ({SerializedAssetUniqueKey.from_asset(asset1): True}, True), ({SerializedAssetUniqueKey.from_asset(asset1): False}, False), ({}, False), ], ) def test_asset_evaluate(evaluator, statuses, result): assert evaluator.run(asset1, statuses) is result @pytest.mark.parametrize( ("condition", "statuses", "result"), [ ( SerializedAssetAny([asset1, asset2]), { SerializedAssetUniqueKey.from_asset(asset1): False, SerializedAssetUniqueKey.from_asset(asset2): True, }, True, ), ( SerializedAssetAll([asset1, asset2]), { SerializedAssetUniqueKey.from_asset(asset1): True, SerializedAssetUniqueKey.from_asset(asset2): False, }, False, ), ], ) def 
test_assset_boolean_condition_evaluate_iter(evaluator, condition, statuses, result): """ Tests _AssetBooleanCondition's evaluate and iter_assets methods through AssetAny and AssetAll. Ensures AssetAny evaluate returns True with any true condition, AssetAll evaluate returns False if any condition is false, and both classes correctly iterate over assets without duplication. """ assert evaluator.run(condition, statuses) is result assert dict(condition.iter_assets()) == { SerializedAssetUniqueKey("asset-1", "s3://bucket1/data1"): asset1, SerializedAssetUniqueKey("asset-2", "s3://bucket2/data2"): asset2, } @pytest.mark.parametrize( ("inputs", "scenario", "expected"), [ # Scenarios for "any" ((True, True, True), SerializedAssetAny, True), ((True, True, False), SerializedAssetAny, True), ((True, False, True), SerializedAssetAny, True), ((True, False, False), SerializedAssetAny, True), ((False, False, True), SerializedAssetAny, True), ((False, True, False), SerializedAssetAny, True), ((False, True, True), SerializedAssetAny, True), ((False, False, False), SerializedAssetAny, False), # Scenarios for "all" ((True, True, True), SerializedAssetAll, True), ((True, True, False), SerializedAssetAll, False), ((True, False, True), SerializedAssetAll, False), ((True, False, False), SerializedAssetAll, False), ((False, False, True), SerializedAssetAll, False), ((False, True, False), SerializedAssetAll, False), ((False, True, True), SerializedAssetAll, False), ((False, False, False), SerializedAssetAll, False), ], ) def test_asset_logical_conditions_evaluation_and_serialization(evaluator, inputs, scenario, expected): assets = [SerializedAsset(f"asset_{i}", f"s3://abc/{i}", "asset", {}, []) for i in range(123, 126)] condition = scenario(assets) statuses = {SerializedAssetUniqueKey.from_asset(asset): status for asset, status in zip(assets, inputs)} assert evaluator.run(condition, statuses) == expected, ( f"Condition evaluation failed for inputs {inputs} and scenario '{scenario}'" ) 
@pytest.mark.parametrize( ("status_values", "expected_evaluation"), [ pytest.param( (False, True, True), False, id="f & (t | t)", ), # AssetAll requires all conditions to be True, but asset1 is False pytest.param( (True, True, True), True, id="t & (t | t)", ), # All conditions are True pytest.param( (True, False, True), True, id="t & (f | t)", ), # asset1 is True, and AssetAny condition (asset2 or asset3 being True) is met pytest.param( (True, False, False), False, id="t & (f | f)", ), # asset1 is True, but neither asset2 nor asset3 meet the AssetAny condition ], ) def test_nested_asset_conditions_with_serialization(evaluator, status_values, expected_evaluation): # Define assets asset1 = SerializedAsset("123", "s3://abc/123", "asset", {}, []) asset2 = SerializedAsset("124", "s3://abc/124", "asset", {}, []) asset3 = SerializedAsset("125", "s3://abc/125", "asset", {}, []) # Create a nested condition: AssetAll with asset1 and AssetAny with asset2 and asset3 nested_condition = SerializedAssetAll([asset1, SerializedAssetAny([asset2, asset3])]) statuses = { SerializedAssetUniqueKey.from_asset(asset1): status_values[0], SerializedAssetUniqueKey.from_asset(asset2): status_values[1], SerializedAssetUniqueKey.from_asset(asset3): status_values[2], } assert evaluator.run(nested_condition, statuses) == expected_evaluation, "Initial evaluation mismatch" class TestAssetAlias: @pytest.fixture def asset(self): """Example asset links to asset alias resolved_asset_alias_2.""" return SerializedAsset("test_name", "test://asset1/", "asset", {}, []) @pytest.fixture def asset_alias_1(self): """Example asset alias links to no assets.""" return SerializedAssetAlias("test_name", "test") @pytest.fixture def resolved_asset_alias_2(self): """Example asset alias links to asset.""" return SerializedAssetAlias("test_name_2", "test") @pytest.fixture def evaluator(self, session, asset_alias_1, resolved_asset_alias_2, asset): class _AssetEvaluator(AssetEvaluator): # Can't use mock because 
AssetEvaluator sets __slots__. def _resolve_asset_alias(self, o): if o is asset_alias_1: return [] if o is resolved_asset_alias_2: return [asset] return super()._resolve_asset_alias(o) return _AssetEvaluator(session) def test_evaluate_empty(self, evaluator, asset_alias_1, asset): assert evaluator.run(asset_alias_1, {SerializedAssetUniqueKey.from_asset(asset): True}) is False def test_evalute_resolved(self, evaluator, resolved_asset_alias_2, asset): assert ( evaluator.run(resolved_asset_alias_2, {SerializedAssetUniqueKey.from_asset(asset): True}) is True )
python
github
https://github.com/apache/airflow
airflow-core/tests/unit/assets/test_evaluation.py
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.autoconfigure.condition; import java.time.Duration; import java.util.Iterator; import java.util.Map; import org.jspecify.annotations.Nullable; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.config.ConfigurableListableBeanFactory; import org.springframework.beans.factory.support.DefaultListableBeanFactory; import org.springframework.boot.autoconfigure.AutoConfiguration; import org.springframework.boot.autoconfigure.ImportAutoConfiguration; import org.springframework.boot.autoconfigure.condition.ConditionEvaluationReport.ConditionAndOutcome; import org.springframework.boot.autoconfigure.condition.ConditionEvaluationReport.ConditionAndOutcomes; import org.springframework.boot.autoconfigure.condition.config.UniqueShortNameAutoConfiguration; import org.springframework.boot.autoconfigure.logging.ConditionEvaluationReportMessage; import org.springframework.boot.test.util.TestPropertyValues; import org.springframework.context.annotation.AnnotationConfigApplicationContext; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Condition; import 
org.springframework.context.annotation.ConditionContext; import org.springframework.context.annotation.Conditional; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.ConfigurationCondition; import org.springframework.core.type.AnnotatedTypeMetadata; import org.springframework.util.ClassUtils; import static org.assertj.core.api.Assertions.assertThat; /** * Tests for {@link ConditionEvaluationReport}. * * @author Greg Turnquist * @author Phillip Webb */ @ExtendWith(MockitoExtension.class) class ConditionEvaluationReportTests { private DefaultListableBeanFactory beanFactory; private ConditionEvaluationReport report; @Mock @SuppressWarnings("NullAway.Init") private Condition condition1; @Mock @SuppressWarnings("NullAway.Init") private Condition condition2; @Mock @SuppressWarnings("NullAway.Init") private Condition condition3; private @Nullable ConditionOutcome outcome1; private @Nullable ConditionOutcome outcome2; private @Nullable ConditionOutcome outcome3; @BeforeEach void setup() { this.beanFactory = new DefaultListableBeanFactory(); this.report = ConditionEvaluationReport.get(this.beanFactory); } @Test void get() { assertThat(this.report).isNotNull(); assertThat(this.report).isSameAs(ConditionEvaluationReport.get(this.beanFactory)); } @Test void parent() { this.beanFactory.setParentBeanFactory(new DefaultListableBeanFactory()); BeanFactory parentBeanFactory = this.beanFactory.getParentBeanFactory(); assertThat(parentBeanFactory).isNotNull(); ConditionEvaluationReport.get((ConfigurableListableBeanFactory) parentBeanFactory); assertThat(this.report).isSameAs(ConditionEvaluationReport.get(this.beanFactory)); assertThat(this.report).isNotNull(); assertThat(this.report.getParent()).isNotNull(); ConditionEvaluationReport.get((ConfigurableListableBeanFactory) parentBeanFactory); assertThat(this.report).isSameAs(ConditionEvaluationReport.get(this.beanFactory)); assertThat(this.report.getParent()) 
.isSameAs(ConditionEvaluationReport.get((ConfigurableListableBeanFactory) parentBeanFactory)); } @Test void parentBottomUp() { this.beanFactory = new DefaultListableBeanFactory(); // NB: overrides setup this.beanFactory.setParentBeanFactory(new DefaultListableBeanFactory()); BeanFactory parentBeanFactory = this.beanFactory.getParentBeanFactory(); assertThat(parentBeanFactory).isNotNull(); ConditionEvaluationReport.get((ConfigurableListableBeanFactory) parentBeanFactory); this.report = ConditionEvaluationReport.get(this.beanFactory); assertThat(this.report).isNotNull(); assertThat(this.report).isNotSameAs(this.report.getParent()); assertThat(this.report.getParent()).isNotNull(); assertThat(this.report.getParent().getParent()).isNull(); } @Test void recordConditionEvaluations() { this.outcome1 = new ConditionOutcome(false, "m1"); this.outcome2 = new ConditionOutcome(false, "m2"); this.outcome3 = new ConditionOutcome(false, "m3"); this.report.recordConditionEvaluation("a", this.condition1, this.outcome1); this.report.recordConditionEvaluation("a", this.condition2, this.outcome2); this.report.recordConditionEvaluation("b", this.condition3, this.outcome3); Map<String, ConditionAndOutcomes> map = this.report.getConditionAndOutcomesBySource(); assertThat(map).hasSize(2); ConditionAndOutcomes a = map.get("a"); assertThat(a).isNotNull(); Iterator<ConditionAndOutcome> iterator = a.iterator(); ConditionAndOutcome conditionAndOutcome = iterator.next(); assertThat(conditionAndOutcome.getCondition()).isEqualTo(this.condition1); assertThat(conditionAndOutcome.getOutcome()).isEqualTo(this.outcome1); conditionAndOutcome = iterator.next(); assertThat(conditionAndOutcome.getCondition()).isEqualTo(this.condition2); assertThat(conditionAndOutcome.getOutcome()).isEqualTo(this.outcome2); assertThat(iterator.hasNext()).isFalse(); ConditionAndOutcomes b = map.get("b"); assertThat(b).isNotNull(); iterator = b.iterator(); conditionAndOutcome = iterator.next(); 
assertThat(conditionAndOutcome.getCondition()).isEqualTo(this.condition3); assertThat(conditionAndOutcome.getOutcome()).isEqualTo(this.outcome3); assertThat(iterator.hasNext()).isFalse(); } @Test void fullMatch() { prepareMatches(true, true, true); ConditionAndOutcomes a = this.report.getConditionAndOutcomesBySource().get("a"); assertThat(a).isNotNull(); assertThat(a.isFullMatch()).isTrue(); } @Test void notFullMatch() { prepareMatches(true, false, true); ConditionAndOutcomes a = this.report.getConditionAndOutcomesBySource().get("a"); assertThat(a).isNotNull(); assertThat(a.isFullMatch()).isFalse(); } private void prepareMatches(boolean m1, boolean m2, boolean m3) { this.outcome1 = new ConditionOutcome(m1, "m1"); this.outcome2 = new ConditionOutcome(m2, "m2"); this.outcome3 = new ConditionOutcome(m3, "m3"); this.report.recordConditionEvaluation("a", this.condition1, this.outcome1); this.report.recordConditionEvaluation("a", this.condition2, this.outcome2); this.report.recordConditionEvaluation("a", this.condition3, this.outcome3); } @Test @SuppressWarnings("resource") void springBootConditionPopulatesReport() { ConditionEvaluationReport report = ConditionEvaluationReport .get(new AnnotationConfigApplicationContext(Config.class).getBeanFactory()); assertThat(report.getUnconditionalClasses()).containsExactly(UnconditionalAutoConfiguration.class.getName()); assertThat(report.getConditionAndOutcomesBySource()).containsOnlyKeys(MatchingAutoConfiguration.class.getName(), NonMatchingAutoConfiguration.class.getName()); assertThat(report.getConditionAndOutcomesBySource().get(MatchingAutoConfiguration.class.getName())) .satisfies((outcomes) -> assertThat(outcomes).extracting(ConditionAndOutcome::getOutcome) .extracting(ConditionOutcome::isMatch) .containsOnly(true)); assertThat(report.getConditionAndOutcomesBySource().get(NonMatchingAutoConfiguration.class.getName())) .satisfies((outcomes) -> assertThat(outcomes).extracting(ConditionAndOutcome::getOutcome) 
.extracting(ConditionOutcome::isMatch) .containsOnly(false)); } @Test void testDuplicateConditionAndOutcomes() { ConditionAndOutcome outcome1 = new ConditionAndOutcome(this.condition1, new ConditionOutcome(true, "Message 1")); ConditionAndOutcome outcome2 = new ConditionAndOutcome(this.condition2, new ConditionOutcome(true, "Message 2")); ConditionAndOutcome outcome3 = new ConditionAndOutcome(this.condition3, new ConditionOutcome(true, "Message 2")); assertThat(outcome1).isNotEqualTo(outcome2); assertThat(outcome2).isEqualTo(outcome3); ConditionAndOutcomes outcomes = new ConditionAndOutcomes(); outcomes.add(this.condition1, new ConditionOutcome(true, "Message 1")); outcomes.add(this.condition2, new ConditionOutcome(true, "Message 2")); outcomes.add(this.condition3, new ConditionOutcome(true, "Message 2")); assertThat(outcomes).hasSize(2); } @Test void negativeOuterPositiveInnerBean() { AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); TestPropertyValues.of("test.present=true").applyTo(context); context.register(NegativeOuterConfig.class); context.refresh(); ConditionEvaluationReport report = ConditionEvaluationReport.get(context.getBeanFactory()); Map<String, ConditionAndOutcomes> sourceOutcomes = report.getConditionAndOutcomesBySource(); assertThat(context.containsBean("negativeOuterPositiveInnerBean")).isFalse(); String negativeConfig = NegativeOuterConfig.class.getName(); ConditionAndOutcomes negativeOutcome = sourceOutcomes.get(negativeConfig); assertThat(negativeOutcome).isNotNull(); assertThat(negativeOutcome.isFullMatch()).isFalse(); String positiveConfig = NegativeOuterConfig.PositiveInnerConfig.class.getName(); ConditionAndOutcomes positiveOutcome = sourceOutcomes.get(positiveConfig); assertThat(positiveOutcome).isNotNull(); assertThat(positiveOutcome.isFullMatch()).isFalse(); } @Test void reportWhenSameShortNamePresentMoreThanOnceShouldUseFullyQualifiedName() { AnnotationConfigApplicationContext context = new 
AnnotationConfigApplicationContext(); context.register(UniqueShortNameAutoConfiguration.class, org.springframework.boot.autoconfigure.condition.config.first.SampleAutoConfiguration.class, org.springframework.boot.autoconfigure.condition.config.second.SampleAutoConfiguration.class); context.refresh(); ConditionEvaluationReport report = ConditionEvaluationReport.get(context.getBeanFactory()); assertThat(report.getConditionAndOutcomesBySource()).containsKeys( "org.springframework.boot.autoconfigure.condition.config.UniqueShortNameAutoConfiguration", "org.springframework.boot.autoconfigure.condition.config.first.SampleAutoConfiguration", "org.springframework.boot.autoconfigure.condition.config.second.SampleAutoConfiguration"); context.close(); } @Test void reportMessageWhenSameShortNamePresentMoreThanOnceShouldUseFullyQualifiedName() { AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); context.register(UniqueShortNameAutoConfiguration.class, org.springframework.boot.autoconfigure.condition.config.first.SampleAutoConfiguration.class, org.springframework.boot.autoconfigure.condition.config.second.SampleAutoConfiguration.class); context.refresh(); ConditionEvaluationReport report = ConditionEvaluationReport.get(context.getBeanFactory()); String reportMessage = new ConditionEvaluationReportMessage(report).toString(); assertThat(reportMessage).contains("UniqueShortNameAutoConfiguration", "org.springframework.boot.autoconfigure.condition.config.first.SampleAutoConfiguration", "org.springframework.boot.autoconfigure.condition.config.second.SampleAutoConfiguration"); assertThat(reportMessage) .doesNotContain("org.springframework.boot.autoconfigure.condition.config.UniqueShortNameAutoConfiguration"); context.close(); } @Configuration(proxyBeanMethods = false) @Conditional({ ConditionEvaluationReportTests.MatchParseCondition.class, ConditionEvaluationReportTests.NoMatchBeanCondition.class }) static class NegativeOuterConfig { 
@Configuration(proxyBeanMethods = false) @Conditional({ ConditionEvaluationReportTests.MatchParseCondition.class }) static class PositiveInnerConfig { @Bean String negativeOuterPositiveInnerBean() { return "negativeOuterPositiveInnerBean"; } } } static class TestMatchCondition extends SpringBootCondition implements ConfigurationCondition { private final ConfigurationPhase phase; private final boolean match; TestMatchCondition(ConfigurationPhase phase, boolean match) { this.phase = phase; this.match = match; } @Override public ConfigurationPhase getConfigurationPhase() { return this.phase; } @Override public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) { return new ConditionOutcome(this.match, ClassUtils.getShortName(getClass())); } } static class MatchParseCondition extends TestMatchCondition { MatchParseCondition() { super(ConfigurationPhase.PARSE_CONFIGURATION, true); } } static class MatchBeanCondition extends TestMatchCondition { MatchBeanCondition() { super(ConfigurationPhase.REGISTER_BEAN, true); } } static class NoMatchParseCondition extends TestMatchCondition { NoMatchParseCondition() { super(ConfigurationPhase.PARSE_CONFIGURATION, false); } } static class NoMatchBeanCondition extends TestMatchCondition { NoMatchBeanCondition() { super(ConfigurationPhase.REGISTER_BEAN, false); } } @Configuration(proxyBeanMethods = false) @ImportAutoConfiguration({ MatchingAutoConfiguration.class, NonMatchingAutoConfiguration.class, UnconditionalAutoConfiguration.class }) static class Config { } @AutoConfiguration @ConditionalOnProperty(name = "com.example.property", matchIfMissing = true) public static final class MatchingAutoConfiguration { } @AutoConfiguration @ConditionalOnBean(Duration.class) public static final class NonMatchingAutoConfiguration { } @AutoConfiguration public static final class UnconditionalAutoConfiguration { @Bean String example() { return "example"; } } }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReportTests.java
# -*- coding: utf-8 -*- """ *************************************************************************** __init__.py --------------------- Date : May 2014 Copyright : (C) 2014 by Nathan Woodrow Email : woodrow dot nathan at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Nathan Woodrow' __date__ = 'May 2014' __copyright__ = '(C) 2014, Nathan Woodrow' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.PyQt import QtCore from qgis._analysis import *
unknown
codeparrot/codeparrot-clean
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Invoke gcc, looking for warnings, and causing a failure if there are # non-whitelisted warnings. import errno import re import os import sys import subprocess # Note that gcc uses unicode, which may depend on the locale. TODO: # force LANG to be set to en_US.UTF-8 to get consistent warnings. 
allowed_warnings = set([ "return_address.c:62", ]) # Capture the name of the object file, can find it. ofile = None warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''') def interpret_warning(line): """Decode the message from gcc. The messages we care about have a filename, and a warning""" line = line.rstrip('\n') m = warning_re.match(line) if m and m.group(2) not in allowed_warnings: print "error, forbidden warning:", m.group(2) # If there is a warning, remove any object if it exists. if ofile: try: os.remove(ofile) except OSError: pass sys.exit(1) def run_gcc(): args = sys.argv[1:] # Look for -o try: i = args.index('-o') global ofile ofile = args[i+1] except (ValueError, IndexError): pass compiler = sys.argv[0] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE) for line in proc.stderr: print line, interpret_warning(line) result = proc.wait() except OSError as e: result = e.errno if result == errno.ENOENT: print args[0] + ':',e.strerror print 'Is your PATH set correctly?' else: print ' '.join(args), str(e) return result if __name__ == '__main__': status = run_gcc() sys.exit(status)
unknown
codeparrot/codeparrot-clean
""" Provide a mock lock platform. Call init before using it in your tests to ensure clean test data. """ from homeassistant.components.lock import SUPPORT_OPEN, LockEntity from tests.common import MockEntity ENTITIES = {} def init(empty=False): """Initialize the platform with entities.""" global ENTITIES ENTITIES = ( {} if empty else { "support_open": MockLock( name="Support open Lock", is_locked=True, supported_features=SUPPORT_OPEN, unique_id="unique_support_open", ), "no_support_open": MockLock( name="No support open Lock", is_locked=True, supported_features=0, unique_id="unique_no_support_open", ), } ) async def async_setup_platform( hass, config, async_add_entities_callback, discovery_info=None ): """Return mock entities.""" async_add_entities_callback(list(ENTITIES.values())) class MockLock(MockEntity, LockEntity): """Mock Lock class.""" @property def is_locked(self): """Return true if the lock is locked.""" return self._handle("is_locked") @property def supported_features(self): """Return the class of this sensor.""" return self._handle("supported_features")
unknown
codeparrot/codeparrot-clean
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from abc import ABCMeta, abstractmethod from copy import deepcopy import weakref # from ..utils.compat import ignored from .. import log from ..units import Unit, Quantity __all__ = ['MissingDataAssociationException', 'IncompatibleUncertaintiesException', 'NDUncertainty', 'StdDevUncertainty', 'UnknownUncertainty'] class IncompatibleUncertaintiesException(Exception): """This exception should be used to indicate cases in which uncertainties with two different classes can not be propagated. """ class MissingDataAssociationException(Exception): """This exception should be used to indicate that an uncertainty instance has not been associated with a parent `~astropy.nddata.NDData` object. """ class NDUncertainty(metaclass=ABCMeta): """This is the metaclass for uncertainty classes used with `NDData`. Parameters ---------- array : any type, optional The array or value (the parameter name is due to historical reasons) of the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or `NDUncertainty` subclasses are recommended. If the `array` is `list`-like or `numpy.ndarray`-like it will be cast to a plain `numpy.ndarray`. Default is ``None``. unit : `~astropy.units.Unit` or str, optional Unit for the uncertainty ``array``. Strings that can be converted to a `~astropy.units.Unit` are allowed. Default is ``None``. copy : `bool`, optional Indicates whether to save the `array` as a copy. ``True`` copies it before saving, while ``False`` tries to save every parameter as reference. Note however that it is not always possible to save the input as reference. Default is ``True``. Raises ------ IncompatibleUncertaintiesException If given another `NDUncertainty`-like class as ``array`` if their ``uncertainty_type`` is different. """ def __init__(self, array=None, copy=True, unit=None): if isinstance(array, NDUncertainty): # Given an NDUncertainty class or subclass check that the type # is the same. 
            # NOTE(review): this chunk begins inside NDUncertainty.__init__,
            # in the `isinstance(array, NDUncertainty)` branch whose opening
            # `if` lies before the visible region.
            if array.uncertainty_type != self.uncertainty_type:
                raise IncompatibleUncertaintiesException
            # Check if two units are given and take the explicit one then.
            if (unit is not None and unit != array._unit):
                # TODO : Clarify it (see NDData.init for same problem)?
                log.info("overwriting Uncertainty's current "
                         "unit with specified unit.")
            elif array._unit is not None:
                unit = array.unit
            array = array.array

        elif isinstance(array, Quantity):
            # Check if two units are given and take the explicit one then.
            if (unit is not None and array.unit is not None and
                    unit != array.unit):
                log.info("overwriting Quantity's current "
                         "unit with specified unit.")
            elif array.unit is not None:
                unit = array.unit
            array = array.value

        # Normalize the unit (None stays None, everything else goes through
        # Unit() so strings and Unit instances are treated alike).
        if unit is None:
            self._unit = None
        else:
            self._unit = Unit(unit)

        if copy:
            array = deepcopy(array)
            unit = deepcopy(unit)

        self.array = array
        self.parent_nddata = None  # no associated NDData - until it is set!

    @property
    @abstractmethod
    def uncertainty_type(self):
        """`str` : Short description of the type of uncertainty.

        Defined as abstract property so subclasses *have* to override this.
        """
        return None

    @property
    def supports_correlated(self):
        """`bool` : Supports uncertainty propagation with correlated \
 uncertainties?

        .. versionadded:: 1.2
        """
        return False

    @property
    def array(self):
        """`numpy.ndarray` : the uncertainty's value.
        """
        return self._array

    @array.setter
    def array(self, value):
        # Lists and ndarrays are coerced to a plain (non-subclassed) ndarray;
        # any other value (e.g. a scalar or None) is stored as-is.
        if isinstance(value, (list, np.ndarray)):
            value = np.array(value, subok=False, copy=False)
        self._array = value

    @property
    def unit(self):
        """`~astropy.units.Unit` : The unit of the uncertainty, if any.

        Even though it is not enforced the unit should be convertible to the
        ``parent_nddata`` unit. Otherwise uncertainty propagation might give
        wrong results.

        If the unit is not set the unit of the parent will be returned.
        """
        if self._unit is None:
            if (self._parent_nddata is None or
                    self.parent_nddata.unit is None):
                return None
            else:
                # Fall back to the unit of the associated NDData.
                return self.parent_nddata.unit
        return self._unit

    @property
    def parent_nddata(self):
        """`NDData` : reference to `NDData` instance with this uncertainty.

        In case the reference is not set uncertainty propagation will not be
        possible since propagation might need the uncertain data besides the
        uncertainty.
        """
        message = "uncertainty is not associated with an NDData object"
        try:
            if self._parent_nddata is None:
                raise MissingDataAssociationException(message)
            else:
                # The NDData is saved as weak reference so we must call it
                # to get the object the reference points to.
                if isinstance(self._parent_nddata, weakref.ref):
                    return self._parent_nddata()
                else:
                    log.info("parent_nddata should be a weakref to an NDData "
                             "object.")
                    return self._parent_nddata
        except AttributeError:
            # _parent_nddata was never set at all (e.g. __init__ bypassed).
            raise MissingDataAssociationException(message)

    @parent_nddata.setter
    def parent_nddata(self, value):
        if value is not None and not isinstance(value, weakref.ref):
            # Save a weak reference on the uncertainty that points to this
            # instance of NDData. Direct references should NOT be used:
            # https://github.com/astropy/astropy/pull/4799#discussion_r61236832
            value = weakref.ref(value)
        self._parent_nddata = value

    def __repr__(self):
        prefix = self.__class__.__name__ + '('
        try:
            body = np.array2string(self.array, separator=', ', prefix=prefix)
        except AttributeError:
            # In case it wasn't possible to use array2string
            body = str(self.array)
        return ''.join([prefix, body, ')'])

    def __getitem__(self, item):
        """Normal slicing on the array, keep the unit and return a reference.
        """
        return self.__class__(self.array[item], unit=self.unit, copy=False)

    def propagate(self, operation, other_nddata, result_data, correlation):
        """Calculate the resulting uncertainty given an operation on the data.

        .. versionadded:: 1.2

        Parameters
        ----------
        operation : callable
            The operation that is performed on the `NDData`. Supported are
            `numpy.add`, `numpy.subtract`, `numpy.multiply` and
            `numpy.true_divide` (or `numpy.divide`).

        other_nddata : `NDData` instance
            The second operand in the arithmetic operation.

        result_data : `~astropy.units.Quantity` or `numpy.ndarray`
            The result of the arithmetic operations on the data.

        correlation : `numpy.ndarray` or number
            The correlation (rho) is defined between the uncertainties in
            sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
            uncorrelated operands.

        Returns
        -------
        resulting_uncertainty : `NDUncertainty` instance
            Another instance of the same `NDUncertainty` subclass containing
            the uncertainty of the result.

        Raises
        ------
        ValueError
            If the ``operation`` is not supported or if correlation is not
            zero but the subclass does not support correlated uncertainties.

        Notes
        -----
        First this method checks if a correlation is given and the subclass
        implements propagation with correlated uncertainties.
        Then the second uncertainty is converted (or an Exception is raised)
        to the same class in order to do the propagation.
        Then the appropriate propagation method is invoked and the result is
        returned.
        """
        # Check if the subclass supports correlation
        if not self.supports_correlated:
            if isinstance(correlation, np.ndarray) or correlation != 0:
                raise ValueError("{0} does not support uncertainty propagation"
                                 " with correlation."
                                 "".format(self.__class__.__name__))

        # Get the other uncertainty (and convert it to a matching one)
        other_uncert = self._convert_uncertainty(other_nddata.uncertainty)

        # Dispatch on the ufunc's name rather than its identity so both
        # numpy.divide and numpy.true_divide are accepted.
        if operation.__name__ == 'add':
            result = self._propagate_add(other_uncert, result_data,
                                         correlation)
        elif operation.__name__ == 'subtract':
            result = self._propagate_subtract(other_uncert, result_data,
                                              correlation)
        elif operation.__name__ == 'multiply':
            result = self._propagate_multiply(other_uncert, result_data,
                                              correlation)
        elif operation.__name__ in ['true_divide', 'divide']:
            result = self._propagate_divide(other_uncert, result_data,
                                            correlation)
        else:
            raise ValueError('unsupported operation')

        return self.__class__(result, copy=False)

    def _convert_uncertainty(self, other_uncert):
        """Checks if the uncertainties are compatible for propagation.

        Checks if the other uncertainty is `NDUncertainty`-like and if so
        verify that the uncertainty_type is equal. If the latter is not the
        case try returning ``self.__class__(other_uncert)``.

        Parameters
        ----------
        other_uncert : `NDUncertainty` subclass
            The other uncertainty.

        Returns
        -------
        other_uncert : `NDUncertainty` subclass
            but converted to a compatible `NDUncertainty` subclass if
            possible and necessary.

        Raises
        ------
        IncompatibleUncertaintiesException:
            If the other uncertainty cannot be converted to a compatible
            `NDUncertainty` subclass.
        """
        if isinstance(other_uncert, NDUncertainty):
            if self.uncertainty_type == other_uncert.uncertainty_type:
                return other_uncert
            else:
                # Attempt a conversion by re-wrapping in this class; the
                # constructor raises if the types are truly incompatible.
                return self.__class__(other_uncert)
        else:
            raise IncompatibleUncertaintiesException

    @abstractmethod
    def _propagate_add(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_divide(self, other_uncert, result_data, correlation):
        return None


class UnknownUncertainty(NDUncertainty):
    """This class implements any unknown uncertainty type.

    The main purpose of having an unknown uncertainty class is to prevent
    uncertainty propagation.

    Parameters
    ----------
    args, kwargs :
        see `NDUncertainty`
    """

    @property
    def supports_correlated(self):
        """`False` : Uncertainty propagation is *not* possible for this class.
        """
        return False

    @property
    def uncertainty_type(self):
        """``"unknown"`` : `UnknownUncertainty` implements any unknown \
 uncertainty type.
        """
        return 'unknown'

    def _convert_uncertainty(self, other_uncert):
        """Raise an Exception because unknown uncertainty types cannot
        implement propagation.
        """
        msg = "Uncertainties of unknown type cannot be propagated."
        raise IncompatibleUncertaintiesException(msg)

    def _propagate_add(self, other_uncert, result_data, correlation):
        """Not possible for unknown uncertainty types.
        """
        return None

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return None

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return None

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return None


class StdDevUncertainty(NDUncertainty):
    """Standard deviation uncertainty assuming first order gaussian error
    propagation.
    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `StdDevUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parents `NDData` unit.
    The unit of the resulting uncertainty will have the same unit as the
    resulting data. Also support for correlation is possible but requires the
    correlation as input. It cannot handle correlation determination itself.

    Parameters
    ----------
    args, kwargs :
        see `NDUncertainty`

    Examples
    --------
    `StdDevUncertainty` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, StdDevUncertainty
        >>> ndd = NDData([1,2,3],
        ...              uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        StdDevUncertainty([0.1, 0.1, 0.1])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        StdDevUncertainty([0.2])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 2
        >>> ndd.uncertainty
        StdDevUncertainty(2)

    .. note::
        The unit will not be displayed.
    """

    @property
    def supports_correlated(self):
        """`True` : `StdDevUncertainty` allows to propagate correlated \
 uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    @property
    def uncertainty_type(self):
        """``"std"`` : `StdDevUncertainty` implements standard deviation.
        """
        return 'std'

    def _convert_uncertainty(self, other_uncert):
        # Stricter than the base class: only another StdDevUncertainty is
        # accepted, no conversion is attempted.
        if isinstance(other_uncert, StdDevUncertainty):
            return other_uncert
        else:
            raise IncompatibleUncertaintiesException

    def _propagate_add(self, other_uncert, result_data, correlation):
        # A missing uncertainty array on either operand is treated as zero
        # uncertainty for that operand.
        if self.array is None:
            # Formula: sigma = dB

            if other_uncert.unit is not None and (
                        result_data.unit != other_uncert.unit):
                # If the other uncertainty has a unit and this unit differs
                # from the unit of the result convert it to the results unit
                return (other_uncert.array * other_uncert.unit).to(
                            result_data.unit).value
            else:
                # Copy the result because _propagate will not copy it but for
                # arithmetic operations users will expect copies.
                return deepcopy(other_uncert.array)

        elif other_uncert.array is None:
            # Formula: sigma = dA

            if self.unit is not None and self.unit != self.parent_nddata.unit:
                # If the uncertainty has a different unit than the result we
                # need to convert it to the results unit.
                return self.unit.to(result_data.unit, self.array)
            else:
                # Copy the result because _propagate will not copy it but for
                # arithmetic operations users will expect copies.
                return deepcopy(self.array)

        else:
            # Formula: sigma = sqrt(dA**2 + dB**2 + 2*cor*dA*dB)

            # Calculate: dA (this) and dB (other)
            if self.unit != other_uncert.unit:
                # In case the two uncertainties (or data) have different units
                # we need to use quantity operations. The case where only one
                # has a unit and the other doesn't is not possible with
                # addition and would have raised an exception in the data
                # computation
                this = self.array * self.unit
                other = other_uncert.array * other_uncert.unit
            else:
                # Since both units are the same or None we can just use
                # numpy operations
                this = self.array
                other = other_uncert.array

            # Determine the result depending on the correlation
            if isinstance(correlation, np.ndarray) or correlation != 0:
                corr = 2 * correlation * this * other
                result = np.sqrt(this**2 + other**2 + corr)
            else:
                result = np.sqrt(this**2 + other**2)

            if isinstance(result, Quantity):
                # In case we worked with quantities we need to return the
                # uncertainty that has the same unit as the resulting data.
                # Note that this call is fast if the units are the same.
                return result.to_value(result_data.unit)
            else:
                return result

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        # Since the formulas are equivalent to addition you should look at the
        # explanations provided in _propagate_add

        if self.array is None:
            if other_uncert.unit is not None and (
                        result_data.unit != other_uncert.unit):
                return (other_uncert.array * other_uncert.unit).to(
                            result_data.unit).value
            else:
                return deepcopy(other_uncert.array)
        elif other_uncert.array is None:
            if self.unit is not None and self.unit != self.parent_nddata.unit:
                return self.unit.to(result_data.unit, self.array)
            else:
                return deepcopy(self.array)
        else:
            # Formula: sigma = sqrt(dA**2 + dB**2 - 2*cor*dA*dB)
            if self.unit != other_uncert.unit:
                this = self.array * self.unit
                other = other_uncert.array * other_uncert.unit
            else:
                this = self.array
                other = other_uncert.array
            if isinstance(correlation, np.ndarray) or correlation != 0:
                corr = 2 * correlation * this * other
                # The only difference to addition is that the correlation is
                # subtracted.
                result = np.sqrt(this**2 + other**2 - corr)
            else:
                result = np.sqrt(this**2 + other**2)
            if isinstance(result, Quantity):
                return result.to_value(result_data.unit)
            else:
                return result

    def _propagate_multiply(self, other_uncert, result_data, correlation):

        # For multiplication we don't need the result as quantity
        if isinstance(result_data, Quantity):
            result_data = result_data.value

        if self.array is None:
            # Formula: sigma = |A| * dB

            # We want the result to have the same unit as the parent, so we
            # only need to convert the unit of the other uncertainty if it is
            # different from its data's unit.
            if other_uncert.unit != other_uncert.parent_nddata.unit:
                other = (other_uncert.array * other_uncert.unit).to(
                    other_uncert.parent_nddata.unit).value
            else:
                other = other_uncert.array
            return np.abs(self.parent_nddata.data * other)

        elif other_uncert.array is None:
            # Formula: sigma = dA * |B|

            # Just the reversed case
            if self.unit != self.parent_nddata.unit:
                this = (self.array * self.unit).to(
                    self.parent_nddata.unit).value
            else:
                this = self.array
            return np.abs(other_uncert.parent_nddata.data * this)

        else:
            # Formula: sigma = |AB|*sqrt((dA/A)**2+(dB/B)**2+2*dA/A*dB/B*cor)

            # This formula is not very handy since it generates NaNs for every
            # zero in A and B. So we rewrite it:

            # Formula: sigma = sqrt((dA*B)**2 + (dB*A)**2 + (2 * cor * ABdAdB))

            # Calculate: dA * B (left)
            if self.unit != self.parent_nddata.unit:
                # To get the unit right we need to convert the unit of
                # each uncertainty to the same unit as it's parent
                left = ((self.array * self.unit).to(
                    self.parent_nddata.unit).value *
                    other_uncert.parent_nddata.data)
            else:
                left = self.array * other_uncert.parent_nddata.data

            # Calculate: dB * A (right)
            if other_uncert.unit != other_uncert.parent_nddata.unit:
                right = ((other_uncert.array * other_uncert.unit).to(
                    other_uncert.parent_nddata.unit).value *
                    self.parent_nddata.data)
            else:
                right = other_uncert.array * self.parent_nddata.data

            if isinstance(correlation, np.ndarray) or correlation != 0:
                corr = (2 * correlation * left * right)
                return np.sqrt(left**2 + right**2 + corr)
            else:
                return np.sqrt(left**2 + right**2)

    def _propagate_divide(self, other_uncert, result_data, correlation):

        # For division we don't need the result as quantity
        if isinstance(result_data, Quantity):
            result_data = result_data.value

        if self.array is None:
            # Formula: sigma = |(A / B) * (dB / B)|

            # Calculate: dB / B (right)
            if other_uncert.unit != other_uncert.parent_nddata.unit:
                # We need (dB / B) to be dimensionless so we convert
                # (if necessary) dB to the same unit as B
                right = ((other_uncert.array * other_uncert.unit).to(
                    other_uncert.parent_nddata.unit).value /
                    other_uncert.parent_nddata.data)
            else:
                right = (other_uncert.array /
                         other_uncert.parent_nddata.data)
            return np.abs(result_data * right)

        elif other_uncert.array is None:
            # Formula: sigma = dA / |B|.

            # Calculate: dA
            if self.unit != self.parent_nddata.unit:
                # We need to convert dA to the unit of A to have a result that
                # matches the resulting data's unit.
                left = (self.array * self.unit).to(
                    self.parent_nddata.unit).value
            else:
                left = self.array

            return np.abs(left / other_uncert.parent_nddata.data)

        else:
            # Formula: sigma = |A/B|*sqrt((dA/A)**2+(dB/B)**2-2*dA/A*dB/B*cor)

            # As with multiplication this formula creates NaNs where A is zero.
            # So I'll rewrite it again:
            # => sigma = sqrt((dA/B)**2 + (AdB/B**2)**2 - 2*cor*AdAdB/B**3)

            # So we need to calculate dA/B in the same units as the result
            # and the dimensionless dB/B to get a resulting uncertainty with
            # the same unit as the data.

            # Calculate: dA/B (left)
            if self.unit != self.parent_nddata.unit:
                left = ((self.array * self.unit).to(
                    self.parent_nddata.unit).value /
                    other_uncert.parent_nddata.data)
            else:
                left = self.array / other_uncert.parent_nddata.data

            # Calculate: dB/B (right); multiplying by result_data (= A/B)
            # yields the AdB/B**2 term.
            if other_uncert.unit != other_uncert.parent_nddata.unit:
                right = ((other_uncert.array * other_uncert.unit).to(
                    other_uncert.parent_nddata.unit).value /
                    other_uncert.parent_nddata.data) * result_data
            else:
                right = (result_data * other_uncert.array /
                         other_uncert.parent_nddata.data)

            if isinstance(correlation, np.ndarray) or correlation != 0:
                corr = 2 * correlation * left * right
                # This differs from multiplication because the correlation
                # term needs to be subtracted
                return np.sqrt(left**2 + right**2 - corr)
            else:
                return np.sqrt(left**2 + right**2)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python

# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pkg_resources
import sys

from grpc_tools import _protoc_compiler


def main(command_arguments):
    """Run the protocol buffer compiler with the given command-line arguments.

    Args:
      command_arguments: a list of strings representing command line arguments
        to `protoc`.
    """
    # The native compiler entry point expects byte strings, so encode each
    # argument before handing the list over.
    encoded_arguments = [argument.encode() for argument in command_arguments]
    return _protoc_compiler.run_main(encoded_arguments)


if __name__ == '__main__':
    # Expose the well-known protos bundled with grpc_tools through an
    # implicit include path appended to the user-supplied arguments.
    proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
    protoc_arguments = sys.argv + ['-I{}'.format(proto_include)]
    sys.exit(main(protoc_arguments))
unknown
codeparrot/codeparrot-clean
<!--- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> Service Level Authorization Guide ================================= <!-- MACRO{toc|fromDepth=0|toDepth=3} --> Purpose ------- This document describes how to configure and manage Service Level Authorization for Hadoop. Prerequisites ------------- Make sure Hadoop is installed, configured and setup correctly. For more information see: * [Single Node Setup](./SingleCluster.html) for first-time users. * [Cluster Setup](./ClusterSetup.html) for large, distributed clusters. Overview -------- Service Level Authorization is the initial authorization mechanism to ensure clients connecting to a particular Hadoop service have the necessary, pre-configured, permissions and are authorized to access the given service. For example, a MapReduce cluster can use this mechanism to allow a configured list of users/groups to submit jobs. The `$HADOOP_CONF_DIR/hadoop-policy.xml` configuration file is used to define the access control lists for various Hadoop services. Service Level Authorization is performed much before to other access control checks such as file-permission checks, access control on job queues etc. Configuration ------------- This section describes how to configure service-level authorization via the configuration file `$HADOOP_CONF_DIR/hadoop-policy.xml`. ### Enable Service Level Authorization By default, service-level authorization is disabled for Hadoop. 
To enable it set the configuration property hadoop.security.authorization to true in `$HADOOP_CONF_DIR/core-site.xml`. ### Hadoop Services and Configuration Properties This section lists the various Hadoop services and their configuration knobs: | Property | Service | |:---- |:---- | | security.client.protocol.acl | ACL for ClientProtocol, which is used by user code via the DistributedFileSystem. | | security.client.datanode.protocol.acl | ACL for ClientDatanodeProtocol, the client-to-datanode protocol for block recovery. | | security.datanode.protocol.acl | ACL for DatanodeProtocol, which is used by datanodes to communicate with the namenode. | | security.inter.datanode.protocol.acl | ACL for InterDatanodeProtocol, the inter-datanode protocol for updating generation timestamp. | | security.namenode.protocol.acl | ACL for NamenodeProtocol, the protocol used by the secondary namenode to communicate with the namenode. | | security.job.client.protocol.acl | ACL for JobSubmissionProtocol, used by job clients to communciate with the resourcemanager for job submission, querying job status etc. | | security.job.task.protocol.acl | ACL for TaskUmbilicalProtocol, used by the map and reduce tasks to communicate with the parent nodemanager. | | security.refresh.policy.protocol.acl | ACL for RefreshAuthorizationPolicyProtocol, used by the dfsadmin and rmadmin commands to refresh the security policy in-effect. | | security.ha.service.protocol.acl | ACL for HAService protocol used by HAAdmin to manage the active and stand-by states of namenode. | ### Access Control Lists `$HADOOP_CONF_DIR/hadoop-policy.xml` defines an access control list for each Hadoop service. Every access control list has a simple format: The list of users and groups are both comma separated list of names. The two lists are separated by a space. Example: `user1,user2 group1,group2`. 
Add a blank at the beginning of the line if only a list of groups is to be provided, equivalently a comma-separated list of users followed by a space or nothing implies only a set of given users. A special value of `*` implies that all users are allowed to access the service. If access control list is not defined for a service, the value of `security.service.authorization.default.acl` is applied. If `security.service.authorization.default.acl` is not defined, `*` is applied. ### Blocked Access Control Lists In some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked". Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked` For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list. If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied. ### Access Control using Lists of IP Addresses, Host Names and IP Ranges Access to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. 
If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts. If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied. It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked". Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked` If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied. ### Refreshing Service Level Authorization Configuration The service-level authorization configuration for the NameNode and ResourceManager can be changed without restarting either of the Hadoop master daemons. The cluster administrator can change `$HADOOP_CONF_DIR/hadoop-policy.xml` on the master nodes and instruct the NameNode and ResourceManager to reload their respective configurations via the `-refreshServiceAcl` switch to `dfsadmin` and `rmadmin` commands respectively. Refresh the service-level authorization configuration for the NameNode: $ bin/hdfs dfsadmin -refreshServiceAcl Refresh the service-level authorization configuration for the ResourceManager: $ bin/yarn rmadmin -refreshServiceAcl Of course, one can use the `security.refresh.policy.protocol.acl` property in `$HADOOP_CONF_DIR/hadoop-policy.xml` to restrict access to the ability to refresh the service-level authorization configuration to certain users/groups. 
### Examples Allow only users `alice`, `bob` and users in the `mapreduce` group to submit jobs to the MapReduce cluster: <property> <name>security.job.client.protocol.acl</name> <value>alice,bob mapreduce</value> </property> Allow only DataNodes running as the users who belong to the group datanodes to communicate with the NameNode: <property> <name>security.datanode.protocol.acl</name> <value>datanodes</value> </property> Allow any user to talk to the HDFS cluster as a DFSClient: <property> <name>security.client.protocol.acl</name> <value>*</value> </property>
unknown
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

# For license information, please see license.txt

from __future__ import unicode_literals
import frappe

@frappe.whitelist()
def get_time_log_list(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query handler: return Time Log rows matching ``filters``.

	The standard search arguments (``txt``, ``searchfield``, ``start``,
	``page_len``) are accepted for API compatibility with frappe's query
	interface but only ``filters`` is applied here.
	"""
	return frappe.db.get_values("Time Log", filters,
		["name", "activity_type", "owner"])

@frappe.whitelist()
def query_task(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query handler: search Tasks by ``searchfield`` or subject.

	Results are ordered so that subject-prefix matches come first, then
	``searchfield``-prefix matches, then the remaining matches.
	"""
	from frappe.widgets.reportview import build_match_conditions
	search_string = "%%%s%%" % txt
	order_by_string = "%s%%" % txt
	match_conditions = build_match_conditions("Task")
	# BUG FIX: a trailing space is required after "and"; without it the
	# generated SQL fuses "and" onto the first match condition
	# (e.g. "and`tabTask`...") and fails to parse.
	match_conditions = ("and " + match_conditions) if match_conditions else ""
	# NOTE(review): ``searchfield`` is interpolated directly into the SQL
	# string. It originates from frappe's query interface rather than free
	# user text, but validating it against the Task meta fields would be
	# safer — confirm upstream sanitization.
	return frappe.db.sql("""select name, subject from `tabTask`
		where (`%s` like %s or `subject` like %s) %s
		order by
			case when `subject` like %s then 0 else 1 end,
			case when `%s` like %s then 0 else 1 end,
			`%s`, subject
		limit %s, %s""" %
		(searchfield, "%s", "%s", match_conditions, "%s",
			searchfield, "%s", searchfield, "%s", "%s"),
		(search_string, search_string, order_by_string, order_by_string,
			start, page_len))
unknown
codeparrot/codeparrot-clean
// // compose.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_ASIO_COMPOSE_HPP #define BOOST_ASIO_COMPOSE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include <boost/asio/detail/config.hpp> #include <boost/asio/composed.hpp> #include <boost/asio/detail/push_options.hpp> namespace boost { namespace asio { /// Launch an asynchronous operation with a stateful implementation. /** * The async_compose function simplifies the implementation of composed * asynchronous operations automatically wrapping a stateful function object * with a conforming intermediate completion handler. * * @param implementation A function object that contains the implementation of * the composed asynchronous operation. The first argument to the function * object is a non-const reference to the enclosing intermediate completion * handler. The remaining arguments are any arguments that originate from the * completion handlers of any asynchronous operations performed by the * implementation. * * @param token The completion token. * * @param io_objects_or_executors Zero or more I/O objects or I/O executors for * which outstanding work must be maintained. * * @par Per-Operation Cancellation * By default, terminal per-operation cancellation is enabled for * composed operations that are implemented using @c async_compose. To * disable cancellation for the composed operation, or to alter its * supported cancellation types, call the @c self object's @c * reset_cancellation_state function. 
 *
 * @par Example:
 *
 * @code struct async_echo_implementation
 * {
 *   tcp::socket& socket_;
 *   boost::asio::mutable_buffer buffer_;
 *   enum { starting, reading, writing } state_;
 *
 *   template <typename Self>
 *   void operator()(Self& self,
 *       boost::system::error_code error = {},
 *       std::size_t n = 0)
 *   {
 *     switch (state_)
 *     {
 *     case starting:
 *       state_ = reading;
 *       socket_.async_read_some(
 *           buffer_, std::move(self));
 *       break;
 *     case reading:
 *       if (error)
 *       {
 *         self.complete(error, 0);
 *       }
 *       else
 *       {
 *         state_ = writing;
 *         boost::asio::async_write(socket_, buffer_,
 *             boost::asio::transfer_exactly(n),
 *             std::move(self));
 *       }
 *       break;
 *     case writing:
 *       self.complete(error, n);
 *       break;
 *     }
 *   }
 * };
 *
 * template <typename CompletionToken>
 * auto async_echo(tcp::socket& socket,
 *     boost::asio::mutable_buffer buffer,
 *     CompletionToken&& token)
 *   -> decltype(
 *     boost::asio::async_compose<CompletionToken,
 *       void(boost::system::error_code, std::size_t)>(
 *         std::declval<async_echo_implementation>(),
 *         token, socket))
 * {
 *   return boost::asio::async_compose<CompletionToken,
 *     void(boost::system::error_code, std::size_t)>(
 *       async_echo_implementation{socket, buffer,
 *         async_echo_implementation::starting},
 *       token, socket);
 * } @endcode
 */
template <typename CompletionToken, typename Signature,
    typename Implementation, typename... IoObjectsOrExecutors>
inline auto async_compose(Implementation&& implementation,
    type_identity_t<CompletionToken>& token,
    IoObjectsOrExecutors&&... io_objects_or_executors)
  -> decltype(
    async_initiate<CompletionToken, Signature>(
      composed<Signature>(static_cast<Implementation&&>(implementation),
        static_cast<IoObjectsOrExecutors&&>(io_objects_or_executors)...),
      token))
{
  // Thin forwarding wrapper: composed<> adapts the stateful implementation
  // into a conforming intermediate completion handler (maintaining work on
  // the given I/O objects/executors), and async_initiate launches it with
  // the completion-token machinery applied.
  return async_initiate<CompletionToken, Signature>(
      composed<Signature>(static_cast<Implementation&&>(implementation),
        static_cast<IoObjectsOrExecutors&&>(io_objects_or_executors)...),
      token);
}

} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_COMPOSE_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/asio/compose.hpp
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT

//! `tauri android build` — builds the app for Android and produces APK/AAB
//! bundles via cargo-mobile2, optionally opening Android Studio afterwards.

use super::{
  configure_cargo, delete_codegen_vars, ensure_init, env, get_app, get_config, inject_resources,
  log_finished, open_and_wait, MobileTarget, OptionsHandle,
};
use crate::{
  build::Options as BuildOptions,
  error::Context,
  helpers::{
    app_paths::Dirs,
    config::{get_config as get_tauri_config, ConfigMetadata},
    flock,
  },
  interface::{AppInterface, Options as InterfaceOptions},
  mobile::{android::generate_tauri_properties, write_options, CliOptions, TargetDevice},
  ConfigValue, Error, Result,
};
use clap::{ArgAction, Parser};
use cargo_mobile2::{
  android::{aab, apk, config::Config as AndroidConfig, env::Env, target::Target},
  opts::{NoiseLevel, Profile},
  target::TargetTrait,
};

use std::env::set_current_dir;
use std::path::Path;

/// CLI options for `tauri android build`.
#[derive(Debug, Clone, Parser)]
#[clap(
  about = "Build your app in release mode for Android and generate APKs and AABs",
  long_about = "Build your app in release mode for Android and generate APKs and AABs. It makes use of the `build.frontendDist` property from your `tauri.conf.json` file. It also runs your `build.beforeBuildCommand` which usually builds your frontend into `build.frontendDist`."
)]
pub struct Options {
  /// Builds with the debug flag
  #[clap(short, long)]
  pub debug: bool,
  /// Which targets to build (all by default).
  #[clap(
    short,
    long = "target",
    action = ArgAction::Append,
    num_args(0..),
    value_parser(clap::builder::PossibleValuesParser::new(Target::name_list()))
  )]
  pub targets: Option<Vec<String>>,
  /// List of cargo features to activate
  #[clap(short, long, action = ArgAction::Append, num_args(0..), value_delimiter = ',')]
  pub features: Vec<String>,
  /// JSON strings or paths to JSON, JSON5 or TOML files to merge with the default configuration file
  ///
  /// Configurations are merged in the order they are provided, which means a particular value overwrites previous values when a config key-value pair conflicts.
  ///
  /// Note that a platform-specific file is looked up and merged with the default file by default
  /// (tauri.macos.conf.json, tauri.linux.conf.json, tauri.windows.conf.json, tauri.android.conf.json and tauri.ios.conf.json)
  /// but you can use this for more specific use cases such as different build flavors.
  #[clap(short, long)]
  pub config: Vec<ConfigValue>,
  /// Whether to split the APKs and AABs per ABIs.
  #[clap(long)]
  pub split_per_abi: bool,
  /// Build APKs.
  #[clap(long)]
  pub apk: bool,
  /// Build AABs.
  #[clap(long)]
  pub aab: bool,
  // Internal-only flag (not exposed on the CLI): suppress APK/AAB bundling.
  #[clap(skip)]
  pub skip_bundle: bool,
  /// Open Android Studio
  #[clap(short, long)]
  pub open: bool,
  /// Skip prompting for values
  #[clap(long, env = "CI")]
  pub ci: bool,
  /// Command line arguments passed to the runner.
  /// Use `--` to explicitly mark the start of the arguments.
  /// e.g. `tauri android build -- [runnerArgs]`.
  #[clap(last(true))]
  pub args: Vec<String>,
  /// Do not error out if a version mismatch is detected on a Tauri package.
  ///
  /// Only use this when you are sure the mismatch is incorrectly detected as version mismatched Tauri packages can lead to unknown behavior.
  #[clap(long)]
  pub ignore_version_mismatches: bool,
  /// Target device of this build
  #[clap(skip)]
  pub target_device: Option<TargetDevice>,
}

// Map the Android-specific options onto the generic desktop build options so
// the shared build pipeline (`crate::build::setup`) can be reused.
impl From<Options> for BuildOptions {
  fn from(options: Options) -> Self {
    Self {
      runner: None,
      debug: options.debug,
      target: None,
      features: options.features,
      bundles: None,
      no_bundle: false,
      config: options.config,
      args: options.args,
      ci: options.ci,
      skip_stapling: false,
      ignore_version_mismatches: options.ignore_version_mismatches,
      no_sign: false,
    }
  }
}

/// Handles produced by a successful build, returned to the caller
/// (e.g. so `android dev`-style flows can reuse the config/interface).
pub struct BuiltApplication {
  pub config: AndroidConfig,
  pub interface: AppInterface,
  // prevent drop: keeps the written CLI-options file alive for the
  // lifetime of the built application.
  #[allow(dead_code)]
  options_handle: OptionsHandle,
}

/// CLI entry point: resolves project paths and the merged Tauri config,
/// then delegates to [`run`].
pub fn command(options: Options, noise_level: NoiseLevel) -> Result<BuiltApplication> {
  let dirs = crate::helpers::app_paths::resolve_dirs();

  let tauri_config = get_tauri_config(
    tauri_utils::platform::Target::Android,
    &options
      .config
      .iter()
      .map(|conf| &conf.0)
      .collect::<Vec<_>>(),
    dirs.tauri,
  )?;

  run(options, noise_level, &dirs, &tauri_config)
}

/// Performs the full Android build: prepares the cargo/gradle project,
/// installs the rust target if missing, runs an initial build, then
/// produces the requested APK/AAB artifacts.
pub fn run(
  options: Options,
  noise_level: NoiseLevel,
  dirs: &Dirs,
  tauri_config: &ConfigMetadata,
) -> Result<BuiltApplication> {
  delete_codegen_vars();

  let mut build_options: BuildOptions = options.clone().into();
  // The first requested target (or the default one) drives the initial
  // cargo build; remaining targets are handled by apk/aab below.
  let first_target = Target::all()
    .get(
      options
        .targets
        .as_ref()
        .and_then(|l| l.first().map(|t| t.as_str()))
        .unwrap_or(Target::DEFAULT_KEY),
    )
    .unwrap();
  build_options.target = Some(first_target.triple.into());

  let interface = AppInterface::new(tauri_config, build_options.target.clone(), dirs.tauri)?;
  interface.build_options(&mut build_options.args, &mut build_options.features, true);

  let app = get_app(MobileTarget::Android, tauri_config, &interface, dirs.tauri);
  let (config, metadata) = get_config(
    &app,
    tauri_config,
    &build_options.features,
    &CliOptions {
      dev: false,
      features: build_options.features.clone(),
      args: build_options.args.clone(),
      noise_level,
      vars: Default::default(),
      config: build_options.config.clone(),
      target_device: None,
    },
  );

  let profile = if options.debug {
    Profile::Debug
  } else {
    Profile::Release
  };

  set_current_dir(dirs.tauri).context("failed to set current directory to Tauri directory")?;

  ensure_init(
    tauri_config,
    config.app(),
    config.project_dir(),
    MobileTarget::Android,
    options.ci,
  )?;

  let mut env = env(options.ci)?;
  configure_cargo(&mut env, &config)?;
  generate_tauri_properties(&config, tauri_config, false)?;

  crate::build::setup(&interface, &mut build_options, tauri_config, dirs, true)?;

  // Install the rustup target on demand so a fresh machine can build.
  let installed_targets =
    crate::interface::rust::installation::installed_targets().unwrap_or_default();
  if !installed_targets.contains(&first_target.triple().into()) {
    log::info!("Installing target {}", first_target.triple());
    first_target
      .install()
      .map_err(|error| Error::CommandFailed {
        command: "rustup target add".to_string(),
        error,
      })
      .context("failed to install target")?;
  }

  // run an initial build to initialize plugins
  first_target
    .build(&config, &metadata, &env, noise_level, true, profile)
    .context("failed to build Android app")?;

  let open = options.open;
  let options_handle = run_build(
    &interface,
    options,
    build_options,
    tauri_config,
    profile,
    &config,
    &mut env,
    noise_level,
    dirs.tauri,
  )?;

  if open {
    open_and_wait(&config, &env);
  }

  Ok(BuiltApplication {
    config,
    interface,
    options_handle,
  })
}

/// Produces the APK/AAB artifacts. Writes the CLI options file (consumed by
/// the gradle build), holds a file lock on the out dir, and logs the
/// generated artifact paths.
#[allow(clippy::too_many_arguments)]
fn run_build(
  interface: &AppInterface,
  mut options: Options,
  build_options: BuildOptions,
  tauri_config: &ConfigMetadata,
  profile: Profile,
  config: &AndroidConfig,
  env: &mut Env,
  noise_level: NoiseLevel,
  tauri_dir: &Path,
) -> Result<OptionsHandle> {
  if !(options.skip_bundle || options.apk || options.aab) {
    // if the user didn't specify the format to build, we'll do both
    options.apk = true;
    options.aab = true;
  }

  let interface_options = InterfaceOptions {
    debug: build_options.debug,
    target: build_options.target.clone(),
    args: build_options.args.clone(),
    ..Default::default()
  };

  let app_settings = interface.app_settings();
  let out_dir = app_settings.out_dir(&interface_options, tauri_dir)?;
  // Serialize concurrent Android builds over the same output directory.
  let _lock = flock::open_rw(out_dir.join("lock").with_extension("android"), "Android")?;

  let cli_options = CliOptions {
    dev: false,
    features: build_options.features.clone(),
    args: build_options.args.clone(),
    noise_level,
    vars: Default::default(),
    config: build_options.config,
    target_device: options.target_device.clone(),
  };
  let handle = write_options(tauri_config, cli_options)?;

  inject_resources(config, tauri_config)?;

  let apk_outputs = if options.apk {
    apk::build(
      config,
      env,
      noise_level,
      profile,
      get_targets_or_all(options.targets.clone().unwrap_or_default())?,
      options.split_per_abi,
    )
    .context("failed to build APK")?
  } else {
    Vec::new()
  };

  let aab_outputs = if options.aab {
    aab::build(
      config,
      env,
      noise_level,
      profile,
      get_targets_or_all(options.targets.unwrap_or_default())?,
      options.split_per_abi,
    )
    .context("failed to build AAB")?
  } else {
    Vec::new()
  };

  if !apk_outputs.is_empty() {
    log_finished(apk_outputs, "APK");
  }
  if !aab_outputs.is_empty() {
    log_finished(aab_outputs, "AAB");
  }

  Ok(handle)
}

/// Resolves target names to cargo-mobile2 `Target`s; an empty list means
/// "all known Android targets".
fn get_targets_or_all<'a>(targets: Vec<String>) -> Result<Vec<&'a Target<'a>>> {
  if targets.is_empty() {
    Ok(Target::all().iter().map(|t| t.1).collect())
  } else {
    let mut outs = Vec::new();

    let possible_targets = Target::all()
      .keys()
      .map(|key| key.to_string())
      .collect::<Vec<String>>()
      .join(",");

    for t in targets {
      let target = Target::for_name(&t).with_context(|| {
        format!("Target {t} is invalid; the possible targets are {possible_targets}",)
      })?;
      outs.push(target);
    }
    Ok(outs)
  }
}
rust
github
https://github.com/tauri-apps/tauri
crates/tauri-cli/src/mobile/android/build.rs
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# NOTE: this is a Python 2 script (it uses print statements); `Step`,
# `ScriptsBase`, `os`, `re`, `MSub`, `TextToFile`, `FileToText`,
# `VERSION_FILE` etc. come from the `common_includes` star import.
# Workflow: cherry-pick a list of master revisions onto a release branch,
# bump V8_PATCH_LEVEL, upload the CL, land it, and tag the new version.

import argparse
from collections import OrderedDict
import sys

from common_includes import *


def IsSvnNumber(rev):
  # Short all-digit strings look like legacy SVN revision numbers, which
  # this git-based workflow rejects (full git hashes are required).
  return rev.isdigit() and len(rev) < 8


class Preparation(Step):
  MESSAGE = "Preparation."

  def RunStep(self):
    # A sentinel file guards against two merges running at once; --force
    # removes it, and a resumed run (step != 0) is allowed through.
    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
      if self._options.force:
        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
      elif self._options.step == 0:  # pragma: no cover
        self.Die("A merge is already in progress")
    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()

    self.InitialEnvironmentChecks(self.default_cwd)
    if self._options.branch:
      self["merge_to_branch"] = self._options.branch
    else:  # pragma: no cover
      self.Die("Please specify a branch to merge to")

    self.CommonPrepare()
    self.PrepareBranch()


class CreateBranch(Step):
  MESSAGE = "Create a fresh branch for the patch."

  def RunStep(self):
    self.GitCreateBranch(self.Config("BRANCHNAME"),
                         self.vc.RemoteBranch(self["merge_to_branch"]))


class SearchArchitecturePorts(Step):
  MESSAGE = "Search for corresponding architecture ports."

  def RunStep(self):
    # De-duplicate the requested revisions while preserving order.
    self["full_revision_list"] = list(OrderedDict.fromkeys(
        self._options.revisions))
    port_revision_list = []
    for revision in self["full_revision_list"]:
      # Search for commits which matches the "Port XXX" pattern.
      git_hashes = self.GitLog(reverse=True, format="%H",
                               grep="Port %s" % revision,
                               branch=self.vc.RemoteMasterBranch())
      for git_hash in git_hashes.splitlines():
        revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)

        # Is this revision included in the original revision list?
        if git_hash in self["full_revision_list"]:
          print("Found port of %s -> %s (already included): %s"
                % (revision, git_hash, revision_title))
        else:
          print("Found port of %s -> %s: %s"
                % (revision, git_hash, revision_title))
          port_revision_list.append(git_hash)

    # Do we find any port?
    if len(port_revision_list) > 0:
      if self.Confirm("Automatically add corresponding ports (%s)?"
                      % ", ".join(port_revision_list)):
        #: 'y': Add ports to revision list.
        self["full_revision_list"].extend(port_revision_list)


class CreateCommitMessage(Step):
  MESSAGE = "Create commit message."

  def RunStep(self):
    # Stringify: ["abcde", "12345"] -> "abcde, 12345"
    self["revision_list"] = ", ".join(self["full_revision_list"])

    if not self["revision_list"]:  # pragma: no cover
      self.Die("Revision list is empty.")

    action_text = "Merged %s"

    # The commit message title is added below after the version is specified.
    msg_pieces = [
      "\n".join(action_text % s for s in self["full_revision_list"]),
    ]
    msg_pieces.append("\n\n")

    for commit_hash in self["full_revision_list"]:
      patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
      msg_pieces.append("%s\n\n" % patch_merge_desc)

    # Aggregate the BUG= lines of every merged commit into one BUG= line.
    bugs = []
    for commit_hash in self["full_revision_list"]:
      msg = self.GitLog(n=1, git_hash=commit_hash)
      for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
        bugs.extend(s.strip() for s in bug.split(","))
    bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
    if bug_aggregate:
      msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)

    self["new_commit_msg"] = "".join(msg_pieces)


class ApplyPatches(Step):
  MESSAGE = "Apply patches for selected revisions."

  def RunStep(self):
    for commit_hash in self["full_revision_list"]:
      print("Applying patch for %s to %s..."
            % (commit_hash, self["merge_to_branch"]))
      patch = self.GitGetPatch(commit_hash)
      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
    if self._options.patch:
      self.ApplyPatch(self._options.patch)


class PrepareVersion(Step):
  MESSAGE = "Prepare version file."

  def RunStep(self):
    # This is used to calculate the patch level increment.
    self.ReadAndPersistVersion()


class IncrementVersion(Step):
  MESSAGE = "Increment version number."

  def RunStep(self):
    new_patch = str(int(self["patch"]) + 1)
    if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
                    "fire up your EDITOR on %s so you can make arbitrary "
                    "changes. When you're done, save the file and exit your "
                    "EDITOR.)" % VERSION_FILE):
      # Rewrite only the V8_PATCH_LEVEL define, preserving its whitespace.
      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
      text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
                  r"\g<space>%s" % new_patch,
                  text)
      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
    else:
      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
    self.ReadAndPersistVersion("new_")
    self["version"] = "%s.%s.%s.%s" % (self["new_major"],
                                       self["new_minor"],
                                       self["new_build"],
                                       self["new_patch"])


class CommitLocal(Step):
  MESSAGE = "Commit to local branch."

  def RunStep(self):
    # Add a commit message title.
    self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
    self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
                                           self["new_commit_msg"])
    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))


class CommitRepository(Step):
  MESSAGE = "Commit to the repository."

  def RunStep(self):
    self.GitCheckout(self.Config("BRANCHNAME"))
    self.WaitForLGTM()
    self.GitPresubmit()
    self.vc.CLLand()


class TagRevision(Step):
  MESSAGE = "Create the tag."

  def RunStep(self):
    print "Creating tag %s" % self["version"]
    self.vc.Tag(self["version"],
                self.vc.RemoteBranch(self["merge_to_branch"]),
                self["commit_title"])


class CleanUp(Step):
  MESSAGE = "Cleanup."

  def RunStep(self):
    self.CommonCleanup()
    print "*** SUMMARY ***"
    print "version: %s" % self["version"]
    print "branch: %s" % self["merge_to_branch"]
    if self["revision_list"]:
      print "patches: %s" % self["revision_list"]


class MergeToBranch(ScriptsBase):
  """Driver: wires up option parsing, validation and the step sequence."""

  def _Description(self):
    return ("Performs the necessary steps to merge revisions from "
            "master to other branches, including candidates.")

  def _PrepareOptions(self, parser):
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--branch", help="The branch to merge to.")
    parser.add_argument("revisions", nargs="*",
                        help="The revisions to merge.")
    parser.add_argument("-f", "--force",
                        help="Delete sentinel file.",
                        default=False, action="store_true")
    parser.add_argument("-m", "--message",
                        help="A commit message for the patch.")
    parser.add_argument("-p", "--patch",
                        help="A patch file to apply as part of the merge.")

  def _ProcessOptions(self, options):
    # Either revisions or a patch file (with a message) must be given.
    if len(options.revisions) < 1:
      if not options.patch:
        print "Either a patch file or revision numbers must be specified"
        return False
      if not options.message:
        print "You must specify a merge comment if no patches are specified"
        return False
    options.bypass_upload_hooks = True
    # CC ulan to make sure that fixes are merged to Google3.
    options.cc = "ulan@chromium.org"

    # Make sure to use git hashes in the new workflows.
    for revision in options.revisions:
      if (IsSvnNumber(revision) or
          (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
        print "Please provide full git hashes of the patches to merge."
        print "Got: %s" % revision
        return False
    return True

  def _Config(self):
    return {
      "BRANCHNAME": "prepare-merge",
      "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
      "ALREADY_MERGING_SENTINEL_FILE":
          "/tmp/v8-merge-to-branch-tempfile-already-merging",
      "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
      "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
    }

  def _Steps(self):
    return [
      Preparation,
      CreateBranch,
      SearchArchitecturePorts,
      CreateCommitMessage,
      ApplyPatches,
      PrepareVersion,
      IncrementVersion,
      CommitLocal,
      UploadStep,
      CommitRepository,
      TagRevision,
      CleanUp,
    ]


if __name__ == "__main__":  # pragma: no cover
  sys.exit(MergeToBranch().Run())
unknown
codeparrot/codeparrot-clean
# Regression tests for the pprint module.
# NOTE: Python 2 source — uses backtick-repr, long literals (0L) and the
# old TestCase.assert_ spelling; it will not parse under Python 3.
import pprint
import unittest
import test_support

# `uni` maps a str to unicode where the builtin exists, and is the
# identity otherwise, so the test data below works either way.
try:
    uni = unicode
except NameError:
    def uni(x):return x


class QueryTestCase(unittest.TestCase):
    """Exercises pprint.isrecursive()/isreadable()/pformat()/saferepr()."""

    def setUp(self):
        # self.a contains self.b (but no cycle yet); test_knotted later
        # closes the loop by inserting self.a back into self.b.
        self.a = range(100)
        self.b = range(200)
        self.a[-12] = self.b

    def test_basic(self):
        """Verify .isrecursive() and .isreadable() w/o recursion."""
        verify = self.assert_
        for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3},
                     uni("yaddayadda"),
                     self.a, self.b):
            verify(not pprint.isrecursive(safe),
                   "expected not isrecursive for " + `safe`)
            verify(pprint.isreadable(safe),
                   "expected isreadable for " + `safe`)

    def test_knotted(self):
        """Verify .isrecursive() and .isreadable() w/ recursion."""
        # Tie a knot.
        self.b[67] = self.a
        # Messy dict.
        self.d = {}
        self.d[0] = self.d[1] = self.d[2] = self.d

        verify = self.assert_
        for icky in self.a, self.b, self.d, (self.d, self.d):
            verify(pprint.isrecursive(icky), "expected isrecursive")
            verify(not pprint.isreadable(icky), "expected not isreadable")

        # Break the cycles.
        self.d.clear()
        del self.a[:]
        del self.b[:]

        # Once the cycles are broken the same objects must test clean again.
        for safe in self.a, self.b, self.d, (self.d, self.d):
            verify(not pprint.isrecursive(safe),
                   "expected not isrecursive for " + `safe`)
            verify(pprint.isreadable(safe),
                   "expected isreadable for " + `safe`)

    def test_unreadable(self):
        """Not recursive but not readable anyway."""
        verify = self.assert_
        # Types, modules and builtin functions have no eval()-able repr.
        for unreadable in type(3), pprint, pprint.isrecursive:
            verify(not pprint.isrecursive(unreadable),
                   "expected not isrecursive for " + `unreadable`)
            verify(not pprint.isreadable(unreadable),
                   "expected not isreadable for " + `unreadable`)

    def test_same_as_repr(self):
        "Simple objects and small containers that should be same as repr()."
        verify = self.assert_
        for simple in (0, 0L, 0+0j, 0.0, "", uni(""), (), [], {}, verify, pprint,
                       -6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
                       (1,2), [3,4], {5: 6, 7: 8},
                       {"xy\tab\n": (3,),
                        5: [[]], (): {}},
                       range(10, -11, -1)
                      ):
            native = repr(simple)
            for function in "pformat", "saferepr":
                f = getattr(pprint, function)
                got = f(simple)
                verify(native == got, "expected %s got %s from pprint.%s" %
                                      (native, got, function))

    def test_basic_line_wrap(self):
        """verify basic line-wrapping operation"""
        o = {'RPM_cal': 0,
             'RPM_cal2': 48059,
             'Speed_cal': 0,
             'controldesk_runtime_us': 0,
             'main_code_runtime_us': 0,
             'read_io_runtime_us': 0,
             'write_io_runtime_us': 43690}
        exp = """\
{'RPM_cal': 0,
 'RPM_cal2': 48059,
 'Speed_cal': 0,
 'controldesk_runtime_us': 0,
 'main_code_runtime_us': 0,
 'read_io_runtime_us': 0,
 'write_io_runtime_us': 43690}"""
        self.assertEqual(pprint.pformat(o), exp)


def test_main():
    test_support.run_unittest(QueryTestCase)


if __name__ == "__main__":
    test_main()
unknown
codeparrot/codeparrot-clean
package benchmarks.flow.scrabble import java.util.* object IterableSpliterator { @JvmStatic public fun <T> of(spliterator: Spliterator<T>): Iterable<T> = Iterable { Spliterators.iterator(spliterator) } }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
benchmarks/src/jmh/kotlin/benchmarks/flow/scrabble/IterableSpliterator.kt
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides the function :py:func:`summary` that is used for printing
an `execution summary
<https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_
at the end of luigi invocations.
"""

import textwrap
import collections
import functools

import luigi


class execution_summary(luigi.Config):
    # Max number of task lines printed per status section (<= 0 disables
    # truncation).
    summary_length = luigi.IntParameter(default=5)


def _partition_tasks(worker):
    """
    Takes a worker and sorts out tasks based on their status.
    Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
    """
    # Each history entry is a (task, status, ext) triple recorded by the worker.
    task_history = worker._add_task_history
    pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
    set_tasks = {}
    # "completed": went PENDING then DONE during this run; "already_done":
    # was DONE without ever being pending (satisfied before the run).
    set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
    set_tasks["already_done"] = {task for (task, status, ext) in task_history
                                 if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
    set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
    # "failed": failed and never subsequently completed (no successful retry).
    set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"]
    set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'}
    set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history
                                      if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext}
    set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history
                                          if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext}
    # Filled in later by _summary_dict / _populate_unknown_statuses.
    set_tasks["run_by_other_worker"] = set()
    set_tasks["upstream_failure"] = set()
    set_tasks["upstream_missing_dependency"] = set()
    set_tasks["upstream_run_by_other_worker"] = set()
    set_tasks["upstream_scheduling_error"] = set()
    set_tasks["not_run"] = set()
    return set_tasks


def _root_task(worker):
    """
    Return the first task scheduled by the worker, corresponding to the root task
    """
    return worker._add_task_history[0][0]


def _populate_unknown_statuses(set_tasks):
    """
    Add the "upstream_*" and "not_run" statuses by mutating set_tasks.
    """
    visited = set()
    for task in set_tasks["still_pending_not_ext"]:
        _depth_first_search(set_tasks, task, visited)


def _depth_first_search(set_tasks, current_task, visited):
    """
    This dfs checks why tasks are still pending.
    """
    visited.add(current_task)
    if current_task in set_tasks["still_pending_not_ext"]:
        upstream_failure = False
        upstream_missing_dependency = False
        upstream_run_by_other_worker = False
        upstream_scheduling_error = False
        # Inspect each requirement; a pending task inherits the blocking
        # reason(s) of its dependencies.
        for task in current_task._requires():
            if task not in visited:
                _depth_first_search(set_tasks, task, visited)
            if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]:
                set_tasks["upstream_failure"].add(current_task)
                upstream_failure = True
            if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
                set_tasks["upstream_missing_dependency"].add(current_task)
                upstream_missing_dependency = True
            if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
                set_tasks["upstream_run_by_other_worker"].add(current_task)
                upstream_run_by_other_worker = True
            if task in set_tasks["scheduling_error"]:
                set_tasks["upstream_scheduling_error"].add(current_task)
                upstream_scheduling_error = True
        # No upstream explanation at all: the scheduler simply never granted
        # this worker permission to run the task.
        if not upstream_failure and not upstream_missing_dependency and \
                not upstream_run_by_other_worker and not upstream_scheduling_error and \
                current_task not in set_tasks["run_by_other_worker"]:
            set_tasks["not_run"].add(current_task)


def _get_str(task_dict, extra_indent):
    """
    This returns a string for each status
    """
    summary_length = execution_summary().summary_length

    lines = []
    task_names = sorted(task_dict.keys())
    for task_family in task_names:
        tasks = task_dict[task_family]
        tasks = sorted(tasks, key=lambda x: str(x))
        prefix_size = 8 if extra_indent else 4
        prefix = ' ' * prefix_size

        line = None

        if summary_length > 0 and len(lines) >= summary_length:
            line = prefix + "..."
            lines.append(line)
            break
        if len(tasks[0].get_params()) == 0:
            line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family))
        elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \
                (len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)):
            """
            This is to make sure that there is no really long task in the output
            """
            line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family)
        elif len((tasks[0].get_params())) == 1:
            # Single-parameter family: collapse a contiguous range of values
            # into "first...last" when there are more than 3 of them.
            attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks}

            param_class = tasks[0].get_params()[0][1]
            first, last = _ranging_attributes(attributes, param_class)
            if first is not None and last is not None and len(attributes) > 3:
                param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last))
            else:
                param_str = '{0}'.format(_get_str_one_parameter(tasks))
            line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str)
        else:
            ranging = False
            params = _get_set_of_params(tasks)
            unique_param_keys = list(_get_unique_param_keys(params))
            if len(unique_param_keys) == 1:
                unique_param, = unique_param_keys
                attributes = params[unique_param]
                param_class = unique_param[1]
                first, last = _ranging_attributes(attributes, param_class)
                if first is not None and last is not None and len(attributes) > 2:
                    ranging = True
                    line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param))
            if not ranging:
                if len(tasks) == 1:
                    line = prefix + '- {0} {1}'.format(len(tasks), tasks[0])
                if len(tasks) == 2:
                    line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1])
                if len(tasks) > 2:
                    line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0])
        lines.append(line)
    return '\n'.join(lines)


def _get_len_of_params(task):
    # Total length of all parameter *names* for a task.
    return sum(len(param[0]) for param in task.get_params())


def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param):
    # Render all parameters of tasks[0], substituting the one varying
    # parameter with a "first...last" range.
    row = ''
    str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last))
    for param in tasks[0].get_params():
        row += '{0}='.format(param[0])
        if param[0] == unique_param[0]:
            row += '{0}'.format(str_unique_param)
        else:
            row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
        if param != tasks[0].get_params()[-1]:
            row += ", "
    row += ')'
    return row


def _get_set_of_params(tasks):
    # Maps each (name, Parameter) pair to the set of values seen across tasks.
    params = {}
    for param in tasks[0].get_params():
        params[param] = {getattr(task, param[0]) for task in tasks}
    return params


def _get_unique_param_keys(params):
    # Yields only the parameters whose value actually varies across tasks.
    for param_key, param_values in params.items():
        if len(param_values) > 1:
            yield param_key


def _ranging_attributes(attributes, param_class):
    """
    Checks if there is a continuous range
    """
    # If successors of the attribute set differ from the set by exactly one
    # element on each side, the values form one unbroken run.
    next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
    in_first = attributes.difference(next_attributes)
    in_second = next_attributes.difference(attributes)
    if len(in_first) == 1 and len(in_second) == 1:
        for x in attributes:
            if {param_class.next_in_enumeration(x)} == in_second:
                return next(iter(in_first)), x
    return None, None


def _get_str_one_parameter(tasks):
    # Comma-joined serialized values, truncated with "..." once the row
    # grows long.
    row = ''
    count = 0
    for task in tasks:
        if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
            row += '...'
            break
        param = task.get_params()[0]
        row += '{0}'.format(param[1].serialize(getattr(task, param[0])))
        if count < len(tasks) - 1:
            row += ','
        count += 1
    return row


def _serialize_first_param(task):
    return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))


def _get_number_of_tasks_for(status, group_tasks):
    # "still_pending" is a synthetic status spanning the two pending buckets.
    if status == "still_pending":
        return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
                _get_number_of_tasks(group_tasks["still_pending_not_ext"]))
    return _get_number_of_tasks(group_tasks[status])


def _get_number_of_tasks(task_dict):
    return sum(len(tasks) for tasks in task_dict.values())


def _get_comments(group_tasks):
    """
    Get the human readable comments and quantities for the task types.
    """
    comments = {}
    for status, human in _COMMENTS:
        num_tasks = _get_number_of_tasks_for(status, group_tasks)
        if num_tasks:
            space = "    " if status in _PENDING_SUB_STATUSES else ""
            comments[status] = '{space}* {num_tasks} {human}:\n'.format(
                space=space,
                num_tasks=num_tasks,
                human=human)
    return comments


# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
    "already_done",
    "completed",
    "ever_failed",
    "failed",
    "scheduling_error",
    "still_pending",
    "still_pending_ext",
    "run_by_other_worker",
    "upstream_failure",
    "upstream_missing_dependency",
    "upstream_run_by_other_worker",
    "upstream_scheduling_error",
    "not_run",
)
# Sub-statuses that are indented under "still_pending" in the output.
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
# NOTE(review): "suceeded" below is a typo in a user-facing message; it is a
# runtime string, so it is deliberately left untouched here.
_COMMENTS = set((
    ("already_done", 'present dependencies were encountered'),
    ("completed", 'ran successfully'),
    ("failed", 'failed'),
    ("scheduling_error", 'failed scheduling'),
    ("still_pending", 'were left pending, among these'),
    ("still_pending_ext", 'were missing external dependencies'),
    ("run_by_other_worker", 'were being run by another worker'),
    ("upstream_failure", 'had failed dependencies'),
    ("upstream_missing_dependency", 'had missing external dependencies'),
    ("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
    ("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
    ("not_run", 'was not granted run permission by the scheduler'),
))


def _get_run_by_other_worker(worker):
    """
    This returns a set of the tasks that are being run by other worker
    """
    task_sets = _get_external_workers(worker).values()
    return functools.reduce(lambda a, b: a | b, task_sets, set())


def _get_external_workers(worker):
    """
    This returns a dict with a set of tasks for all of the other workers
    """
    worker_that_blocked_task = collections.defaultdict(set)
    get_work_response_history = worker._get_work_response_history
    for get_work_response in get_work_response_history:
        if get_work_response['task_id'] is None:
            for running_task in get_work_response['running_tasks']:
                other_worker_id = running_task['worker']
                other_task_id = running_task['task_id']
                other_task = worker._scheduled_tasks.get(other_task_id)
                if other_worker_id == worker._id or not other_task:
                    continue
                worker_that_blocked_task[other_worker_id].add(other_task)
    return worker_that_blocked_task


def _group_tasks_by_name_and_status(task_dict):
    """
    Takes a dictionary with sets of tasks grouped by their status
    and returns a dictionary with dictionaries with an array of tasks grouped by
    their status and task name
    """
    group_status = {}
    for task in task_dict:
        if task.task_family not in group_status:
            group_status[task.task_family] = []
        group_status[task.task_family].append(task)
    return group_status


def _summary_dict(worker):
    # Partition, then enrich with cross-worker and upstream information.
    set_tasks = _partition_tasks(worker)
    set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
    _populate_unknown_statuses(set_tasks)
    return set_tasks


def _summary_format(set_tasks, worker):
    # Builds the full multi-line summary text, ending with a smiley that
    # reflects the overall outcome of the run.
    group_tasks = {}
    for status, task_dict in set_tasks.items():
        group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
    comments = _get_comments(group_tasks)
    num_all_tasks = sum([len(set_tasks["already_done"]),
                         len(set_tasks["completed"]), len(set_tasks["failed"]),
                         len(set_tasks["scheduling_error"]),
                         len(set_tasks["still_pending_ext"]),
                         len(set_tasks["still_pending_not_ext"])])
    str_output = ''
    str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
    for status in _ORDERED_STATUSES:
        if status not in comments:
            continue
        str_output += '{0}'.format(comments[status])
        if status != 'still_pending':
            str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
    ext_workers = _get_external_workers(worker)
    group_tasks_ext_workers = {}
    for ext_worker, task_dict in ext_workers.items():
        group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
    if len(ext_workers) > 0:
        str_output += "\nThe other workers were:\n"
        count = 0
        for ext_worker, task_dict in ext_workers.items():
            if count > 3 and count < len(ext_workers) - 1:
                str_output += "    and {0} other workers".format(len(ext_workers) - count)
                break
            str_output += "    - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
            count += 1
        str_output += '\n'
    if num_all_tasks == sum([len(set_tasks["already_done"]),
                             len(set_tasks["scheduling_error"]),
                             len(set_tasks["still_pending_ext"]),
                             len(set_tasks["still_pending_not_ext"])]):
        if len(ext_workers) == 0:
            str_output += '\n'
        str_output += 'Did not run any tasks'
    smiley = ""
    reason = ""
    if set_tasks["ever_failed"]:
        if not set_tasks["failed"]:
            smiley = ":)"
            reason = "there were failed tasks but they all suceeded in a retry"
        else:
            smiley = ":("
            reason = "there were failed tasks"
            if set_tasks["scheduling_error"]:
                reason += " and tasks whose scheduling failed"
    elif set_tasks["scheduling_error"]:
        smiley = ":("
        reason = "there were tasks whose scheduling failed"
    elif set_tasks["not_run"]:
        smiley = ":|"
        reason = "there were tasks that were not granted run permission by the scheduler"
    elif set_tasks["still_pending_ext"]:
        smiley = ":|"
        reason = "there were missing external dependencies"
    else:
        smiley = ":)"
        reason = "there were no failed tasks or missing external dependencies"
    str_output += "\nThis progress looks {0} because {1}".format(smiley, reason)
    if num_all_tasks == 0:
        str_output = 'Did not schedule any tasks'
    return str_output


def _summary_wrap(str_output):
    # Frames the summary with banner lines.
    return textwrap.dedent("""
    ===== Luigi Execution Summary =====

    {str_output}

    ===== Luigi Execution Summary =====
    """).format(str_output=str_output)


def summary(worker):
    """
    Given a worker, return a human readable summary of what the worker have done.
    """
    return _summary_wrap(_summary_format(_summary_dict(worker), worker))
unknown
codeparrot/codeparrot-clean
import pygame
from globals import *  # supplies board constants (MARGIN_LEFT, MARGIN_TOP, BLOCK_SIZE, BOARD_WIDTH, BOARD_HEIGHT)
from math import ceil
from random import randrange, random


class Renderer:
    """Thin wrapper around the pygame display.

    Owns the window surface, the two fonts and the decorative background
    block grid, and provides the drawing primitives (rects, blocks, text,
    text tables) used by the rest of the game.
    """

    def __init__(self, logger, config):
        # NOTE(review): `logger` is accepted but never stored or used here.
        self.config = config
        # Lazily built grid of {'color', 'rect', 'obfuscated'} dicts, one
        # inner list per row; see draw_block_background().
        self.background_block_grid = []
        # Palette for random block colors: red, green, blue.
        self.colors = [
            (255, 0, 0),
            (0, 255, 0),
            (0, 0, 255)
        ]
        self.resize()

    def update(self):
        """Flip the finished frame to the screen."""
        pygame.display.update()

    def resize(self):
        """(Re)create the window at the configured size and rebuild the
        size-dependent resources (fonts, cached background grid)."""
        pygame.display.set_caption('BLOCK BUSTER (v%s)' % self.config.get_const('version_number'))
        self.display = pygame.display.set_mode(self.config.get_setting('window_size'))
        self.config.determine_size_variables()
        # Drop the cached background grid; it no longer matches the new
        # window dimensions and is rebuilt on the next draw call.
        if self.background_block_grid:
            self.background_block_grid = []
        # Pick font sizes matching the window scale.
        # NOTE(review): mixes config.get_setting(...) above with
        # config.get(...) here -- presumably equivalent accessors; confirm.
        if self.config.get('window_size')[0] == 450:
            self.big_font = pygame.font.Font(None, 30)
            self.small_font = pygame.font.Font(None, 20)
        else:
            self.big_font = pygame.font.Font(None, 60)
            self.small_font = pygame.font.Font(None, 40)

    def fill(self, color):
        """Clear the whole display with a solid color."""
        self.display.fill(color)

    def draw_rect(self, rect, color, border_width=0):
        """Draw a rectangle; border_width == 0 means filled."""
        if border_width == 0:
            # filled rectangles can use the fill function which can be hardware accelerated
            self.display.fill(color, rect)
        else:
            pygame.draw.rect(self.display, color, rect, border_width)

    def draw_block(self, rect, color):
        """Draw one game block: a filled rect with a black border."""
        self.draw_rect(rect, color)  # fill a colored rect
        self.draw_rect(rect, self.config.get_const('black'), self.config.get('block_border_size'))  # draw a black border on it

    # todo generate rects once instead of for every draw call
    def draw_block_background(self):
        """Draw the decorative full-window grid of random colored blocks,
        skipping blocks marked 'obfuscated' (covered by a splash panel)."""
        # initialize a background block grid if none is present
        if not len(self.background_block_grid):
            window_size = self.config.get('window_size')
            block_size = self.config.get('block_size')
            # ceil so partial blocks at the right/bottom edge are covered
            number_of_columns = ceil(window_size[0] / block_size)
            number_of_rows = ceil(window_size[1] / block_size)
            current_position = [0, 0]
            for row in range(number_of_rows):
                color_row = self.pick_random_colors(number_of_columns)
                self.background_block_grid.append([])
                for current_color in color_row:
                    rect = pygame.Rect(current_position, (block_size, block_size))
                    self.background_block_grid[row].append({
                        'color': current_color,
                        'rect': rect,
                        'obfuscated': False
                    })
                    current_position[0] += block_size
                # carriage return: back to the left edge, one row down
                current_position[0] = 0
                current_position[1] += block_size
        # Blit every non-obscured block of the (possibly cached) grid.
        for row in self.background_block_grid:
            for block in row:
                if not block['obfuscated']:
                    self.draw_block(block['rect'], block['color'])

    def draw_splash_background(self, outer_margin=30, inner_margin=10, border_color=(255, 255, 255)):
        """Draw a centered black panel with a double border (splash/menu
        backdrop) and return its rect so callers can center content on it.

        Side effect: marks background grid blocks fully contained in the
        panel as obfuscated so draw_block_background() skips them.
        """
        window_size = self.config.get('window_size')
        width = window_size[0] - outer_margin
        height = window_size[1] - outer_margin
        background_rect = pygame.Rect((0, 0), (width, height))
        background_rect.center = (window_size[0] / 2, window_size[1] / 2)
        # Slightly larger copy used for the second (outer) border line.
        expanded_area = pygame.Rect(background_rect)
        expanded_area.width += inner_margin
        expanded_area.height += inner_margin
        expanded_area.center = background_rect.center
        # NOTE(review): other methods use config.get_const('black'); this
        # one uses config.get('black') -- confirm both resolve to the color.
        self.draw_rect(background_rect, self.config.get('black'))
        self.draw_rect(background_rect, border_color, 3)
        self.draw_rect(expanded_area, border_color, 3)
        # If we have a background block grid, mark the obscured blocks
        if len(self.background_block_grid):
            for row in self.background_block_grid:
                for block_desc in row:
                    if background_rect.contains(block_desc['rect']):
                        block_desc['obfuscated'] = True
        return background_rect

    def get_distance_from_center(self, position=(0, 0), surface=None):
        """Return [dx, dy] from `position` to the center of `surface`
        (defaults to the whole display rect)."""
        if surface is None:
            surface = self.display.get_rect()
        center = surface.center
        result = [0, 0]
        result[0] = center[0] - position[0]
        result[1] = center[1] - position[1]
        return result

    # todo generate surface once and store it instead of doing it every update tick
    def draw_centered_text(self, text, offset=(0, 0), small=False, render_surface=None):
        """Draw some text (small or large) on screen.

        `text` may be a single string or a list of lines; lines are
        stacked vertically, centered on `render_surface` (the whole
        display by default) shifted by `offset`. Returns the list of
        rects the lines were blitted to.
        """
        text_surfaces = []
        # Turn single string into list of strings.
        if isinstance(text, str):
            text = [text]
        # Generate the text surfaces, keeping track of how much vertical and horizontal space it occupies.
        total_text_dimensions = [0, 0]
        for line in text:
            if small:
                current_surface = self.small_font.render(line, True, (200, 200, 200))
            else:
                current_surface = self.big_font.render(line, True, (200, 200, 200))
            text_surfaces.append(current_surface)
            text_rect = current_surface.get_rect()
            total_text_dimensions[1] += text_rect.height
            if text_rect.width > total_text_dimensions[0]:
                total_text_dimensions[0] = text_rect.width
        # If no surface to center on was passed, use the entire display.
        if render_surface is None:
            render_surface = self.display.get_rect()
        # Determine the starting point by finding the surface center and
        # offsetting vertically by half the text height .
        surface_center_x = (render_surface.width / 2) + render_surface.left
        surface_center_y = (render_surface.height / 2) + render_surface.top
        total_text_height_half = total_text_dimensions[1] / 2
        text_position = [surface_center_x + offset[0], (surface_center_y - total_text_height_half) + offset[1]]
        result = []
        # Render each line, adjusting the vertical text position.
        for text_to_render in text_surfaces:
            text_rect = text_to_render.get_rect()
            text_rect.center = text_position
            text_position[1] += text_rect.height
            result.append(text_rect)
            self.display.blit(text_to_render, text_rect)
        return result

    def draw_text_table(self, table_area=None, headers=(), entries=()):
        """Draw a table of text on screen.

        Headers are rendered with the big font, entry rows with the small
        font; columns are evenly spaced across `table_area`. Returns one
        rect per entry row so callers can do hit-testing/formatting.

        NOTE(review): if both `headers` and `entries` are empty,
        number_of_columns stays 0 and the width division below raises
        ZeroDivisionError -- confirm callers never pass both empty.
        """
        if table_area is None:
            table_area = pygame.Rect((0, 0), self.config.get('window_size'))
        line_rects = []
        number_of_columns = 0
        # calculate the height of the rows (max entry height + margin)
        row_height = 0
        row_margin = 20
        if len(headers):
            row_height = self.big_font.size(headers[0])[1]
            number_of_columns = len(headers)
        elif len(entries):
            number_of_columns = len(entries[0])
            row_height = self.small_font.size(entries[0][0])[1]
        # calculate the width of the columns (width / number_of_columns)
        column_width = table_area.width / number_of_columns
        column_width_half = column_width / 2
        row_height += row_margin
        row_height_half = row_height / 2
        # Draw cursor: starts at the center of the first cell.
        current_draw_position = [column_width_half, row_height_half]
        remaining_vertical_space = table_area.height
        # render the headers
        for header in headers:
            header = str(header)
            text_size = self.big_font.size(header)
            # determine the offset needed to center the text
            text_width_offset = text_size[0] / 2
            # subtract row from remaining vertical space during rendering of first header
            if current_draw_position[0] == column_width_half:
                remaining_vertical_space -= row_height
            # center the text by offsetting the draw position
            offset_position = current_draw_position[:]
            offset_position[0] -= text_width_offset
            self.draw_text(header, offset_position)
            current_draw_position[0] += column_width
        # render the table entries
        header_count = len(headers)
        for entry in entries:
            # reset the draw cursor
            current_draw_position[0] = column_width_half
            current_draw_position[1] += row_height
            # Construct a rect enclosing the entire line (we return this to allow the caller to do formatting).
            line_rects.append(pygame.Rect(current_draw_position[0], current_draw_position[1], column_width * len(entry), row_height))
            for index, column_entry in enumerate(entry):
                # If headers were supplied we want to truncate columns that
                # were not given a header.
                if header_count and index >= header_count:
                    break
                column_entry = str(column_entry)
                text_size = self.small_font.size(column_entry)
                text_width_offset = text_size[0] / 2
                # Offset the current draw position to center the text.
                offset_draw_position = current_draw_position[:]
                offset_draw_position[0] -= text_width_offset
                # Skip rows that would overflow the table area.
                if remaining_vertical_space >= self.small_font.size(column_entry)[1]:
                    self.draw_text(column_entry, offset_draw_position, True)
                current_draw_position[0] += column_width
            # NOTE(review): adds row_height to the x cursor; looks
            # unintentional (x is reset at the top of each entry loop, so
            # this is currently harmless) -- confirm.
            current_draw_position[0] += row_height
            remaining_vertical_space -= row_height
        return line_rects

    def draw_text(self, text, position=None, small=False):
        '''Draw some text (small or large) on screen.

        `position` is the top-left corner of the text; when omitted the
        text is centered on the game board. Returns the blitted rect.
        '''
        text_surface = None
        if small:
            text_surface = self.small_font.render(text.rstrip(), True, (200, 200, 200))
        else:
            text_surface = self.big_font.render(text.rstrip(), True, (200, 200, 200))
        text_rect = text_surface.get_rect()
        # if no position was given, center text on board
        if position is None:
            position = [MARGIN_LEFT + ((BLOCK_SIZE * BOARD_WIDTH) / 2), MARGIN_TOP + ((BLOCK_SIZE * BOARD_HEIGHT) / 2)]
            # NOTE(review): source indentation was lost; these centering
            # offsets are reconstructed as applying only to the default
            # board-center position (callers like draw_text_table already
            # pre-center horizontally) -- confirm against the original.
            position[0] -= text_surface.get_width() / 2
            position[1] -= text_surface.get_height() / 2
        text_rect.topleft = position
        self.display.blit(text_surface, text_rect)
        return text_rect

    def pick_random_colors(self, amount=1, allow_streaks=True):
        """Generate a list of `amount` random colors from the palette.

        Lives on the renderer because the background drawing also uses it.
        With allow_streaks=True there is additionally a 5% chance per pick
        of the configured white color; with allow_streaks=False a color is
        re-rolled once it would appear more than 3 times in a row.
        """
        # global COLORS
        # select a few random colors
        result = []
        latest_color = None
        color_streak_count = 0
        for i in range(amount):
            color_accepted = False
            while not color_accepted:
                random_color = self.colors[randrange(0, len(self.colors))]
                # 5 percent chance of getting the white color
                if allow_streaks and random() <= 0.05:
                    random_color = self.config.get_const('white')
                if random_color != latest_color:
                    color_streak_count = 0
                    latest_color = random_color
                else:
                    color_streak_count += 1
                if allow_streaks or color_streak_count < 3:
                    result.append(random_color)
                    color_accepted = True
        return result
unknown
codeparrot/codeparrot-clean
"""Component to embed TP-Link smart home devices.""" import logging import voluptuous as vol from homeassistant.const import CONF_HOST from homeassistant import config_entries import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType, HomeAssistantType from .common import ( async_discover_devices, get_static_devices, ATTR_CONFIG, CONF_DIMMER, CONF_DISCOVERY, CONF_LIGHT, CONF_SWITCH, SmartDevices ) _LOGGER = logging.getLogger(__name__) DOMAIN = 'tplink' TPLINK_HOST_SCHEMA = vol.Schema({ vol.Required(CONF_HOST): cv.string }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Optional(CONF_LIGHT, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_SWITCH, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_DIMMER, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_DISCOVERY, default=True): cv.boolean, }), }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up the TP-Link component.""" conf = config.get(DOMAIN) hass.data[DOMAIN] = {} hass.data[DOMAIN][ATTR_CONFIG] = conf if conf is not None: hass.async_create_task(hass.config_entries.flow.async_init( DOMAIN, context={'source': config_entries.SOURCE_IMPORT})) return True async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigType): """Set up TPLink from a config entry.""" config_data = hass.data[DOMAIN].get(ATTR_CONFIG) # These will contain the initialized devices lights = hass.data[DOMAIN][CONF_LIGHT] = [] switches = hass.data[DOMAIN][CONF_SWITCH] = [] # Add static devices static_devices = SmartDevices() if config_data is not None: static_devices = get_static_devices( config_data, ) lights.extend(static_devices.lights) switches.extend(static_devices.switches) # Add discovered devices if config_data is None or config_data[CONF_DISCOVERY]: discovered_devices = await async_discover_devices(hass, static_devices) lights.extend(discovered_devices.lights) 
switches.extend(discovered_devices.switches) forward_setup = hass.config_entries.async_forward_entry_setup if lights: _LOGGER.debug( "Got %s lights: %s", len(lights), ", ".join([d.host for d in lights]) ) hass.async_create_task(forward_setup(config_entry, 'light')) if switches: _LOGGER.debug( "Got %s switches: %s", len(switches), ", ".join([d.host for d in switches]) ) hass.async_create_task(forward_setup(config_entry, 'switch')) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" forward_unload = hass.config_entries.async_forward_entry_unload remove_lights = remove_switches = False if hass.data[DOMAIN][CONF_LIGHT]: remove_lights = await forward_unload(entry, 'light') if hass.data[DOMAIN][CONF_SWITCH]: remove_switches = await forward_unload(entry, 'switch') if remove_lights or remove_switches: hass.data[DOMAIN].clear() return True # We were not able to unload the platforms, either because there # were none or one of the forward_unloads failed. return False
unknown
codeparrot/codeparrot-clean
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the TensorFlow MPNet model family (base model and task heads)."""

import unittest

from transformers import MPNetConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor


# TF and the TF model classes are only importable when TF is installed.
if is_tf_available():
    import tensorflow as tf

    from transformers.models.mpnet.modeling_tf_mpnet import (
        TFMPNetForMaskedLM,
        TFMPNetForMultipleChoice,
        TFMPNetForQuestionAnswering,
        TFMPNetForSequenceClassification,
        TFMPNetForTokenClassification,
        TFMPNetModel,
    )


class TFMPNetModelTester:
    """Builds tiny random MPNet configs/inputs and runs per-head shape checks.

    The dimensions below are deliberately small so the tests stay fast;
    each create_and_check_* method instantiates one head class and
    asserts its output shapes.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase; assertions are delegated to it.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a small config plus random ids/mask/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # vocab_size=2 yields random 0/1 values suitable for a mask.
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMPNetModel(config=config)
        # Exercise both supported call conventions: dict and positional list.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_mpnet_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMPNetForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        # MLM head: one logit per vocabulary entry per position.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMPNetForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        # QA head: start/end span logits per position.
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMPNetForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMPNetForMultipleChoice(config)
        # Duplicate the inputs across a new "choices" axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMPNetForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for TFModelTesterMixin: (config, inputs_dict) pair."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFMPNetModelTest(TFModelTesterMixin, unittest.TestCase):
    """Runs the common TF model test-suite plus per-head shape checks."""

    all_model_classes = (
        (
            TFMPNetForMaskedLM,
            TFMPNetForMultipleChoice,
            TFMPNetForQuestionAnswering,
            TFMPNetForSequenceClassification,
            TFMPNetForTokenClassification,
            TFMPNetModel,
        )
        if is_tf_available()
        else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFMPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Downloads the published checkpoint; only run with --slow.
        for model_name in ["microsoft/mpnet-base"]:
            model = TFMPNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMPNetModelIntegrationTest(unittest.TestCase):
    """End-to-end check against known activations of the real checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint; a loose
        # tolerance guards against numeric drift across TF versions.
        expected_slice = tf.constant(
            [
                [
                    [-0.1067172, 0.08216473, 0.0024543],
                    [-0.03465879, 0.8354118, -0.03252288],
                    [-0.06569476, -0.12424111, -0.0494436],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
unknown
codeparrot/codeparrot-clean
# Copyright 2005 Divmod, Inc. See LICENSE file for details # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.internet._sslverify}. """ from __future__ import division, absolute_import import itertools try: from OpenSSL import SSL from OpenSSL.crypto import PKey, X509 from OpenSSL.crypto import TYPE_RSA from twisted.internet import _sslverify as sslverify except ImportError: pass from twisted.python.compat import nativeString from twisted.trial import unittest from twisted.internet import protocol, defer, reactor from twisted.internet.error import CertificateError, ConnectionLost from twisted.internet import interfaces # A couple of static PEM-format certificates to be used by various tests. A_HOST_CERTIFICATE_PEM = """ -----BEGIN CERTIFICATE----- MIIC2jCCAkMCAjA5MA0GCSqGSIb3DQEBBAUAMIG0MQswCQYDVQQGEwJVUzEiMCAG A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo dXNldHRzMScwJQYJKoZIhvcNAQkBFhhub2JvZHlAdHdpc3RlZG1hdHJpeC5jb20x ETAPBgNVBAsTCFNlY3VyaXR5MB4XDTA2MDgxNjAxMDEwOFoXDTA3MDgxNjAxMDEw OFowgbQxCzAJBgNVBAYTAlVTMSIwIAYDVQQDExlleGFtcGxlLnR3aXN0ZWRtYXRy aXguY29tMQ8wDQYDVQQHEwZCb3N0b24xHDAaBgNVBAoTE1R3aXN0ZWQgTWF0cml4 IExhYnMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxJzAlBgkqhkiG9w0BCQEWGG5v Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp 8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls fXzCWdG0O/3Lk2SRM0I= -----END CERTIFICATE----- """ A_PEER_CERTIFICATE_PEM = """ -----BEGIN CERTIFICATE----- MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG 
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv bTERMA8GA1UECxMIU2VjdXJpdHkwHhcNMDYwODE2MDEwMTU2WhcNMDcwODE2MDEw MTU2WjCBtjELMAkGA1UEBhMCVVMxIjAgBgNVBAMTGWV4YW1wbGUudHdpc3RlZG1h dHJpeC5jb20xDzANBgNVBAcTBkJvc3RvbjEcMBoGA1UEChMTVHdpc3RlZCBNYXRy aXggTGFiczEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEpMCcGCSqGSIb3DQEJARYa c29tZWJvZHlAdHdpc3RlZG1hdHJpeC5jb20xETAPBgNVBAsTCFNlY3VyaXR5MIGf MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnm+WBlgFNbMlHehib9ePGGDXF+Nz4 CjGuUmVBaXCRCiVjg3kSDecwqfb0fqTksBZ+oQ1UBjMcSh7OcvFXJZnUesBikGWE JE4V8Bjh+RmbJ1ZAlUPZ40bAkww0OpyIRAGMvKG+4yLFTO4WDxKmfDcrOb6ID8WJ e1u+i3XGkIf/5QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAD4Oukm3YYkhedUepBEA vvXIQhVDqL7mk6OqYdXmNj6R7ZMC8WWvGZxrzDI1bZuB+4aIxxd1FXC3UOHiR/xg i9cDl1y8P/qRp4aEBNF6rI0D4AxTbfnHQx4ERDAOShJdYZs/2zifPJ6va6YvrEyr yqDtGhklsWW3ZwBzEh5VEOUp -----END CERTIFICATE----- """ def counter(counter=itertools.count()): """ Each time we're called, return the next integer in the natural numbers. 
""" return next(counter) def makeCertificate(**kw): keypair = PKey() keypair.generate_key(TYPE_RSA, 512) certificate = X509() certificate.gmtime_adj_notBefore(0) certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year for xname in certificate.get_issuer(), certificate.get_subject(): for (k, v) in kw.items(): setattr(xname, k, nativeString(v)) certificate.set_serial_number(counter()) certificate.set_pubkey(keypair) certificate.sign(keypair, "md5") return keypair, certificate class DataCallbackProtocol(protocol.Protocol): def dataReceived(self, data): d, self.factory.onData = self.factory.onData, None if d is not None: d.callback(data) def connectionLost(self, reason): d, self.factory.onLost = self.factory.onLost, None if d is not None: d.errback(reason) class WritingProtocol(protocol.Protocol): byte = b'x' def connectionMade(self): self.transport.write(self.byte) def connectionLost(self, reason): self.factory.onLost.errback(reason) class FakeContext(object): """ Introspectable fake of an C{OpenSSL.SSL.Context}. Saves call arguments for later introspection. Necessary because C{Context} offers poor introspection. cf. this U{pyOpenSSL bug<https://bugs.launchpad.net/pyopenssl/+bug/1173899>}. @ivar _method: See C{method} parameter of L{__init__}. @ivar _options: C{int} of C{OR}ed values from calls of L{set_options}. @ivar _certificate: Set by L{use_certificate}. @ivar _privateKey: Set by L{use_privatekey}. @ivar _verify: Set by L{set_verify}. @ivar _verifyDepth: Set by L{set_verify_depth}. @ivar _sessionID: Set by L{set_session_id}. @ivar _extraCertChain: Accumulated C{list} of all extra certificates added by L{add_extra_chain_cert}. 
""" _options = 0 def __init__(self, method): self._method = method self._extraCertChain = [] def set_options(self, options): self._options |= options def use_certificate(self, certificate): self._certificate = certificate def use_privatekey(self, privateKey): self._privateKey = privateKey def check_privatekey(self): return None def set_verify(self, flags, callback): self._verify = flags, callback def set_verify_depth(self, depth): self._verifyDepth = depth def set_session_id(self, sessionID): self._sessionID = sessionID def add_extra_chain_cert(self, cert): self._extraCertChain.append(cert) class OpenSSLOptions(unittest.TestCase): serverPort = clientConn = None onServerLost = onClientLost = None sKey = None sCert = None cKey = None cCert = None def setUp(self): """ Create class variables of client and server certificates. """ self.sKey, self.sCert = makeCertificate( O=b"Server Test Certificate", CN=b"server") self.cKey, self.cCert = makeCertificate( O=b"Client Test Certificate", CN=b"client") self.caCert1 = makeCertificate( O=b"CA Test Certificate 1", CN=b"ca1")[1] self.caCert2 = makeCertificate( O=b"CA Test Certificate", CN=b"ca2")[1] self.caCerts = [self.caCert1, self.caCert2] self.extraCertChain = self.caCerts def tearDown(self): if self.serverPort is not None: self.serverPort.stopListening() if self.clientConn is not None: self.clientConn.disconnect() L = [] if self.onServerLost is not None: L.append(self.onServerLost) if self.onClientLost is not None: L.append(self.onClientLost) return defer.DeferredList(L, consumeErrors=True) def loopback(self, serverCertOpts, clientCertOpts, onServerLost=None, onClientLost=None, onData=None): if onServerLost is None: self.onServerLost = onServerLost = defer.Deferred() if onClientLost is None: self.onClientLost = onClientLost = defer.Deferred() if onData is None: onData = defer.Deferred() serverFactory = protocol.ServerFactory() serverFactory.protocol = DataCallbackProtocol serverFactory.onLost = onServerLost 
serverFactory.onData = onData clientFactory = protocol.ClientFactory() clientFactory.protocol = WritingProtocol clientFactory.onLost = onClientLost self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts) self.clientConn = reactor.connectSSL('127.0.0.1', self.serverPort.getHost().port, clientFactory, clientCertOpts) def test_constructorWithOnlyPrivateKey(self): """ C{privateKey} and C{certificate} make only sense if both are set. """ self.assertRaises( ValueError, sslverify.OpenSSLCertificateOptions, privateKey=self.sKey ) def test_constructorWithOnlyCertificate(self): """ C{privateKey} and C{certificate} make only sense if both are set. """ self.assertRaises( ValueError, sslverify.OpenSSLCertificateOptions, certificate=self.sCert ) def test_constructorWithCertificateAndPrivateKey(self): """ Specifying C{privateKey} and C{certificate} initializes correctly. """ opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert) self.assertEqual(opts.privateKey, self.sKey) self.assertEqual(opts.certificate, self.sCert) self.assertEqual(opts.extraCertChain, []) def test_constructorDoesNotAllowVerifyWithoutCACerts(self): """ C{verify} must not be C{True} without specifying C{caCerts}. """ self.assertRaises( ValueError, sslverify.OpenSSLCertificateOptions, privateKey=self.sKey, certificate=self.sCert, verify=True ) def test_constructorAllowsCACertsWithoutVerify(self): """ It's currently a NOP, but valid. """ opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, caCerts=self.caCerts) self.assertFalse(opts.verify) self.assertEqual(self.caCerts, opts.caCerts) def test_constructorWithVerifyAndCACerts(self): """ Specifying C{verify} and C{caCerts} initializes correctly. 
""" opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, caCerts=self.caCerts) self.assertTrue(opts.verify) self.assertEqual(self.caCerts, opts.caCerts) def test_constructorSetsExtraChain(self): """ Setting C{extraCertChain} works if C{certificate} and C{privateKey} are set along with it. """ opts = sslverify.OpenSSLCertificateOptions( privateKey=self.sKey, certificate=self.sCert, extraCertChain=self.extraCertChain, ) self.assertEqual(self.extraCertChain, opts.extraCertChain) def test_constructorDoesNotAllowExtraChainWithoutPrivateKey(self): """ A C{extraCertChain} without C{privateKey} doesn't make sense and is thus rejected. """ self.assertRaises( ValueError, sslverify.OpenSSLCertificateOptions, certificate=self.sCert, extraCertChain=self.extraCertChain, ) def test_constructorDoesNotAllowExtraChainWithOutPrivateKey(self): """ A C{extraCertChain} without C{certificate} doesn't make sense and is thus rejected. """ self.assertRaises( ValueError, sslverify.OpenSSLCertificateOptions, privateKey=self.sKey, extraCertChain=self.extraCertChain, ) def test_extraChainFilesAreAddedIfSupplied(self): """ If C{extraCertChain} is set and all prerequisites are met, the specified chain certificates are added to C{Context}s that get created. """ opts = sslverify.OpenSSLCertificateOptions( privateKey=self.sKey, certificate=self.sCert, extraCertChain=self.extraCertChain, ) opts._contextFactory = FakeContext ctx = opts.getContext() self.assertEqual(self.sKey, ctx._privateKey) self.assertEqual(self.sCert, ctx._certificate) self.assertEqual(self.extraCertChain, ctx._extraCertChain) def test_extraChainDoesNotBreakPyOpenSSL(self): """ C{extraCertChain} doesn't break C{OpenSSL.SSL.Context} creation. 
""" opts = sslverify.OpenSSLCertificateOptions( privateKey=self.sKey, certificate=self.sCert, extraCertChain=self.extraCertChain, ) ctx = opts.getContext() self.assertIsInstance(ctx, SSL.Context) def test_abbreviatingDistinguishedNames(self): """ Check that abbreviations used in certificates correctly map to complete names. """ self.assertEqual( sslverify.DN(CN=b'a', OU=b'hello'), sslverify.DistinguishedName(commonName=b'a', organizationalUnitName=b'hello')) self.assertNotEquals( sslverify.DN(CN=b'a', OU=b'hello'), sslverify.DN(CN=b'a', OU=b'hello', emailAddress=b'xxx')) dn = sslverify.DN(CN=b'abcdefg') self.assertRaises(AttributeError, setattr, dn, 'Cn', b'x') self.assertEqual(dn.CN, dn.commonName) dn.CN = b'bcdefga' self.assertEqual(dn.CN, dn.commonName) def testInspectDistinguishedName(self): n = sslverify.DN(commonName=b'common name', organizationName=b'organization name', organizationalUnitName=b'organizational unit name', localityName=b'locality name', stateOrProvinceName=b'state or province name', countryName=b'country name', emailAddress=b'email address') s = n.inspect() for k in [ 'common name', 'organization name', 'organizational unit name', 'locality name', 'state or province name', 'country name', 'email address']: self.assertIn(k, s, "%r was not in inspect output." % (k,)) self.assertIn(k.title(), s, "%r was not in inspect output." % (k,)) def testInspectDistinguishedNameWithoutAllFields(self): n = sslverify.DN(localityName=b'locality name') s = n.inspect() for k in [ 'common name', 'organization name', 'organizational unit name', 'state or province name', 'country name', 'email address']: self.assertNotIn(k, s, "%r was in inspect output." % (k,)) self.assertNotIn(k.title(), s, "%r was in inspect output." 
% (k,)) self.assertIn('locality name', s) self.assertIn('Locality Name', s) def test_inspectCertificate(self): """ Test that the C{inspect} method of L{sslverify.Certificate} returns a human-readable string containing some basic information about the certificate. """ c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM) self.assertEqual( c.inspect().split('\n'), ["Certificate For Subject:", " Common Name: example.twistedmatrix.com", " Country Name: US", " Email Address: nobody@twistedmatrix.com", " Locality Name: Boston", " Organization Name: Twisted Matrix Labs", " Organizational Unit Name: Security", " State Or Province Name: Massachusetts", "", "Issuer:", " Common Name: example.twistedmatrix.com", " Country Name: US", " Email Address: nobody@twistedmatrix.com", " Locality Name: Boston", " Organization Name: Twisted Matrix Labs", " Organizational Unit Name: Security", " State Or Province Name: Massachusetts", "", "Serial Number: 12345", "Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18", "Public Key with Hash: ff33994c80812aa95a79cdb85362d054"]) def test_certificateOptionsSerialization(self): """ Test that __setstate__(__getstate__()) round-trips properly. 
""" firstOpts = sslverify.OpenSSLCertificateOptions( privateKey=self.sKey, certificate=self.sCert, method=SSL.SSLv3_METHOD, verify=True, caCerts=[self.sCert], verifyDepth=2, requireCertificate=False, verifyOnce=False, enableSingleUseKeys=False, enableSessions=False, fixBrokenPeers=True, enableSessionTickets=True) context = firstOpts.getContext() self.assertIdentical(context, firstOpts._context) self.assertNotIdentical(context, None) state = firstOpts.__getstate__() self.assertNotIn("_context", state) opts = sslverify.OpenSSLCertificateOptions() opts.__setstate__(state) self.assertEqual(opts.privateKey, self.sKey) self.assertEqual(opts.certificate, self.sCert) self.assertEqual(opts.method, SSL.SSLv3_METHOD) self.assertEqual(opts.verify, True) self.assertEqual(opts.caCerts, [self.sCert]) self.assertEqual(opts.verifyDepth, 2) self.assertEqual(opts.requireCertificate, False) self.assertEqual(opts.verifyOnce, False) self.assertEqual(opts.enableSingleUseKeys, False) self.assertEqual(opts.enableSessions, False) self.assertEqual(opts.fixBrokenPeers, True) self.assertEqual(opts.enableSessionTickets, True) def test_certificateOptionsSessionTickets(self): """ Enabling session tickets should not set the OP_NO_TICKET option. """ opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=True) ctx = opts.getContext() self.assertEqual(0, ctx.set_options(0) & 0x00004000) def test_certificateOptionsSessionTicketsDisabled(self): """ Enabling session tickets should set the OP_NO_TICKET option. """ opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=False) ctx = opts.getContext() self.assertEqual(0x00004000, ctx.set_options(0) & 0x00004000) def test_allowedAnonymousClientConnection(self): """ Check that anonymous connections are allowed when certificates aren't required on the server. 
""" onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, requireCertificate=False), sslverify.OpenSSLCertificateOptions( requireCertificate=False), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_refusedAnonymousClientConnection(self): """ Check that anonymous connections are refused when certificates are required on the server. """ onServerLost = defer.Deferred() onClientLost = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, caCerts=[self.sCert], requireCertificate=True), sslverify.OpenSSLCertificateOptions( requireCertificate=False), onServerLost=onServerLost, onClientLost=onClientLost) d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True) def afterLost(result): ((cSuccess, cResult), (sSuccess, sResult)) = result self.failIf(cSuccess) self.failIf(sSuccess) # Win32 fails to report the SSL Error, and report a connection lost # instead: there is a race condition so that's not totally # surprising (see ticket #2877 in the tracker) self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost)) self.assertIsInstance(sResult.value, SSL.Error) return d.addCallback(afterLost) def test_failedCertificateVerification(self): """ Check that connecting with a certificate not accepted by the server CA fails. 
""" onServerLost = defer.Deferred() onClientLost = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False), sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=False, caCerts=[self.cCert]), onServerLost=onServerLost, onClientLost=onClientLost) d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True) def afterLost(result): ((cSuccess, cResult), (sSuccess, sResult)) = result self.failIf(cSuccess) self.failIf(sSuccess) return d.addCallback(afterLost) def test_successfulCertificateVerification(self): """ Test a successful connection with client certificate validation on server side. """ onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False), sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=True, caCerts=[self.sCert]), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_successfulSymmetricSelfSignedCertificateVerification(self): """ Test a successful connection with validation on both server and client sides. """ onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, requireCertificate=True, caCerts=[self.cCert]), sslverify.OpenSSLCertificateOptions(privateKey=self.cKey, certificate=self.cCert, verify=True, requireCertificate=True, caCerts=[self.sCert]), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_verification(self): """ Check certificates verification building custom certificates data. 
""" clientDN = sslverify.DistinguishedName(commonName='client') clientKey = sslverify.KeyPair.generate() clientCertReq = clientKey.certificateRequest(clientDN) serverDN = sslverify.DistinguishedName(commonName='server') serverKey = sslverify.KeyPair.generate() serverCertReq = serverKey.certificateRequest(serverDN) clientSelfCertReq = clientKey.certificateRequest(clientDN) clientSelfCertData = clientKey.signCertificateRequest( clientDN, clientSelfCertReq, lambda dn: True, 132) clientSelfCert = clientKey.newCertificate(clientSelfCertData) serverSelfCertReq = serverKey.certificateRequest(serverDN) serverSelfCertData = serverKey.signCertificateRequest( serverDN, serverSelfCertReq, lambda dn: True, 516) serverSelfCert = serverKey.newCertificate(serverSelfCertData) clientCertData = serverKey.signCertificateRequest( serverDN, clientCertReq, lambda dn: True, 7) clientCert = clientKey.newCertificate(clientCertData) serverCertData = clientKey.signCertificateRequest( clientDN, serverCertReq, lambda dn: True, 42) serverCert = serverKey.newCertificate(serverCertData) onData = defer.Deferred() serverOpts = serverCert.options(serverSelfCert) clientOpts = clientCert.options(clientSelfCert) self.loopback(serverOpts, clientOpts, onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_SSLv2IsDisabledForSSLv23(self): """ SSLv2 is insecure and should be disabled so when users use SSLv23_METHOD, they get at least SSLV3. It does nothing if SSLv2_METHOD chosen explicitly. 
""" opts = sslverify.OpenSSLCertificateOptions() ctx = opts.getContext() self.assertEqual(SSL.OP_NO_SSLv2, ctx.set_options(0) & SSL.OP_NO_SSLv2) if interfaces.IReactorSSL(reactor, None) is None: OpenSSLOptions.skip = "Reactor does not support SSL, cannot run SSL tests" class _NotSSLTransport: def getHandle(self): return self class _MaybeSSLTransport: def getHandle(self): return self def get_peer_certificate(self): return None def get_host_certificate(self): return None class _ActualSSLTransport: def getHandle(self): return self def get_host_certificate(self): return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original def get_peer_certificate(self): return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original class Constructors(unittest.TestCase): def test_peerFromNonSSLTransport(self): """ Verify that peerFromTransport raises an exception if the transport passed is not actually an SSL transport. """ x = self.assertRaises(CertificateError, sslverify.Certificate.peerFromTransport, _NotSSLTransport()) self.failUnless(str(x).startswith("non-TLS")) def test_peerFromBlankSSLTransport(self): """ Verify that peerFromTransport raises an exception if the transport passed is an SSL transport, but doesn't have a peer certificate. """ x = self.assertRaises(CertificateError, sslverify.Certificate.peerFromTransport, _MaybeSSLTransport()) self.failUnless(str(x).startswith("TLS")) def test_hostFromNonSSLTransport(self): """ Verify that hostFromTransport raises an exception if the transport passed is not actually an SSL transport. """ x = self.assertRaises(CertificateError, sslverify.Certificate.hostFromTransport, _NotSSLTransport()) self.failUnless(str(x).startswith("non-TLS")) def test_hostFromBlankSSLTransport(self): """ Verify that hostFromTransport raises an exception if the transport passed is an SSL transport, but doesn't have a host certificate. 
""" x = self.assertRaises(CertificateError, sslverify.Certificate.hostFromTransport, _MaybeSSLTransport()) self.failUnless(str(x).startswith("TLS")) def test_hostFromSSLTransport(self): """ Verify that hostFromTransport successfully creates the correct certificate if passed a valid SSL transport. """ self.assertEqual( sslverify.Certificate.hostFromTransport( _ActualSSLTransport()).serialNumber(), 12345) def test_peerFromSSLTransport(self): """ Verify that peerFromTransport successfully creates the correct certificate if passed a valid SSL transport. """ self.assertEqual( sslverify.Certificate.peerFromTransport( _ActualSSLTransport()).serialNumber(), 12346) if interfaces.IReactorSSL(reactor, None) is None: Constructors.skip = "Reactor does not support SSL, cannot run SSL tests"
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # Copyright 2011-2015 Splunk, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """An example that shows how to use the Splunk binding module to create a convenient 'wrapper' interface around the Splunk REST APIs. The example binds to a sampling of endpoints showing how to access collections, entities and 'method-like' endpoints.""" from __future__ import absolute_import import sys, os sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) from splunklib.binding import connect try: from utils import parse except ImportError: raise Exception("Add the SDK repository to your PYTHONPATH to run the examples " "(e.g., export PYTHONPATH=~/splunk-sdk-python.") class Service: def __init__(self, context): self.context = context def apps(self): return self.context.get("apps/local") def indexes(self): return self.context.get("data/indexes") def info(self): return self.context.get("/services/server/info") def settings(self): return self.context.get("/services/server/settings") def search(self, query, **kwargs): return self.context.post("search/jobs/export", search=query, **kwargs) def main(argv): opts = parse(argv, {}, ".splunkrc") context = connect(**opts.kwargs) service = Service(context) assert service.apps().status == 200 assert service.indexes().status == 200 assert service.info().status == 200 assert service.settings().status == 200 assert service.search("search 404").status == 200 if __name__ == "__main__": main(sys.argv[1:])
unknown
codeparrot/codeparrot-clean
'use strict';
{
    const $ = django.jQuery;

    // jQuery plugin: turn each matched <select> into a Select2 autocomplete
    // widget. The AJAX `data` callback augments every request with the
    // app/model/field identifiers stored in the element's data-* attributes,
    // which the admin autocomplete view needs to locate the right queryset.
    $.fn.djangoAdminSelect2 = function() {
        $.each(this, function(i, element) {
            $(element).select2({
                ajax: {
                    data: (params) => {
                        return {
                            term: params.term,   // current search text
                            page: params.page,   // pagination for infinite scroll
                            app_label: element.dataset.appLabel,
                            model_name: element.dataset.modelName,
                            field_name: element.dataset.fieldName
                        };
                    }
                }
            });
        });
        // Return `this` so the plugin remains chainable, per jQuery convention.
        return this;
    };

    $(function() {
        // Initialize all autocomplete widgets except the one in the template
        // form used when a new formset is added.
        $('.admin-autocomplete').not('[name*=__prefix__]').djangoAdminSelect2();
    });

    // Inline formsets clone the __prefix__ template on "add another"; wire up
    // any autocomplete fields inside the freshly-added row.
    document.addEventListener('formset:added', (event) => {
        $(event.target).find('.admin-autocomplete').djangoAdminSelect2();
    });
}
javascript
github
https://github.com/django/django
django/contrib/admin/static/admin/js/autocomplete.js
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Bridge\PhpUnit\Legacy;

/**
 * Adapter trait matching the typed method signatures that PHPUnit 9's
 * Constraint base class declares. All real logic lives in
 * ConstraintLogicTrait (the do*() methods); this trait only exists so the
 * same constraint code can be shared across PHPUnit versions whose
 * signatures differ.
 *
 * @internal
 */
trait ConstraintTraitForV9
{
    use ConstraintLogicTrait;

    /**
     * Delegates to the shared evaluation logic.
     */
    public function evaluate($other, string $description = '', bool $returnResult = false): ?bool
    {
        return $this->doEvaluate($other, $description, $returnResult);
    }

    /**
     * Delegates to the shared count logic.
     */
    public function count(): int
    {
        return $this->doCount();
    }

    /**
     * Delegates to the shared string representation.
     */
    public function toString(): string
    {
        return $this->doToString();
    }

    /**
     * Delegates extra failure context to the shared logic.
     */
    protected function additionalFailureDescription($other): string
    {
        return $this->doAdditionalFailureDescription($other);
    }

    /**
     * Delegates the failure message to the shared logic.
     */
    protected function failureDescription($other): string
    {
        return $this->doFailureDescription($other);
    }

    /**
     * Delegates the match test to the shared logic.
     */
    protected function matches($other): bool
    {
        return $this->doMatches($other);
    }
}
php
github
https://github.com/symfony/symfony
src/Symfony/Bridge/PhpUnit/Legacy/ConstraintTraitForV9.php
##  Module statistics.py
##
##  Copyright (c) 2013 Steven D'Aprano <steve+python@pearwood.info>.
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##  http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.

"""
Basic statistics module.

This module provides functions for calculating statistics of data, including
averages, variance, and standard deviation.

Calculating averages
--------------------

==================  =============================================
Function            Description
==================  =============================================
mean                Arithmetic mean (average) of data.
median              Median (middle value) of data.
median_low          Low median of data.
median_high         High median of data.
median_grouped      Median, or 50th percentile, of grouped data.
mode                Mode (most common value) of data.
==================  =============================================

Calculate the arithmetic mean ("the average") of data:

>>> mean([-1.0, 2.5, 3.25, 5.75])
2.625


Calculate the standard median of discrete data:

>>> median([2, 3, 4, 5])
3.5


Calculate the median, or 50th percentile, of data grouped into class intervals
centred on the data values provided. E.g. if your data points are rounded to
the nearest whole number:

>>> median_grouped([2, 2, 3, 3, 3, 4])  #doctest: +ELLIPSIS
2.8333333333...

This should be interpreted in this way: you have two data points in the class
interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
the class interval 3.5-4.5. The median of these data points is 2.8333...


Calculating variability or spread
---------------------------------

==================  =============================================
Function            Description
==================  =============================================
pvariance           Population variance of data.
variance            Sample variance of data.
pstdev              Population standard deviation of data.
stdev               Sample standard deviation of data.
==================  =============================================

Calculate the standard deviation of sample data:

>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75])  #doctest: +ELLIPSIS
4.38961843444...

If you have previously calculated the mean, you can pass it as the optional
second argument to the four "spread" functions to avoid recalculating it:

>>> data = [1, 2, 2, 4, 4, 4, 5, 6]
>>> mu = mean(data)
>>> pvariance(data, mu)
2.5


Exceptions
----------

A single exception is defined: StatisticsError is a subclass of ValueError.

"""

__all__ = [ 'StatisticsError',
            'pstdev', 'pvariance', 'stdev', 'variance',
            'median',  'median_low', 'median_high', 'median_grouped',
            'mean', 'mode',
          ]


import collections
import math

from fractions import Fraction
from decimal import Decimal


# === Exceptions ===

class StatisticsError(ValueError):
    """Raised for statistics-domain errors (e.g. too few data points)."""
    pass


# === Private utilities ===

def _sum(data, start=0):
    """_sum(data [, start]) -> value

    Return a high-precision sum of the given numeric data. If optional
    argument ``start`` is given, it is added to the total. If ``data`` is
    empty, ``start`` (defaulting to 0) is returned.


    Examples
    --------

    >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
    11.0

    Some sources of round-off error will be avoided:

    >>> _sum([1e50, 1, -1e50] * 1000)  # Built-in sum returns zero.
    1000.0

    Fractions and Decimals are also supported:

    >>> from fractions import Fraction as F
    >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
    Fraction(63, 20)

    >>> from decimal import Decimal as D
    >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
    >>> _sum(data)
    Decimal('0.6963')

    Mixed types are currently treated as an error, except that int is
    allowed.
    """
    # We fail as soon as we reach a value that is not an int or the type of
    # the first value which is not an int. E.g. _sum([int, int, float, int])
    # is okay, but sum([int, int, float, Fraction]) is not.
    allowed_types = set([int, type(start)])
    n, d = _exact_ratio(start)
    partials = {d: n}  # map {denominator: sum of numerators}
    # Micro-optimizations.
    exact_ratio = _exact_ratio
    partials_get = partials.get
    # Add numerators for each denominator.
    for x in data:
        _check_type(type(x), allowed_types)
        n, d = exact_ratio(x)
        partials[d] = partials_get(d, 0) + n
    # Find the expected result type. If allowed_types has only one item, it
    # will be int; if it has two, use the one which isn't int.
    assert len(allowed_types) in (1, 2)
    if len(allowed_types) == 1:
        assert allowed_types.pop() is int
        T = int
    else:
        T = (allowed_types - set([int])).pop()
    if None in partials:
        # A non-finite value (INF or NAN) was seen; _exact_ratio stored it
        # under the None denominator. Return it in the result type.
        assert issubclass(T, (float, Decimal))
        assert not math.isfinite(partials[None])
        return T(partials[None])
    total = Fraction()
    for d, n in sorted(partials.items()):
        total += Fraction(n, d)
    if issubclass(T, int):
        assert total.denominator == 1
        return T(total.numerator)
    if issubclass(T, Decimal):
        return T(total.numerator)/total.denominator
    return T(total)


def _check_type(T, allowed):
    """Record the first non-int type seen, or raise on a second one."""
    if T not in allowed:
        if len(allowed) == 1:
            allowed.add(T)
        else:
            types = ', '.join([t.__name__ for t in allowed] + [T.__name__])
            raise TypeError("unsupported mixed types: %s" % types)


def _exact_ratio(x):
    """Convert Real number x exactly to (numerator, denominator) pair.

    >>> _exact_ratio(0.25)
    (1, 4)

    x is expected to be an int, Fraction, Decimal or float.

    Non-finite values (INF, NAN) are returned as ``(x, None)``; callers use
    the None denominator as the sentinel for "not an exact ratio".
    """
    try:
        try:
            # int, Fraction
            return (x.numerator, x.denominator)
        except AttributeError:
            # float
            try:
                return x.as_integer_ratio()
            except AttributeError:
                # Decimal
                try:
                    return _decimal_to_ratio(x)
                except AttributeError:
                    msg = "can't convert type '{}' to numerator/denominator"
                    exc = TypeError(msg.format(type(x).__name__))
                    exc.__cause__ = None
                    raise exc
    except (OverflowError, ValueError):
        # INF or NAN
        if __debug__:
            # Decimal signalling NANs cannot be converted to float :-(
            if isinstance(x, Decimal):
                assert not x.is_finite()
            else:
                assert not math.isfinite(x)
        return (x, None)


# FIXME This is faster than Fraction.from_decimal, but still too slow.
def _decimal_to_ratio(d):
    """Convert Decimal d to exact integer ratio (numerator, denominator).

    >>> from decimal import Decimal
    >>> _decimal_to_ratio(Decimal("2.6"))
    (26, 10)

    """
    sign, digits, exp = d.as_tuple()
    if exp in ('F', 'n', 'N'):  # INF, NAN, sNAN
        assert not d.is_finite()
        raise ValueError
    num = 0
    for digit in digits:
        num = num*10 + digit
    if exp < 0:
        den = 10**-exp
    else:
        num *= 10**exp
        den = 1
    if sign:
        num = -num
    return (num, den)


def _counts(data):
    """Return sorted (value, frequency) pairs sharing the highest frequency."""
    # Generate a table of sorted (value, frequency) pairs.
    table = collections.Counter(iter(data)).most_common()
    if not table:
        return table
    # Extract the values with the highest frequency.
    maxfreq = table[0][1]
    for i in range(1, len(table)):
        if table[i][1] != maxfreq:
            table = table[:i]
            break
    return table


# === Measures of central tendency (averages) ===

def mean(data):
    """Return the sample arithmetic mean of data.

    >>> mean([1, 2, 3, 4, 4])
    2.8

    >>> from fractions import Fraction as F
    >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
    Fraction(13, 21)

    >>> from decimal import Decimal as D
    >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
    Decimal('0.5625')

    If ``data`` is empty, StatisticsError will be raised.
    """
    if iter(data) is data:
        # One-shot iterator: materialise it so we can take len() of it.
        data = list(data)
    n = len(data)
    if n < 1:
        raise StatisticsError('mean requires at least one data point')
    # _sum computes an exact (Fraction-based) total, so dividing once here
    # rounds at most once.  (A previous revision special-cased all-int data
    # via _sum(data, 0.0), which converted the exact sum to float *before*
    # dividing and could double-round huge integer sums.)
    return _sum(data)/n


def sort_and_convert(data):
    """Return ``data`` with every element coerced to float, sorted ascending.

    NOTE(review): this helper is not referenced anywhere in this module and
    is not in __all__; it is kept only for backward compatibility with any
    external callers.
    """
    newdata = []
    for i in data:
        newdata.append(float(i))
    return sorted(newdata)


# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
    """Return the median (middle value) of numeric data.

    When the number of data points is odd, return the middle data point.
    When the number of data points is even, the median is interpolated by
    taking the average of the two middle values:

    >>> median([1, 3, 5])
    3
    >>> median([1, 3, 5, 7])
    4.0

    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError("no median for empty data")
    if n%2 == 1:
        return data[n//2]
    else:
        i = n//2
        # Average the two middle values in the data's own numeric type.
        # (A previous revision coerced one operand with float(), which
        # raised TypeError for Decimal data and destroyed Fraction
        # exactness.)
        return (data[i - 1] + data[i])/2


def median_low(data):
    """Return the low median of numeric data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the smaller of the two middle values is returned.

    >>> median_low([1, 3, 5])
    3
    >>> median_low([1, 3, 5, 7])
    3

    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError("no median for empty data")
    if n%2 == 1:
        return data[n//2]
    else:
        return data[n//2 - 1]


def median_high(data):
    """Return the high median of data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the larger of the two middle values is returned.

    >>> median_high([1, 3, 5])
    3
    >>> median_high([1, 3, 5, 7])
    5

    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError("no median for empty data")
    return data[n//2]


def median_grouped(data, interval=1):
    """Return the 50th percentile (median) of grouped continuous data.

    >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
    3.7
    >>> median_grouped([52, 52, 53, 54])
    52.5

    This calculates the median as the 50th percentile, and should be
    used when your data is continuous and grouped. In the above example,
    the values 1, 2, 3, etc. actually represent the midpoint of classes
    0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
    class 3.5-4.5, and interpolation is used to estimate it.

    Optional argument ``interval`` represents the class interval, and
    defaults to 1. Changing the class interval naturally will change the
    interpolated 50th percentile value:

    >>> median_grouped([1, 3, 3, 5, 7], interval=1)
    3.25
    >>> median_grouped([1, 3, 3, 5, 7], interval=2)
    3.5

    This function does not check whether the data points are at least
    ``interval`` apart.
    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError("no median for empty data")
    elif n == 1:
        return data[0]
    # Find the value at the midpoint. Remember this corresponds to the
    # centre of the class interval.
    x = data[n//2]
    for obj in (x, interval):
        if isinstance(obj, (str, bytes)):
            raise TypeError('expected number but got %r' % obj)
    try:
        L = x - interval/2  # The lower limit of the median interval.
    except TypeError:
        # Mixed type. For now we just coerce to float.
        L = float(x) - float(interval)/2
    # NOTE: three stray Python-2 ``print`` statements (leftover debugging)
    # were removed here; they were a SyntaxError under Python 3 and made
    # the whole module unimportable.
    cf = data.index(x)  # Number of values below the median interval.
    # FIXME The following line could be more efficient for big lists.
    f = data.count(x)  # Number of data points in the median interval.
    return L + interval*(n/2 - cf)/f


def mode(data):
    """Return the most common data point from discrete or nominal data.

    ``mode`` assumes discrete data, and returns a single value. This is the
    standard treatment of the mode as commonly taught in schools:

    >>> mode([1, 1, 2, 3, 3, 3, 3, 4])
    3

    This also works with nominal (non-numeric) data:

    >>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
    'red'

    If there is not exactly one most common value, ``mode`` will raise
    StatisticsError.
    """
    # Generate a table of sorted (value, frequency) pairs.
    table = _counts(data)
    if len(table) == 1:
        return table[0][0]
    elif table:
        raise StatisticsError(
                'no unique mode; found %d equally common values' % len(table)
                )
    else:
        raise StatisticsError('no mode for empty data')


# === Measures of spread ===

# See http://mathworld.wolfram.com/Variance.html
#     http://mathworld.wolfram.com/SampleVariance.html
#     http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/

def _ss(data, c=None):
    """Return sum of square deviations of sequence data.

    If ``c`` is None, the mean is calculated in one pass, and the deviations
    from the mean are calculated in a second pass. Otherwise, deviations are
    calculated from ``c`` as given. Use the second case with care, as it can
    lead to garbage results.
    """
    if c is None:
        c = mean(data)
    ss = _sum((x-c)**2 for x in data)
    # The following sum should mathematically equal zero, but due to rounding
    # error may not.
    ss -= _sum((x-c) for x in data)**2/len(data)
    assert not ss < 0, 'negative sum of square deviations: %f' % ss
    return ss


def variance(data, xbar=None):
    """Return the sample variance of data.

    data should be an iterable of Real-valued numbers, with at least two
    values. The optional argument xbar, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function when your data is a sample from a population. To
    calculate the variance from the entire population, see ``pvariance``.

    Examples:

    >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
    >>> variance(data)
    1.3720238095238095

    If you have already calculated the mean of your data, you can pass it as
    the optional second argument ``xbar`` to avoid recalculating it:

    >>> m = mean(data)
    >>> variance(data, m)
    1.3720238095238095

    This function does not check that ``xbar`` is actually the mean of
    ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
    impossible results.

    Decimals and Fractions are supported:

    >>> from decimal import Decimal as D
    >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
    Decimal('31.01875')

    >>> from fractions import Fraction as F
    >>> variance([F(1, 6), F(1, 2), F(5, 3)])
    Fraction(67, 108)

    """
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 2:
        raise StatisticsError('variance requires at least two data points')
    ss = _ss(data, xbar)
    return ss/(n-1)


def pvariance(data, mu=None):
    """Return the population variance of ``data``.

    data should be an iterable of Real-valued numbers, with at least one
    value. The optional argument mu, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function to calculate the variance from the entire population.
    To estimate the variance from a sample, the ``variance`` function is
    usually a better choice.

    Examples:

    >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
    >>> pvariance(data)
    1.25

    If you have already calculated the mean of the data, you can pass it as
    the optional second argument to avoid recalculating it:

    >>> mu = mean(data)
    >>> pvariance(data, mu)
    1.25

    This function does not check that ``mu`` is actually the mean of ``data``.
    Giving arbitrary values for ``mu`` may lead to invalid or impossible
    results.

    Decimals and Fractions are supported:

    >>> from decimal import Decimal as D
    >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
    Decimal('24.815')

    >>> from fractions import Fraction as F
    >>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
    Fraction(13, 72)

    """
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        raise StatisticsError('pvariance requires at least one data point')
    ss = _ss(data, mu)
    return ss/n


def stdev(data, xbar=None):
    """Return the square root of the sample variance.

    See ``variance`` for arguments and other details.

    >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
    1.0810874155219827

    """
    var = variance(data, xbar)
    try:
        # Decimal (and similar exact types) provide their own sqrt.
        return var.sqrt()
    except AttributeError:
        return math.sqrt(var)


def pstdev(data, mu=None):
    """Return the square root of the population variance.

    See ``pvariance`` for arguments and other details.

    >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
    0.986893273527251

    """
    var = pvariance(data, mu)
    try:
        # Decimal (and similar exact types) provide their own sqrt.
        return var.sqrt()
    except AttributeError:
        return math.sqrt(var)
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import base64 import binascii from collections import OrderedDict import hashlib import importlib from django.dispatch import receiver from django.conf import settings from django.test.signals import setting_changed from django.utils.encoding import force_bytes, force_str, force_text from django.core.exceptions import ImproperlyConfigured from django.utils.crypto import ( pbkdf2, constant_time_compare, get_random_string) from django.utils.module_loading import import_string from django.utils.translation import ugettext_noop as _ UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX HASHERS = None # lazily loaded from PASSWORD_HASHERS PREFERRED_HASHER = None # defaults to first item in PASSWORD_HASHERS @receiver(setting_changed) def reset_hashers(**kwargs): if kwargs['setting'] == 'PASSWORD_HASHERS': global HASHERS, PREFERRED_HASHER HASHERS = None PREFERRED_HASHER = None def is_password_usable(encoded): if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX): return False try: identify_hasher(encoded) except ValueError: return False return True def check_password(password, encoded, setter=None, preferred='default'): """ Returns a boolean of whether the raw password matches the three part encoded digest. If setter is specified, it'll be called when you need to regenerate the password. 
""" if password is None or not is_password_usable(encoded): return False preferred = get_hasher(preferred) hasher = identify_hasher(encoded) must_update = hasher.algorithm != preferred.algorithm if not must_update: must_update = preferred.must_update(encoded) is_correct = hasher.verify(password, encoded) if setter and is_correct and must_update: setter(password) return is_correct def make_password(password, salt=None, hasher='default'): """ Turn a plain-text password into a hash for database storage Same as encode() but generates a new random salt. If password is None then a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string will be returned which disallows logins. Additional random string reduces chances of gaining access to staff or superuser accounts. See ticket #20079 for more info. """ if password is None: return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH) hasher = get_hasher(hasher) if not salt: salt = hasher.salt() return hasher.encode(password, salt) def load_hashers(password_hashers=None): global HASHERS global PREFERRED_HASHER hashers = [] if not password_hashers: password_hashers = settings.PASSWORD_HASHERS for backend in password_hashers: hasher = import_string(backend)() if not getattr(hasher, 'algorithm'): raise ImproperlyConfigured("hasher doesn't specify an " "algorithm name: %s" % backend) hashers.append(hasher) HASHERS = dict((hasher.algorithm, hasher) for hasher in hashers) PREFERRED_HASHER = hashers[0] def get_hasher(algorithm='default'): """ Returns an instance of a loaded password hasher. If algorithm is 'default', the default hasher will be returned. This function will also lazy import hashers specified in your settings file if needed. 
""" if hasattr(algorithm, 'algorithm'): return algorithm elif algorithm == 'default': if PREFERRED_HASHER is None: load_hashers() return PREFERRED_HASHER else: if HASHERS is None: load_hashers() if algorithm not in HASHERS: raise ValueError("Unknown password hashing algorithm '%s'. " "Did you specify it in the PASSWORD_HASHERS " "setting?" % algorithm) return HASHERS[algorithm] def identify_hasher(encoded): """ Returns an instance of a loaded password hasher. Identifies hasher algorithm by examining encoded hash, and calls get_hasher() to return hasher. Raises ValueError if algorithm cannot be identified, or if hasher is not loaded. """ # Ancient versions of Django created plain MD5 passwords and accepted # MD5 passwords with an empty salt. if ((len(encoded) == 32 and '$' not in encoded) or (len(encoded) == 37 and encoded.startswith('md5$$'))): algorithm = 'unsalted_md5' # Ancient versions of Django accepted SHA1 passwords with an empty salt. elif len(encoded) == 46 and encoded.startswith('sha1$$'): algorithm = 'unsalted_sha1' else: algorithm = encoded.split('$', 1)[0] return get_hasher(algorithm) def mask_hash(hash, show=6, char="*"): """ Returns the given hash, with only the first ``show`` number shown. The rest are masked with ``char`` for security reasons. """ masked = hash[:show] masked += char * len(hash[show:]) return masked class BasePasswordHasher(object): """ Abstract base class for password hashers When creating your own hasher, you need to override algorithm, verify(), encode() and safe_summary(). PasswordHasher objects are immutable. 
""" algorithm = None library = None def _load_library(self): if self.library is not None: if isinstance(self.library, (tuple, list)): name, mod_path = self.library else: mod_path = self.library try: module = importlib.import_module(mod_path) except ImportError as e: raise ValueError("Couldn't load %r algorithm library: %s" % (self.__class__.__name__, e)) return module raise ValueError("Hasher %r doesn't specify a library attribute" % self.__class__.__name__) def salt(self): """ Generates a cryptographically secure nonce salt in ASCII """ return get_random_string() def verify(self, password, encoded): """ Checks if the given password is correct """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method') def encode(self, password, salt): """ Creates an encoded database value The result is normally formatted as "algorithm$salt$hash" and must be fewer than 128 characters. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method') def safe_summary(self, encoded): """ Returns a summary of safe values The result is a dictionary and will be used where the password field must be displayed to construct a safe representation of the password. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method') def must_update(self, encoded): return False class PBKDF2PasswordHasher(BasePasswordHasher): """ Secure password hashing using the PBKDF2 algorithm (recommended) Configured to use PBKDF2 + HMAC + SHA256 with 20000 iterations. The result is a 64 byte binary string. Iterations may be changed safely but you must rename the algorithm if you change SHA256. 
""" algorithm = "pbkdf2_sha256" iterations = 20000 digest = hashlib.sha256 def encode(self, password, salt, iterations=None): assert password is not None assert salt and '$' not in salt if not iterations: iterations = self.iterations hash = pbkdf2(password, salt, iterations, digest=self.digest) hash = base64.b64encode(hash).decode('ascii').strip() return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash) def verify(self, password, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt, int(iterations)) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('iterations'), iterations), (_('salt'), mask_hash(salt)), (_('hash'), mask_hash(hash)), ]) def must_update(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) return int(iterations) != self.iterations class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher): """ Alternate PBKDF2 hasher which uses SHA1, the default PRF recommended by PKCS #5. This is compatible with other implementations of PBKDF2, such as openssl's PKCS5_PBKDF2_HMAC_SHA1(). """ algorithm = "pbkdf2_sha1" digest = hashlib.sha1 class BCryptSHA256PasswordHasher(BasePasswordHasher): """ Secure password hashing using the bcrypt algorithm (recommended) This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. 
""" algorithm = "bcrypt_sha256" digest = hashlib.sha256 library = ("bcrypt", "bcrypt") rounds = 12 def salt(self): bcrypt = self._load_library() return bcrypt.gensalt(self.rounds) def encode(self, password, salt): bcrypt = self._load_library() # Need to reevaluate the force_bytes call once bcrypt is supported on # Python 3 # Hash the password prior to using bcrypt to prevent password truncation # See: https://code.djangoproject.com/ticket/20138 if self.digest is not None: # We use binascii.hexlify here because Python3 decided that a hex encoded # bytestring is somehow a unicode. password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) data = bcrypt.hashpw(password, salt) return "%s$%s" % (self.algorithm, force_text(data)) def verify(self, password, encoded): algorithm, data = encoded.split('$', 1) assert algorithm == self.algorithm bcrypt = self._load_library() # Hash the password prior to using bcrypt to prevent password truncation # See: https://code.djangoproject.com/ticket/20138 if self.digest is not None: # We use binascii.hexlify here because Python3 decided that a hex encoded # bytestring is somehow a unicode. 
password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) # Ensure that our data is a bytestring data = force_bytes(data) # force_bytes() necessary for py-bcrypt compatibility hashpw = force_bytes(bcrypt.hashpw(password, data)) return constant_time_compare(data, hashpw) def safe_summary(self, encoded): algorithm, empty, algostr, work_factor, data = encoded.split('$', 4) assert algorithm == self.algorithm salt, checksum = data[:22], data[22:] return OrderedDict([ (_('algorithm'), algorithm), (_('work factor'), work_factor), (_('salt'), mask_hash(salt)), (_('checksum'), mask_hash(checksum)), ]) class BCryptPasswordHasher(BCryptSHA256PasswordHasher): """ Secure password hashing using the bcrypt algorithm This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. This hasher does not first hash the password which means it is subject to the 72 character bcrypt password truncation, most use cases should prefer the BCryptSha512PasswordHasher. 
See: https://code.djangoproject.com/ticket/20138 """ algorithm = "bcrypt" digest = None class SHA1PasswordHasher(BasePasswordHasher): """ The SHA1 password hashing algorithm (not recommended) """ algorithm = "sha1" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.sha1(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class MD5PasswordHasher(BasePasswordHasher): """ The Salted MD5 password hashing algorithm (not recommended) """ algorithm = "md5" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.md5(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class UnsaltedSHA1PasswordHasher(BasePasswordHasher): """ Very insecure algorithm that you should *never* use; stores SHA1 hashes with an empty salt. This class is implemented because Django used to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. 
""" algorithm = "unsalted_sha1" def salt(self): return '' def encode(self, password, salt): assert salt == '' hash = hashlib.sha1(force_bytes(password)).hexdigest() return 'sha1$$%s' % hash def verify(self, password, encoded): encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): assert encoded.startswith('sha1$$') hash = encoded[6:] return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(hash)), ]) class UnsaltedMD5PasswordHasher(BasePasswordHasher): """ Incredibly insecure algorithm that you should *never* use; stores unsalted MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an empty salt. This class is implemented because Django used to store passwords this way and to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. """ algorithm = "unsalted_md5" def salt(self): return '' def encode(self, password, salt): assert salt == '' return hashlib.md5(force_bytes(password)).hexdigest() def verify(self, password, encoded): if len(encoded) == 37 and encoded.startswith('md5$$'): encoded = encoded[5:] encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(encoded, show=3)), ]) class CryptPasswordHasher(BasePasswordHasher): """ Password hashing using UNIX crypt (not recommended) The crypt module is not supported on all platforms. 
""" algorithm = "crypt" library = "crypt" def salt(self): return get_random_string(2) def encode(self, password, salt): crypt = self._load_library() assert len(salt) == 2 data = crypt.crypt(force_str(password), salt) # we don't need to store the salt, but Django used to do this return "%s$%s$%s" % (self.algorithm, '', data) def verify(self, password, encoded): crypt = self._load_library() algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return constant_time_compare(data, crypt.crypt(force_str(password), data)) def safe_summary(self, encoded): algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), salt), (_('hash'), mask_hash(data, show=3)), ])
unknown
codeparrot/codeparrot-clean
# Copyright 2016 Internap. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from time import time from fake_switches.netconf import dict_2_etree, XML_ATTRIBUTES from hamcrest import assert_that, has_length, greater_than from tests.juniper import BaseJuniper from tests.util.global_reactor import COMMIT_DELAY class JuniperBaseProtocolWithCommitDelayTest(BaseJuniper): test_switch = "commit-delayed-juniper" def test_lock_edit_candidate_add_vlan_and_commit_with_commit_delay(self): with self.nc.locked(target='candidate'): result = self.nc.edit_config(target='candidate', config=dict_2_etree({ "config": { "configuration": { "vlans": { "vlan": { "name": "VLAN2999", } } } }})) assert_that(result.xpath("//rpc-reply/ok"), has_length(1)) result = self.nc.commit() assert_that(result.xpath("//rpc-reply/ok"), has_length(1)) result = self.nc.get_config(source="running") assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(1)) self.edit({ "vlans": { "vlan": { XML_ATTRIBUTES: {"operation": "delete"}, "name": "VLAN2999" } } }) start_time = time() self.nc.commit() end_time = time() result = self.nc.get_config(source="running") assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(0)) assert_that((end_time - start_time), greater_than(COMMIT_DELAY)) def edit(self, config): result = self.nc.edit_config(target="candidate", config=dict_2_etree({ "config": { "configuration": config } })) assert_that(result.xpath("//rpc-reply/ok"), has_length(1))
unknown
codeparrot/codeparrot-clean
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """QtWebKit specific part of the web element API.""" from PyQt5.QtCore import QRect from PyQt5.QtWebKit import QWebElement from qutebrowser.config import config from qutebrowser.utils import log, utils, javascript from qutebrowser.browser import webelem class IsNullError(webelem.Error): """Gets raised by WebKitElement if an element is null.""" pass class WebKitElement(webelem.AbstractWebElement): """A wrapper around a QWebElement.""" def __init__(self, elem, tab): super().__init__(tab) if isinstance(elem, self.__class__): raise TypeError("Trying to wrap a wrapper!") if elem.isNull(): raise IsNullError('{} is a null element!'.format(elem)) self._elem = elem def __eq__(self, other): if not isinstance(other, WebKitElement): return NotImplemented return self._elem == other._elem # pylint: disable=protected-access def __getitem__(self, key): self._check_vanished() if key not in self: raise KeyError(key) return self._elem.attribute(key) def __setitem__(self, key, val): self._check_vanished() self._elem.setAttribute(key, val) def __delitem__(self, key): self._check_vanished() if key not in self: raise KeyError(key) self._elem.removeAttribute(key) def __contains__(self, key): 
self._check_vanished() return self._elem.hasAttribute(key) def __iter__(self): self._check_vanished() yield from self._elem.attributeNames() def __len__(self): self._check_vanished() return len(self._elem.attributeNames()) def _check_vanished(self): """Raise an exception if the element vanished (is null).""" if self._elem.isNull(): raise IsNullError('Element {} vanished!'.format(self._elem)) def has_frame(self): self._check_vanished() return self._elem.webFrame() is not None def geometry(self): self._check_vanished() return self._elem.geometry() def style_property(self, name, *, strategy): self._check_vanished() strategies = { # FIXME:qtwebengine which ones do we actually need? 'inline': QWebElement.InlineStyle, 'computed': QWebElement.ComputedStyle, } qt_strategy = strategies[strategy] return self._elem.styleProperty(name, qt_strategy) def classes(self): self._check_vanished() return self._elem.classes() def tag_name(self): """Get the tag name for the current element.""" self._check_vanished() return self._elem.tagName().lower() def outer_xml(self): """Get the full HTML representation of this element.""" self._check_vanished() return self._elem.toOuterXml() def text(self, *, use_js=False): self._check_vanished() if self.is_content_editable() or not use_js: return self._elem.toPlainText() else: return self._elem.evaluateJavaScript('this.value') def set_text(self, text, *, use_js=False): self._check_vanished() if self.is_content_editable() or not use_js: log.webelem.debug("Filling {!r} via set_text.".format(self)) self._elem.setPlainText(text) else: log.webelem.debug("Filling {!r} via javascript.".format(self)) text = javascript.string_escape(text) self._elem.evaluateJavaScript("this.value='{}'".format(text)) def insert_text(self, text): self._check_vanished() if not self.is_editable(strict=True): raise webelem.Error("Element is not editable!") log.webelem.debug("Inserting text into element {!r}".format(self)) self._elem.evaluateJavaScript(""" var text = "{}"; var 
event = document.createEvent("TextEvent"); event.initTextEvent("textInput", true, true, null, text); this.dispatchEvent(event); """.format(javascript.string_escape(text))) def _parent(self): """Get the parent element of this element.""" self._check_vanished() elem = self._elem.parent() if elem is None or elem.isNull(): return None return WebKitElement(elem, tab=self._tab) def _rect_on_view_js(self): """Javascript implementation for rect_on_view.""" # FIXME:qtwebengine maybe we can reuse this? rects = self._elem.evaluateJavaScript("this.getClientRects()") if rects is None: # pragma: no cover # On e.g. Void Linux with musl libc, the stack size is too small # for jsc, and running JS will fail. If that happens, fall back to # the Python implementation. # https://github.com/The-Compiler/qutebrowser/issues/1641 return None text = utils.compact_text(self._elem.toOuterXml(), 500) log.webelem.vdebug("Client rectangles of element '{}': {}".format( text, rects)) for i in range(int(rects.get("length", 0))): rect = rects[str(i)] width = rect.get("width", 0) height = rect.get("height", 0) if width > 1 and height > 1: # fix coordinates according to zoom level zoom = self._elem.webFrame().zoomFactor() if not config.get('ui', 'zoom-text-only'): rect["left"] *= zoom rect["top"] *= zoom width *= zoom height *= zoom rect = QRect(rect["left"], rect["top"], width, height) frame = self._elem.webFrame() while frame is not None: # Translate to parent frames' position (scroll position # is taken care of inside getClientRects) rect.translate(frame.geometry().topLeft()) frame = frame.parentFrame() return rect return None def _rect_on_view_python(self, elem_geometry): """Python implementation for rect_on_view.""" if elem_geometry is None: geometry = self._elem.geometry() else: geometry = elem_geometry frame = self._elem.webFrame() rect = QRect(geometry) while frame is not None: rect.translate(frame.geometry().topLeft()) rect.translate(frame.scrollPosition() * -1) frame = frame.parentFrame() 
return rect def rect_on_view(self, *, elem_geometry=None, no_js=False): """Get the geometry of the element relative to the webview. Uses the getClientRects() JavaScript method to obtain the collection of rectangles containing the element and returns the first rectangle which is large enough (larger than 1px times 1px). If all rectangles returned by getClientRects() are too small, falls back to elem.rect_on_view(). Skipping of small rectangles is due to <a> elements containing other elements with "display:block" style, see https://github.com/The-Compiler/qutebrowser/issues/1298 Args: elem_geometry: The geometry of the element, or None. Calling QWebElement::geometry is rather expensive so we want to avoid doing it twice. no_js: Fall back to the Python implementation """ self._check_vanished() # First try getting the element rect via JS, as that's usually more # accurate if elem_geometry is None and not no_js: rect = self._rect_on_view_js() if rect is not None: return rect # No suitable rects found via JS, try via the QWebElement API return self._rect_on_view_python(elem_geometry) def _is_visible(self, mainframe): """Check if the given element is visible in the given frame. This is not public API because it can't be implemented easily here with QtWebEngine, and is only used via find_css(..., only_visible=True) via the tab API. 
""" self._check_vanished() # CSS attributes which hide an element hidden_attributes = { 'visibility': 'hidden', 'display': 'none', } for k, v in hidden_attributes.items(): if self._elem.styleProperty(k, QWebElement.ComputedStyle) == v: return False elem_geometry = self._elem.geometry() if not elem_geometry.isValid() and elem_geometry.x() == 0: # Most likely an invisible link return False # First check if the element is visible on screen elem_rect = self.rect_on_view(elem_geometry=elem_geometry) mainframe_geometry = mainframe.geometry() if elem_rect.isValid(): visible_on_screen = mainframe_geometry.intersects(elem_rect) else: # We got an invalid rectangle (width/height 0/0 probably), but this # can still be a valid link. visible_on_screen = mainframe_geometry.contains( elem_rect.topLeft()) # Then check if it's visible in its frame if it's not in the main # frame. elem_frame = self._elem.webFrame() framegeom = QRect(elem_frame.geometry()) if not framegeom.isValid(): visible_in_frame = False elif elem_frame.parentFrame() is not None: framegeom.moveTo(0, 0) framegeom.translate(elem_frame.scrollPosition()) if elem_geometry.isValid(): visible_in_frame = framegeom.intersects(elem_geometry) else: # We got an invalid rectangle (width/height 0/0 probably), but # this can still be a valid link. visible_in_frame = framegeom.contains(elem_geometry.topLeft()) else: visible_in_frame = visible_on_screen return all([visible_on_screen, visible_in_frame]) def remove_blank_target(self): elem = self for _ in range(5): if elem is None: break tag = elem.tag_name() if tag == 'a' or tag == 'area': if elem.get('target', None) == '_blank': elem['target'] = '_top' break elem = elem._parent() # pylint: disable=protected-access def get_child_frames(startframe): """Get all children recursively of a given QWebFrame. Loosely based on http://blog.nextgenetics.net/?e=64 Args: startframe: The QWebFrame to start with. Return: A list of children QWebFrame, or an empty list. 
""" results = [] frames = [startframe] while frames: new_frames = [] for frame in frames: results.append(frame) new_frames += frame.childFrames() frames = new_frames return results
unknown
codeparrot/codeparrot-clean
import sys #import time from unittest import main if sys.version_info[0] == 3: from urllib import request as urllib2 else: import urllib2 import util class Test_wsgiserver(util.TestServer): server = 'wsgiserver.py' URL = 'http://127.0.0.1:8088' not_found_message = '<h1>Not Found</h1>' def read(self, path='/'): url = self.URL + path try: response = urllib2.urlopen(url) except urllib2.HTTPError: response = sys.exc_info()[1] return '%s %s' % (response.code, response.msg), response.read() def _test_hello(self): status, data = self.read('/') self.assertEqual(status, '200 OK') self.assertEqual(data, "<b>hello world</b>") def _test_not_found(self): status, data = self.read('/xxx') self.assertEqual(status, '404 Not Found') self.assertEqual(data, self.not_found_message) class Test_wsgiserver_ssl(Test_wsgiserver): server = 'wsgiserver_ssl.py' URL = 'https://127.0.0.1:8443' class Test_webproxy(Test_wsgiserver): server = 'webproxy.py' def _run_all_tests(self): status, data = self.read('/') self.assertEqual(status, '200 OK') assert "gevent example" in data, repr(data) status, data = self.read('/http://www.google.com') self.assertEqual(status, '200 OK') assert 'google' in data.lower(), repr(data) # class Test_webpy(Test_wsgiserver): # server = 'webpy.py' # not_found_message = 'not found' # # def _test_hello(self): # status, data = self.read('/') # self.assertEqual(status, '200 OK') # assert "Hello, world" in data, repr(data) # # def _test_long(self): # start = time.time() # status, data = self.read('/long') # delay = time.time() - start # assert 10 - 0.5 < delay < 10 + 0.5, delay # self.assertEqual(status, '200 OK') # self.assertEqual(data, 'Hello, 10 seconds later') if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
""" OpenStreetMap Animation Tool: - Provides easy setup to show things moving around on a map. - Requires pygame. Basic idea: 1. Create TrackingViz objects or your own custom SimViz's 2. Create a Simulation object with those Viz's 3. Call the Simulation's run() method 4. Run the simulation: - Mouse over icons to display labels - up/down keys increase/decrease speed of simulation - left/right keys move simulation to begin/end of time window - space bar sets speed to zero - escape key exits The TrackingViz class can be used without any knowledge of Pygame. All you need is an image you want to put on the map and a function defining where it should be on the map as a function of time. For any other visualization on the map, you will want to override the SimViz class. This will require knowledge of how to use Pygame. Basically a Pygame Surface will be passed in when it is time to draw. The Simulation class just does the following: 1. Downloads OSM tiles, patches them together, and resizes 2. Displays a window with the map on it 3. Runs a timer and has all Viz objects draw to the map at each frame. Keyboard input is accepted to control the speed of the simulation. """ # Copyright (c) 2010 Colin Bick, Robert Damphousse # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# NOTE(review): ported from Python 2 to Python 3-compatible syntax.  The
# original used `raise Exception, msg`, `print` statements, `basestring`,
# `cmp`, `list.sort(cmp=...)` and the builtin `reduce`, none of which exist
# on Python 3.  Integer divisions that relied on Python 2 `/` semantics now
# use `//` explicitly so pixel math is unchanged.
import time
from functools import reduce  # builtin in Python 2; lives in functools on Python 3

import pygame

from manager import OSMManager, PygameImageManager

Inf = float('inf')


class SimViz(object):
    """
    Abstract interface representing an object which knows how and when to
    display itself on a surface inside of a Simulation.

    This class is meant to serve as an interface definition to be
    subclassed (or at least replicated).
    """

    def __init__(self, drawingOrder=0):
        """
        Base constructor for a SimViz.

        'drawingOrder' specifies the order in which this viz is drawn to
        the surface relative to others.  See getDrawingOrder().
        """
        self.drawing_order = drawingOrder

    def getBoundingBox(self):
        """
        To be overridden.
        Returns (latmin, latmax, lonmin, lonmax), the lat/lon bounds of
        this visualization object.

        For Simulation purposes this does not need to be implemented if
        this SimViz is passed in as one of the scene_vizs (as opposed to
        an actor_viz).
        """
        # NotImplementedError is a subclass of Exception, so existing
        # callers that caught Exception still work.
        raise NotImplementedError("UNIMPLEMENTED")

    def getTimeInterval(self):
        """
        To be overridden.
        Returns (begin, end) time values for the existence of this
        visualization object.  May return (-Inf, Inf) to indicate that
        it is always present.
        """
        raise NotImplementedError("UNIMPLEMENTED")

    def setState(self, simtime, getXY):
        """
        To be overridden.
        Sets the internal state of this viz to the specified time.  This
        should be stored internally, for subsequent calls to methods
        such as drawToSurface or mouseIntersect.
        """
        raise NotImplementedError("UNIMPLEMENTED")

    def drawToSurface(self, surf):
        """
        To be overridden.
        Draws this viz on the supplied surface, according to its
        internal state.
        """
        raise NotImplementedError("UNIMPLEMENTED")

    def getDrawingOrder(self):
        """
        Returns an integer representing the order in which this viz
        should be drawn relative to other vizs.  Vizs with a higher
        drawing order are drawn after those with a lower drawing order,
        meaning they will be drawn on top.
        """
        return self.drawing_order

    def getLabel(self):
        """
        To be overridden (optionally).
        Returns the string to be displayed as a descriptive label for
        this viz object.  Default behavior is to return None, meaning no
        label should ever be displayed.
        """
        return None

    def mouseIntersect(self, mousex, mousey):
        """
        To be overridden.
        Returns True if the given mouse location is inside some
        designated region of this visualization.

        For Simulation purposes, if getLabel() returns None then this
        method does not need to be implemented.
        """
        raise NotImplementedError("UNIMPLEMENTED")


class TrackingViz(SimViz):
    """A generic SimViz which displays a moving image on the map."""

    def __init__(self, label, image, getLatLonAtTimeFunc,
                 time_window, bounding_box, drawing_order=0):
        """
        Constructs a TrackingViz.

        Arguments:
          label - text to display when moused over, or None for no text
          image - filename of image to display on map
          getLatLonAtTimeFunc - a function that takes one argument
              (time) and returns (lat, lon), or None if the object has
              no location at that time
          time_window - a tuple (begin_time, end_time) representing the
              times this object exists
          bounding_box - a tuple (min_lat, max_lat, min_lon, max_lon)
              representing the farthest bounds that this object will
              reach
          drawing_order - see SimViz.getDrawingOrder()
        """
        SimViz.__init__(self, drawing_order)
        self.label = label
        self.image = pygame.image.load(image)
        rect = self.image.get_rect()
        self.width = rect.width
        self.height = rect.height
        self.time_window = time_window
        self.bounding_box = bounding_box
        self.getLocationAtTime = getLatLonAtTimeFunc
        # Fix: initialize the screen position so drawToSurface() and
        # mouseIntersect() are safe even before the first setState()
        # call (the original raised AttributeError in that case).
        self.xy = None

    def getTimeInterval(self):
        return self.time_window

    def getBoundingBox(self):
        return self.bounding_box

    def getLabel(self):
        return self.label

    def setState(self, simtime, getXY):
        """Project this object's (lat, lon) at simtime to pixel (x, y)."""
        self.xy = None
        ll = self.getLocationAtTime(simtime)
        if ll is None:
            return
        self.xy = getXY(*ll)

    def drawToSurface(self, surf):
        """Blit the image centered on the current position, if any."""
        if self.xy:
            x, y = self.xy
            # // preserves the Python 2 integer-division pixel math
            surf.blit(self.image,
                      (x - self.width // 2, y - self.height // 2))

    def mouseIntersect(self, mousex, mousey):
        """True if (mousex, mousey) falls inside the image rectangle."""
        if not self.xy:
            return False
        x, y = self.xy
        return (abs(x - mousex) < self.width // 2
                and abs(y - mousey) < self.height // 2)


class Simulation(object):
    """
    A collection of generic SimVizs and a timer, of sorts.  This lets
    the visualizer say "Give me coordinates of each object at time T".
    A run() method is provided which displays the simulation in a
    pygame window.
    """

    def __init__(self, actor_vizs, scene_vizs, initTime=0):
        """
        Given two collections of generic SimViz objects, and optionally
        an initial time, creates a Simulation object.

        Both actor_vizs and scene_vizs should be collections of SimViz
        objects.  The difference is that the actor_vizs determine the
        bounds of the animation in space and time, while the location
        and time windows of the scene_vizs are largely ignored.
        """
        self.actor_vizs = actor_vizs
        self.scene_vizs = scene_vizs
        self.all_vizs = actor_vizs + scene_vizs
        self.__findBoundingBox()
        self.__findTimeWindow()
        self.__sortVizs()
        # setTime() assigns self.time, clamped into the time window
        # (the original's `self.time = 10000` pre-assignment was dead).
        self.setTime(initTime)

    def __findBoundingBox(self):
        """Finds the latlon box bounding all actor objects."""
        def union(box, viz):
            b = viz.getBoundingBox()
            return (min(box[0], b[0]), max(box[1], b[1]),
                    min(box[2], b[2]), max(box[3], b[3]))
        self.bounding_box = reduce(union, self.actor_vizs,
                                   (Inf, -Inf, Inf, -Inf))

    def __findTimeWindow(self):
        """Finds the min and max times over all actor objects."""
        def union(window, viz):
            w = viz.getTimeInterval()
            return (min(window[0], w[0]), max(window[1], w[1]))
        self.time_window = reduce(union, self.actor_vizs, (Inf, -Inf))

    def __sortVizs(self):
        """Sorts tracked objects in order of drawing order."""
        # key= replaces the Python 2 cmp= comparator; same ordering.
        self.all_vizs.sort(key=lambda viz: viz.getDrawingOrder())

    def setTime(self, time):
        """Moves all tracks to the given time, clamped to the window."""
        self.time = min(max(time, self.time_window[0]),
                        self.time_window[1])

    def printTime(self):
        """Prints the current simulation time as HH:MM:SS."""
        hours = int(self.time / 3600)
        minutes = int((self.time % 3600) / 60)
        seconds = int(self.time % 60)
        print("%02d:%02d:%02d" % (hours, minutes, seconds))

    def getXY(self, lat, lon, bounds, ssize):
        """
        Given lat/lon coordinates, a (latmin, latmax, lonmin, lonmax)
        bounds tuple and a (width, height) screen size, returns the
        corresponding (x, y) pixel coordinates (y axis flipped so north
        is up).
        """
        x_ratio = (lon - bounds[2]) / (bounds[3] - bounds[2])
        y_ratio = 1.0 - ((lat - bounds[0]) / (bounds[1] - bounds[0]))
        return int(x_ratio * ssize[0]), int(y_ratio * ssize[1])

    def run(self, speed=0.0, windowsize=(1280, 800), refresh_rate=1.0,
            font="/Library/Frameworks/Python.framework/Versions/2.5/lib/"
                 "python2.5/site-packages/pygame/freesansbold.ttf",
            fontsize=10, osmzoom=14):
        """
        Pops up a window and displays the simulation on it.

        speed is advancement of the sim in sim-seconds per real second.
        windowsize is the desired (width, height) of the display window.
        refresh_rate is the pause in seconds between frames.
        font is either the full path to a pygame-compatible font file
        (e.g. a .ttf file), or an actual pygame Font object, or None.
        If no usable font results, labels are printed to stdout instead.
        fontsize is the size of the font, if it exists.
        osmzoom is the OpenStreetMap zoom level for background tiles.

        Keys: ESC quits; UP/DOWN change speed; SPACE pauses; LEFT/RIGHT
        jump to the beginning/end of the time window.
        """
        pygame.init()
        black = pygame.Color(0, 0, 0)
        notec = pygame.Color(200, 200, 80)

        # Resolve the label font; fall back to stdout labels on failure.
        # NOTE(review): the default font path is machine-specific and
        # left unchanged for interface compatibility; it simply falls
        # through to fnt=None elsewhere.
        fnt = None
        if isinstance(font, str):  # was basestring in the Py2 original
            try:
                fnt = pygame.font.Font(font, fontsize)
            except Exception:
                fnt = None
        elif isinstance(font, pygame.font.Font):
            fnt = font

        osm = OSMManager(cache="maptiles/",
                         image_manager=PygameImageManager())
        bg_big, new_bounds = osm.createOSMImage(self.bounding_box,
                                                zoom=osmzoom)
        w_h_ratio = float(bg_big.get_width()) / bg_big.get_height()
        # Shrink the window to keep the map's proportions while staying
        # within the requested windowsize.
        newwidth = int(windowsize[1] * w_h_ratio)
        newheight = int(windowsize[0] / w_h_ratio)
        if newwidth > windowsize[0]:
            windowsize = windowsize[0], newheight
        elif newheight > windowsize[1]:
            windowsize = newwidth, windowsize[1]

        screen = pygame.display.set_mode(windowsize)
        bg_small = pygame.transform.smoothscale(bg_big, windowsize)
        del bg_big  # free the full-size map image immediately

        lastTime = self.time
        getXY = lambda lat, lon: self.getXY(lat, lon, new_bounds,
                                            windowsize)

        ## Main simulation loop ##
        done = False  # renamed from 'exit', which shadowed the builtin
        while not done:
            # Check keyboard / window events
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    # Fix: honor window-manager close, which the
                    # original ignored (only ESC could quit).
                    done = True
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        done = True
                    elif event.key == pygame.K_UP:
                        speed = max((speed + 1) * 1.4,
                                    (speed / 1.4) + 1)
                    elif event.key == pygame.K_DOWN:
                        speed = min((speed / 1.4) - 1,
                                    (speed - 1) * 1.4)
                    elif event.key == pygame.K_SPACE:
                        speed = 0.0
                    elif event.key == pygame.K_LEFT:
                        self.time = self.time_window[0]
                    elif event.key == pygame.K_RIGHT:
                        self.time = self.time_window[1]

            # Grab mouse position
            mousex, mousey = pygame.mouse.get_pos()
            selected = None

            # Print the time if changed
            if self.time != lastTime:
                self.printTime()
                lastTime = self.time

            ## Draw the background
            screen.blit(bg_small, (0, 0))

            ## Draw the tracked objects
            for sviz in self.all_vizs:
                sviz.setState(self.time, getXY)
                sviz.drawToSurface(screen)
                label = sviz.getLabel()
                if label and sviz.mouseIntersect(mousex, mousey):
                    selected = sviz

            ## Display selected label
            if selected:
                if fnt:
                    text = fnt.render(selected.getLabel(), True,
                                      black, notec)
                    screen.blit(text, (mousex, mousey - 10))
                    del text
                else:
                    print(selected.getLabel())

            pygame.display.flip()

            time.sleep(refresh_rate)
            self.setTime(self.time + speed * refresh_rate)

        # Clean up and exit
        del bg_small
        pygame.display.quit()
# unknown
# codeparrot/codeparrot-clean
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#

# CFFI-based reimplementation of the ipap11helper C extension: binds the
# PKCS#11 API (via a dynamically loaded token module) and p11-kit's URI
# parser for key management in an HSM/softhsm token.
import random
import ctypes.util
import binascii

import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cffi import FFI

if six.PY3:
    # On Python 3 treat `unicode` as an alias of `str` so the rest of the
    # module can test isinstance(x, unicode) on both major versions.
    unicode = str

_ffi = FFI()

# Minimal C declarations needed from p11-kit's pkcs11.h / uri.h plus the
# OAEP parameter struct from the original p11helper.c.  Function pointers
# that are never called from Python are declared as opaque (`typedef ...`).
_ffi.cdef('''
/* p11-kit/pkcs11.h */

typedef unsigned long CK_FLAGS;

struct _CK_VERSION
{
  unsigned char major;
  unsigned char minor;
};

typedef unsigned long CK_SLOT_ID;

typedef unsigned long CK_SESSION_HANDLE;

typedef unsigned long CK_USER_TYPE;

typedef unsigned long CK_OBJECT_HANDLE;

typedef unsigned long CK_OBJECT_CLASS;

typedef unsigned long CK_KEY_TYPE;

typedef unsigned long CK_ATTRIBUTE_TYPE;

struct _CK_ATTRIBUTE
{
  CK_ATTRIBUTE_TYPE type;
  void *pValue;
  unsigned long ulValueLen;
};

typedef unsigned long CK_MECHANISM_TYPE;

struct _CK_MECHANISM
{
  CK_MECHANISM_TYPE mechanism;
  void *pParameter;
  unsigned long ulParameterLen;
};

typedef unsigned long CK_RV;

typedef ... *CK_NOTIFY;

struct _CK_FUNCTION_LIST;

typedef CK_RV (*CK_C_Initialize) (void *init_args);
typedef CK_RV (*CK_C_Finalize) (void *pReserved);
typedef ... *CK_C_GetInfo;
typedef ... *CK_C_GetFunctionList;
CK_RV C_GetFunctionList (struct _CK_FUNCTION_LIST **function_list);
typedef ... *CK_C_GetSlotList;
typedef ... *CK_C_GetSlotInfo;
typedef ... *CK_C_GetTokenInfo;
typedef ... *CK_C_WaitForSlotEvent;
typedef ... *CK_C_GetMechanismList;
typedef ... *CK_C_GetMechanismInfo;
typedef ... *CK_C_InitToken;
typedef ... *CK_C_InitPIN;
typedef ... *CK_C_SetPIN;
typedef CK_RV (*CK_C_OpenSession) (CK_SLOT_ID slotID, CK_FLAGS flags,
                                   void *application, CK_NOTIFY notify,
                                   CK_SESSION_HANDLE *session);
typedef CK_RV (*CK_C_CloseSession) (CK_SESSION_HANDLE session);
typedef ... *CK_C_CloseAllSessions;
typedef ... *CK_C_GetSessionInfo;
typedef ... *CK_C_GetOperationState;
typedef ... *CK_C_SetOperationState;
typedef CK_RV (*CK_C_Login) (CK_SESSION_HANDLE session,
                             CK_USER_TYPE user_type,
                             unsigned char *pin, unsigned long pin_len);
typedef CK_RV (*CK_C_Logout) (CK_SESSION_HANDLE session);
typedef CK_RV (*CK_C_CreateObject) (CK_SESSION_HANDLE session,
                                    struct _CK_ATTRIBUTE *templ,
                                    unsigned long count,
                                    CK_OBJECT_HANDLE *object);
typedef ... *CK_C_CopyObject;
typedef CK_RV (*CK_C_DestroyObject) (CK_SESSION_HANDLE session,
                                     CK_OBJECT_HANDLE object);
typedef ... *CK_C_GetObjectSize;
typedef CK_RV (*CK_C_GetAttributeValue) (CK_SESSION_HANDLE session,
                                         CK_OBJECT_HANDLE object,
                                         struct _CK_ATTRIBUTE *templ,
                                         unsigned long count);
typedef CK_RV (*CK_C_SetAttributeValue) (CK_SESSION_HANDLE session,
                                         CK_OBJECT_HANDLE object,
                                         struct _CK_ATTRIBUTE *templ,
                                         unsigned long count);
typedef CK_RV (*CK_C_FindObjectsInit) (CK_SESSION_HANDLE session,
                                       struct _CK_ATTRIBUTE *templ,
                                       unsigned long count);
typedef CK_RV (*CK_C_FindObjects) (CK_SESSION_HANDLE session,
                                   CK_OBJECT_HANDLE *object,
                                   unsigned long max_object_count,
                                   unsigned long *object_count);
typedef CK_RV (*CK_C_FindObjectsFinal) (CK_SESSION_HANDLE session);
typedef ... *CK_C_EncryptInit;
typedef ... *CK_C_Encrypt;
typedef ... *CK_C_EncryptUpdate;
typedef ... *CK_C_EncryptFinal;
typedef ... *CK_C_DecryptInit;
typedef ... *CK_C_Decrypt;
typedef ... *CK_C_DecryptUpdate;
typedef ... *CK_C_DecryptFinal;
typedef ... *CK_C_DigestInit;
typedef ... *CK_C_Digest;
typedef ... *CK_C_DigestUpdate;
typedef ... *CK_C_DigestKey;
typedef ... *CK_C_DigestFinal;
typedef ... *CK_C_SignInit;
typedef ... *CK_C_Sign;
typedef ... *CK_C_SignUpdate;
typedef ... *CK_C_SignFinal;
typedef ... *CK_C_SignRecoverInit;
typedef ... *CK_C_SignRecover;
typedef ... *CK_C_VerifyInit;
typedef ... *CK_C_Verify;
typedef ... *CK_C_VerifyUpdate;
typedef ... *CK_C_VerifyFinal;
typedef ... *CK_C_VerifyRecoverInit;
typedef ... *CK_C_VerifyRecover;
typedef ... *CK_C_DigestEncryptUpdate;
typedef ... *CK_C_DecryptDigestUpdate;
typedef ... *CK_C_SignEncryptUpdate;
typedef ... *CK_C_DecryptVerifyUpdate;
typedef CK_RV (*CK_C_GenerateKey) (CK_SESSION_HANDLE session,
                                   struct _CK_MECHANISM *mechanism,
                                   struct _CK_ATTRIBUTE *templ,
                                   unsigned long count,
                                   CK_OBJECT_HANDLE *key);
typedef CK_RV (*CK_C_GenerateKeyPair) (CK_SESSION_HANDLE session,
                                       struct _CK_MECHANISM *mechanism,
                                       struct _CK_ATTRIBUTE * public_key_template,
                                       unsigned long public_key_attribute_count,
                                       struct _CK_ATTRIBUTE * private_key_template,
                                       unsigned long private_key_attribute_count,
                                       CK_OBJECT_HANDLE *public_key,
                                       CK_OBJECT_HANDLE *private_key);
typedef CK_RV (*CK_C_WrapKey) (CK_SESSION_HANDLE session,
                               struct _CK_MECHANISM *mechanism,
                               CK_OBJECT_HANDLE wrapping_key,
                               CK_OBJECT_HANDLE key,
                               unsigned char *wrapped_key,
                               unsigned long *wrapped_key_len);
typedef CK_RV (*CK_C_UnwrapKey) (CK_SESSION_HANDLE session,
                                 struct _CK_MECHANISM *mechanism,
                                 CK_OBJECT_HANDLE unwrapping_key,
                                 unsigned char *wrapped_key,
                                 unsigned long wrapped_key_len,
                                 struct _CK_ATTRIBUTE *templ,
                                 unsigned long attribute_count,
                                 CK_OBJECT_HANDLE *key);
typedef ... *CK_C_DeriveKey;
typedef ... *CK_C_SeedRandom;
typedef ... *CK_C_GenerateRandom;
typedef ... *CK_C_GetFunctionStatus;
typedef ... *CK_C_CancelFunction;

struct _CK_FUNCTION_LIST
{
  struct _CK_VERSION version;
  CK_C_Initialize C_Initialize;
  CK_C_Finalize C_Finalize;
  CK_C_GetInfo C_GetInfo;
  CK_C_GetFunctionList C_GetFunctionList;
  CK_C_GetSlotList C_GetSlotList;
  CK_C_GetSlotInfo C_GetSlotInfo;
  CK_C_GetTokenInfo C_GetTokenInfo;
  CK_C_GetMechanismList C_GetMechanismList;
  CK_C_GetMechanismInfo C_GetMechanismInfo;
  CK_C_InitToken C_InitToken;
  CK_C_InitPIN C_InitPIN;
  CK_C_SetPIN C_SetPIN;
  CK_C_OpenSession C_OpenSession;
  CK_C_CloseSession C_CloseSession;
  CK_C_CloseAllSessions C_CloseAllSessions;
  CK_C_GetSessionInfo C_GetSessionInfo;
  CK_C_GetOperationState C_GetOperationState;
  CK_C_SetOperationState C_SetOperationState;
  CK_C_Login C_Login;
  CK_C_Logout C_Logout;
  CK_C_CreateObject C_CreateObject;
  CK_C_CopyObject C_CopyObject;
  CK_C_DestroyObject C_DestroyObject;
  CK_C_GetObjectSize C_GetObjectSize;
  CK_C_GetAttributeValue C_GetAttributeValue;
  CK_C_SetAttributeValue C_SetAttributeValue;
  CK_C_FindObjectsInit C_FindObjectsInit;
  CK_C_FindObjects C_FindObjects;
  CK_C_FindObjectsFinal C_FindObjectsFinal;
  CK_C_EncryptInit C_EncryptInit;
  CK_C_Encrypt C_Encrypt;
  CK_C_EncryptUpdate C_EncryptUpdate;
  CK_C_EncryptFinal C_EncryptFinal;
  CK_C_DecryptInit C_DecryptInit;
  CK_C_Decrypt C_Decrypt;
  CK_C_DecryptUpdate C_DecryptUpdate;
  CK_C_DecryptFinal C_DecryptFinal;
  CK_C_DigestInit C_DigestInit;
  CK_C_Digest C_Digest;
  CK_C_DigestUpdate C_DigestUpdate;
  CK_C_DigestKey C_DigestKey;
  CK_C_DigestFinal C_DigestFinal;
  CK_C_SignInit C_SignInit;
  CK_C_Sign C_Sign;
  CK_C_SignUpdate C_SignUpdate;
  CK_C_SignFinal C_SignFinal;
  CK_C_SignRecoverInit C_SignRecoverInit;
  CK_C_SignRecover C_SignRecover;
  CK_C_VerifyInit C_VerifyInit;
  CK_C_Verify C_Verify;
  CK_C_VerifyUpdate C_VerifyUpdate;
  CK_C_VerifyFinal C_VerifyFinal;
  CK_C_VerifyRecoverInit C_VerifyRecoverInit;
  CK_C_VerifyRecover C_VerifyRecover;
  CK_C_DigestEncryptUpdate C_DigestEncryptUpdate;
  CK_C_DecryptDigestUpdate C_DecryptDigestUpdate;
  CK_C_SignEncryptUpdate C_SignEncryptUpdate;
  CK_C_DecryptVerifyUpdate C_DecryptVerifyUpdate;
  CK_C_GenerateKey C_GenerateKey;
  CK_C_GenerateKeyPair C_GenerateKeyPair;
  CK_C_WrapKey C_WrapKey;
  CK_C_UnwrapKey C_UnwrapKey;
  CK_C_DeriveKey C_DeriveKey;
  CK_C_SeedRandom C_SeedRandom;
  CK_C_GenerateRandom C_GenerateRandom;
  CK_C_GetFunctionStatus C_GetFunctionStatus;
  CK_C_CancelFunction C_CancelFunction;
  CK_C_WaitForSlotEvent C_WaitForSlotEvent;
};

typedef unsigned char CK_BYTE;
typedef unsigned char CK_UTF8CHAR;
typedef unsigned char CK_BBOOL;
typedef unsigned long int CK_ULONG;
typedef CK_BYTE *CK_BYTE_PTR;
typedef CK_ULONG *CK_ULONG_PTR;
typedef CK_OBJECT_HANDLE *CK_OBJECT_HANDLE_PTR;
typedef struct _CK_ATTRIBUTE CK_ATTRIBUTE;
typedef struct _CK_ATTRIBUTE *CK_ATTRIBUTE_PTR;
typedef struct _CK_MECHANISM CK_MECHANISM;
typedef struct _CK_FUNCTION_LIST *CK_FUNCTION_LIST_PTR;

/* p11-kit/uri.h */

typedef enum {
    DUMMY /* ..., */
} P11KitUriType;

typedef ... P11KitUri;

CK_ATTRIBUTE_PTR p11_kit_uri_get_attributes (P11KitUri *uri,
                                             CK_ULONG *n_attrs);

int p11_kit_uri_any_unrecognized (P11KitUri *uri);

P11KitUri* p11_kit_uri_new (void);

int p11_kit_uri_parse (const char *string, P11KitUriType uri_type,
                       P11KitUri *uri);

void p11_kit_uri_free (P11KitUri *uri);

/* p11helper.c */

struct ck_rsa_pkcs_oaep_params {
    CK_MECHANISM_TYPE hash_alg;
    unsigned long mgf;
    unsigned long source;
    void *source_data;
    unsigned long source_data_len;
};

typedef struct ck_rsa_pkcs_oaep_params CK_RSA_PKCS_OAEP_PARAMS;
''')

# Load p11-kit itself for its PKCS#11 URI parser (the token module is
# loaded separately in loadLibrary()).
_libp11_kit = _ffi.dlopen(ctypes.util.find_library('p11-kit'))


# utility
NULL = _ffi.NULL

unsigned_char = _ffi.typeof('unsigned char')
unsigned_long = _ffi.typeof('unsigned long')

sizeof = _ffi.sizeof


def new_ptr(ctype, *args):
    # Allocate and return a cffi pointer ("ctype *"), optionally
    # initialized from *args.
    return _ffi.new(_ffi.getctype(ctype, '*'), *args)


def new_array(ctype, *args):
    # Allocate and return a cffi array ("ctype []"), optionally
    # initialized from *args.
    return _ffi.new(_ffi.getctype(ctype, '[]'), *args)


# p11-kit/pkcs11.h
# PKCS#11 constants used by this module (values from the PKCS#11 spec).

CK_SESSION_HANDLE = _ffi.typeof('CK_SESSION_HANDLE')

CK_OBJECT_HANDLE = _ffi.typeof('CK_OBJECT_HANDLE')

# user type for C_Login
CKU_USER = 1

# session flags for C_OpenSession
CKF_RW_SESSION = 0x2
CKF_SERIAL_SESSION = 0x4
CK_OBJECT_CLASS = _ffi.typeof('CK_OBJECT_CLASS') CKO_PUBLIC_KEY = 2 CKO_PRIVATE_KEY = 3 CKO_SECRET_KEY = 4 CKO_VENDOR_DEFINED = 0x80000000 CK_KEY_TYPE = _ffi.typeof('CK_KEY_TYPE') CKK_RSA = 0 CKK_AES = 0x1f CKA_CLASS = 0 CKA_TOKEN = 1 CKA_PRIVATE = 2 CKA_LABEL = 3 CKA_TRUSTED = 0x86 CKA_KEY_TYPE = 0x100 CKA_ID = 0x102 CKA_SENSITIVE = 0x103 CKA_ENCRYPT = 0x104 CKA_DECRYPT = 0x105 CKA_WRAP = 0x106 CKA_UNWRAP = 0x107 CKA_SIGN = 0x108 CKA_SIGN_RECOVER = 0x109 CKA_VERIFY = 0x10a CKA_VERIFY_RECOVER = 0x10b CKA_DERIVE = 0x10c CKA_MODULUS = 0x120 CKA_MODULUS_BITS = 0x121 CKA_PUBLIC_EXPONENT = 0x122 CKA_VALUE_LEN = 0x161 CKA_EXTRACTABLE = 0x162 CKA_LOCAL = 0x163 CKA_NEVER_EXTRACTABLE = 0x164 CKA_ALWAYS_SENSITIVE = 0x165 CKA_MODIFIABLE = 0x170 CKA_ALWAYS_AUTHENTICATE = 0x202 CKA_WRAP_WITH_TRUSTED = 0x210 CKM_RSA_PKCS_KEY_PAIR_GEN = 0 CKM_RSA_PKCS = 1 CKM_RSA_PKCS_OAEP = 9 CKM_SHA_1 = 0x220 CKM_AES_KEY_GEN = 0x1080 CKR_OK = 0 CKR_ATTRIBUTE_TYPE_INVALID = 0x12 CKR_USER_NOT_LOGGED_IN = 0x101 CK_BYTE = _ffi.typeof('CK_BYTE') CK_BBOOL = _ffi.typeof('CK_BBOOL') CK_ULONG = _ffi.typeof('CK_ULONG') CK_BYTE_PTR = _ffi.typeof('CK_BYTE_PTR') CK_FALSE = 0 CK_TRUE = 1 CK_OBJECT_HANDLE_PTR = _ffi.typeof('CK_OBJECT_HANDLE_PTR') CK_ATTRIBUTE = _ffi.typeof('CK_ATTRIBUTE') CK_MECHANISM = _ffi.typeof('CK_MECHANISM') CK_FUNCTION_LIST_PTR = _ffi.typeof('CK_FUNCTION_LIST_PTR') NULL_PTR = NULL # p11-kit/uri.h P11_KIT_URI_OK = 0 P11_KIT_URI_FOR_OBJECT = 2 p11_kit_uri_get_attributes = _libp11_kit.p11_kit_uri_get_attributes p11_kit_uri_any_unrecognized = _libp11_kit.p11_kit_uri_any_unrecognized p11_kit_uri_new = _libp11_kit.p11_kit_uri_new p11_kit_uri_parse = _libp11_kit.p11_kit_uri_parse p11_kit_uri_free = _libp11_kit.p11_kit_uri_free # library.c def loadLibrary(module): """Load the PKCS#11 library""" # Load PKCS #11 library try: if module: # pylint: disable=no-member pDynLib = _ffi.dlopen(module, _ffi.RTLD_NOW | _ffi.RTLD_LOCAL) else: raise Exception() except Exception: # Failed to load the PKCS #11 
library raise # Retrieve the entry point for C_GetFunctionList pGetFunctionList = pDynLib.C_GetFunctionList if pGetFunctionList == NULL: raise Exception() # Store the handle so we can dlclose it later return pGetFunctionList, pDynLib # p11helper.c # compat TODO CKM_AES_KEY_WRAP = 0x2109 CKM_AES_KEY_WRAP_PAD = 0x210a # TODO CKA_COPYABLE = 0x0017 CKG_MGF1_SHA1 = 0x00000001 CKZ_DATA_SPECIFIED = 0x00000001 CK_RSA_PKCS_OAEP_PARAMS = _ffi.typeof('CK_RSA_PKCS_OAEP_PARAMS') true_ptr = new_ptr(CK_BBOOL, CK_TRUE) false_ptr = new_ptr(CK_BBOOL, CK_FALSE) MAX_TEMPLATE_LEN = 32 # # Constants # CONST_RSA_PKCS_OAEP_PARAMS_ptr = new_ptr(CK_RSA_PKCS_OAEP_PARAMS, dict( hash_alg=CKM_SHA_1, mgf=CKG_MGF1_SHA1, source=CKZ_DATA_SPECIFIED, source_data=NULL, source_data_len=0, )) # # ipap11helper Exceptions # class P11HelperException(Exception): """parent class for all exceptions""" pass P11HelperException.__name__ = 'Exception' class Error(P11HelperException): """general error""" pass class NotFound(P11HelperException): """key not found""" pass class DuplicationError(P11HelperException): """key already exists""" pass ######################################################################## # Support functions # def pyobj_to_bool(pyobj): if pyobj: return true_ptr return false_ptr def convert_py2bool(mapping): return tuple(pyobj_to_bool(py_obj) for py_obj in mapping) def string_to_pybytes_or_none(str, len): if str == NULL: return None return _ffi.buffer(str, len)[:] def unicode_to_char_array(unicode): """ Convert a unicode string to the utf8 encoded char array :param unicode: input python unicode object """ try: utf8_str = unicode.encode('utf-8') except Exception: raise Error("Unable to encode UTF-8") try: result = new_array(unsigned_char, utf8_str) except Exception: raise Error("Unable to get bytes from string") l = len(utf8_str) return result, l def char_array_to_unicode(array, l): """ Convert utf-8 encoded char array to unicode object """ return _ffi.buffer(array, l)[:].decode('utf-8') def 
int_to_bytes(value): try: return binascii.unhexlify('{0:x}'.format(value)) except (TypeError, binascii.Error): return binascii.unhexlify('0{0:x}'.format(value)) def bytes_to_int(value): return int(binascii.hexlify(value), 16) def check_return_value(rv, message): """ Tests result value of pkc11 operations """ if rv != CKR_OK: try: errmsg = "Error at %s: 0x%x\n" % (message, rv) except Exception: raise Error("An error occured during error message generation. " "Please report this problem. Developers will use " "a crystal ball to find out the root cause.") else: raise Error(errmsg) def _fill_template_from_parts(attr, template_len, id, id_len, label, label_len, class_, cka_wrap, cka_unwrap): """ Fill template structure with pointers to attributes passed as independent variables. Variables with NULL values will be omitted from template. @warning input variables should not be modified when template is in use """ cnt = 0 if label != NULL: attr[0].type = CKA_LABEL attr[0].pValue = label attr[0].ulValueLen = label_len attr += 1 cnt += 1 assert cnt < template_len[0] if id != NULL: attr[0].type = CKA_ID attr[0].pValue = id attr[0].ulValueLen = id_len attr += 1 cnt += 1 assert cnt < template_len[0] if cka_wrap != NULL: attr[0].type = CKA_WRAP attr[0].pValue = cka_wrap attr[0].ulValueLen = sizeof(CK_BBOOL) attr += 1 cnt += 1 assert cnt < template_len[0] if cka_unwrap != NULL: attr[0].type = CKA_UNWRAP attr[0].pValue = cka_unwrap attr[0].ulValueLen = sizeof(CK_BBOOL) attr += 1 cnt += 1 assert cnt < template_len[0] if class_ != NULL: attr[0].type = CKA_CLASS attr[0].pValue = class_ attr[0].ulValueLen = sizeof(CK_OBJECT_CLASS) attr += 1 cnt += 1 assert cnt < template_len[0] template_len[0] = cnt def _parse_uri(uri_str): """ Parse string to P11-kit representation of PKCS#11 URI. 
""" uri = p11_kit_uri_new() if not uri: raise Error("Cannot initialize URI parser") try: result = p11_kit_uri_parse(uri_str, P11_KIT_URI_FOR_OBJECT, uri) if result != P11_KIT_URI_OK: raise Error("Cannot parse URI") if p11_kit_uri_any_unrecognized(uri): raise Error("PKCS#11 URI contains unsupported attributes") except Error: p11_kit_uri_free(uri) raise return uri def _set_wrapping_mech_parameters(mech_type, mech): """ Function set default param values for wrapping mechanism :param mech_type: mechanism type :param mech: filled structure with params based on mech type Warning: do not dealloc param values, it is static variables """ if mech_type in (CKM_RSA_PKCS, CKM_AES_KEY_WRAP, CKM_AES_KEY_WRAP_PAD): mech.pParameter = NULL mech.ulParameterLen = 0 elif mech_type == CKM_RSA_PKCS_OAEP: # Use the same configuration as openSSL # https://www.openssl.org/docs/crypto/RSA_public_encrypt.html mech.pParameter = CONST_RSA_PKCS_OAEP_PARAMS_ptr mech.ulParameterLen = sizeof(CK_RSA_PKCS_OAEP_PARAMS) else: raise Error("Unsupported wrapping mechanism") mech.mechanism = mech_type ######################################################################## # P11_Helper object # class P11_Helper(object): @property def p11(self): return self.p11_ptr[0] @property def session(self): return self.session_ptr[0] def _find_key(self, template, template_len): """ Find keys matching specified template. Function returns list of key handles via objects parameter. 
:param template: PKCS#11 template for attribute matching """ result_objects = [] result_object_ptr = new_ptr(CK_OBJECT_HANDLE) objectCount_ptr = new_ptr(CK_ULONG) rv = self.p11.C_FindObjectsInit(self.session, template, template_len) check_return_value(rv, "Find key init") rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1, objectCount_ptr) check_return_value(rv, "Find key") while objectCount_ptr[0] > 0: result_objects.append(result_object_ptr[0]) rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1, objectCount_ptr) check_return_value(rv, "Check for duplicated key") rv = self.p11.C_FindObjectsFinal(self.session) check_return_value(rv, "Find objects final") return result_objects def _id_exists(self, id, id_len, class_): """ Test if object with specified label, id and class exists :param id: key ID, (if value is NULL, will not be used to find key) :param id_len: key ID length :param class_ key: class :return: True if object was found, False if object doesnt exists """ object_count_ptr = new_ptr(CK_ULONG) result_object_ptr = new_ptr(CK_OBJECT_HANDLE) class_ptr = new_ptr(CK_OBJECT_CLASS, class_) class_sec_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY) template_pub_priv = new_array(CK_ATTRIBUTE, ( (CKA_ID, id, id_len), (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)), )) template_sec = new_array(CK_ATTRIBUTE, ( (CKA_ID, id, id_len), (CKA_CLASS, class_sec_ptr, sizeof(CK_OBJECT_CLASS)), )) template_id = new_array(CK_ATTRIBUTE, ( (CKA_ID, id, id_len), )) # # Only one secret key with same ID is allowed # if class_ == CKO_SECRET_KEY: rv = self.p11.C_FindObjectsInit(self.session, template_id, 1) check_return_value(rv, "id, label exists init") rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1, object_count_ptr) check_return_value(rv, "id, label exists") rv = self.p11.C_FindObjectsFinal(self.session) check_return_value(rv, "id, label exists final") if object_count_ptr[0] > 0: return True return False # # Public and private keys can share one 
ID, but # # test if secret key with same ID exists rv = self.p11.C_FindObjectsInit(self.session, template_sec, 2) check_return_value(rv, "id, label exists init") rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1, object_count_ptr) check_return_value(rv, "id, label exists") rv = self.p11.C_FindObjectsFinal(self.session) check_return_value(rv, "id, label exists final") if object_count_ptr[0] > 0: # object found return True # test if pub/private key with same id exists object_count_ptr[0] = 0 rv = self.p11.C_FindObjectsInit(self.session, template_pub_priv, 2) check_return_value(rv, "id, label exists init") rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1, object_count_ptr) check_return_value(rv, "id, label exists") rv = self.p11.C_FindObjectsFinal(self.session) check_return_value(rv, "id, label exists final") if object_count_ptr[0] > 0: # Object found return True # Object not found return False def __init__(self, slot, user_pin, library_path): self.p11_ptr = new_ptr(CK_FUNCTION_LIST_PTR) self.session_ptr = new_ptr(CK_SESSION_HANDLE) self.slot = 0 self.session_ptr[0] = 0 self.p11_ptr[0] = NULL self.module_handle = None # Parse method args if isinstance(user_pin, unicode): user_pin = user_pin.encode() self.slot = slot try: pGetFunctionList, module_handle = loadLibrary(library_path) except Exception: raise Error("Could not load the library.") self.module_handle = module_handle # # Load the function list # pGetFunctionList(self.p11_ptr) # # Initialize # rv = self.p11.C_Initialize(NULL) check_return_value(rv, "initialize") # # Start session # rv = self.p11.C_OpenSession(self.slot, CKF_SERIAL_SESSION | CKF_RW_SESSION, NULL, NULL, self.session_ptr) check_return_value(rv, "open session") # # Login # rv = self.p11.C_Login(self.session, CKU_USER, user_pin, len(user_pin)) check_return_value(rv, "log in") def finalize(self): """ Finalize operations with pkcs11 library """ if self.p11 == NULL: return # # Logout # rv = self.p11.C_Logout(self.session) 
check_return_value(rv, "log out") # # End session # rv = self.p11.C_CloseSession(self.session) check_return_value(rv, "close session") # # Finalize # self.p11.C_Finalize(NULL) self.p11_ptr[0] = NULL self.session_ptr[0] = 0 self.slot = 0 self.module_handle = None ################################################################# # Methods working with keys # def generate_master_key(self, label, id, key_length=16, cka_copyable=True, cka_decrypt=False, cka_derive=False, cka_encrypt=False, cka_extractable=True, cka_modifiable=True, cka_private=True, cka_sensitive=True, cka_sign=False, cka_unwrap=True, cka_verify=False, cka_wrap=True, cka_wrap_with_trusted=False): """ Generate master key :return: master key handle """ if isinstance(id, unicode): id = id.encode() attrs = ( cka_copyable, cka_decrypt, cka_derive, cka_encrypt, cka_extractable, cka_modifiable, cka_private, cka_sensitive, cka_sign, cka_unwrap, cka_verify, cka_wrap, cka_wrap_with_trusted, ) key_length_ptr = new_ptr(CK_ULONG, key_length) master_key_ptr = new_ptr(CK_OBJECT_HANDLE) label_unicode = label id_length = len(id) id_ = new_array(CK_BYTE, id) # TODO check long overflow label, label_length = unicode_to_char_array(label_unicode) # TODO param? 
mechanism_ptr = new_ptr(CK_MECHANISM, ( CKM_AES_KEY_GEN, NULL_PTR, 0 )) if key_length not in (16, 24, 32): raise Error("generate_master_key: key length allowed values are: " "16, 24 and 32") if self._id_exists(id_, id_length, CKO_SECRET_KEY): raise DuplicationError("Master key with same ID already exists") # Process keyword boolean arguments (cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr, cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr, cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs) symKeyTemplate = new_array(CK_ATTRIBUTE, ( (CKA_ID, id_, id_length), (CKA_LABEL, label, label_length), (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)), (CKA_VALUE_LEN, key_length_ptr, sizeof(CK_ULONG)), # TODO Softhsm doesn't support it # (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)), (CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)), (CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)), (CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)), (CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)), (CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)), (CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)), (CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)), (CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)), (CKA_WRAP, cka_wrap_ptr, sizeof(CK_BBOOL)), (CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr, sizeof(CK_BBOOL)), )) rv = self.p11.C_GenerateKey(self.session, mechanism_ptr, symKeyTemplate, (sizeof(symKeyTemplate) // sizeof(CK_ATTRIBUTE)), master_key_ptr) check_return_value(rv, "generate master key") return master_key_ptr[0] def generate_replica_key_pair(self, label, id, modulus_bits=2048, pub_cka_copyable=True, pub_cka_derive=False, pub_cka_encrypt=False, pub_cka_modifiable=True, pub_cka_private=True, pub_cka_trusted=False, pub_cka_verify=False, pub_cka_verify_recover=False, pub_cka_wrap=True, 
priv_cka_always_authenticate=False, priv_cka_copyable=True, priv_cka_decrypt=False, priv_cka_derive=False, priv_cka_extractable=False, priv_cka_modifiable=True, priv_cka_private=True, priv_cka_sensitive=True, priv_cka_sign=False, priv_cka_sign_recover=False, priv_cka_unwrap=True, priv_cka_wrap_with_trusted=False): """ Generate replica keys :returns: tuple (public_key_handle, private_key_handle) """ if isinstance(id, unicode): id = id.encode() attrs_pub = ( pub_cka_copyable, pub_cka_derive, pub_cka_encrypt, pub_cka_modifiable, pub_cka_private, pub_cka_trusted, pub_cka_verify, pub_cka_verify_recover, pub_cka_wrap, ) attrs_priv = ( priv_cka_always_authenticate, priv_cka_copyable, priv_cka_decrypt, priv_cka_derive, priv_cka_extractable, priv_cka_modifiable, priv_cka_private, priv_cka_sensitive, priv_cka_sign, priv_cka_sign_recover, priv_cka_unwrap, priv_cka_wrap_with_trusted, ) label_unicode = label id_ = new_array(CK_BYTE, id) id_length = len(id) label, label_length = unicode_to_char_array(label_unicode) public_key_ptr = new_ptr(CK_OBJECT_HANDLE) private_key_ptr = new_ptr(CK_OBJECT_HANDLE) mechanism_ptr = new_ptr(CK_MECHANISM, (CKM_RSA_PKCS_KEY_PAIR_GEN, NULL_PTR, 0)) if self._id_exists(id_, id_length, CKO_PRIVATE_KEY): raise DuplicationError("Private key with same ID already exists") if self._id_exists(id_, id_length, CKO_PUBLIC_KEY): raise DuplicationError("Public key with same ID already exists") modulus_bits_ptr = new_ptr(CK_ULONG, modulus_bits) # Process keyword boolean arguments (pub_cka_copyable_ptr, pub_cka_derive_ptr, pub_cka_encrypt_ptr, pub_cka_modifiable_ptr, pub_cka_private_ptr, pub_cka_trusted_ptr, pub_cka_verify_ptr, pub_cka_verify_recover_ptr, pub_cka_wrap_ptr, ) = convert_py2bool(attrs_pub) (priv_cka_always_authenticate_ptr, priv_cka_copyable_ptr, priv_cka_decrypt_ptr, priv_cka_derive_ptr, priv_cka_extractable_ptr, priv_cka_modifiable_ptr, priv_cka_private_ptr, priv_cka_sensitive_ptr, priv_cka_sign_ptr, priv_cka_sign_recover_ptr, priv_cka_unwrap_ptr, 
priv_cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs_priv) # 65537 (RFC 6376 section 3.3.1) public_exponent = new_array(CK_BYTE, (1, 0, 1)) publicKeyTemplate = new_array(CK_ATTRIBUTE, ( (CKA_ID, id_, id_length), (CKA_LABEL, label, label_length), (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)), (CKA_MODULUS_BITS, modulus_bits_ptr, sizeof(CK_ULONG)), (CKA_PUBLIC_EXPONENT, public_exponent, 3), # TODO Softhsm doesn't support it # (CKA_COPYABLE, pub_cka_copyable_p, sizeof(CK_BBOOL)), (CKA_DERIVE, pub_cka_derive_ptr, sizeof(CK_BBOOL)), (CKA_ENCRYPT, pub_cka_encrypt_ptr, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, pub_cka_modifiable_ptr, sizeof(CK_BBOOL)), (CKA_PRIVATE, pub_cka_private_ptr, sizeof(CK_BBOOL)), (CKA_TRUSTED, pub_cka_trusted_ptr, sizeof(CK_BBOOL)), (CKA_VERIFY, pub_cka_verify_ptr, sizeof(CK_BBOOL)), (CKA_VERIFY_RECOVER, pub_cka_verify_recover_ptr, sizeof(CK_BBOOL)), (CKA_WRAP, pub_cka_wrap_ptr, sizeof(CK_BBOOL)), )) privateKeyTemplate = new_array(CK_ATTRIBUTE, ( (CKA_ID, id_, id_length), (CKA_LABEL, label, label_length), (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)), (CKA_ALWAYS_AUTHENTICATE, priv_cka_always_authenticate_ptr, sizeof(CK_BBOOL)), # TODO Softhsm doesn't support it # (CKA_COPYABLE, priv_cka_copyable_ptr, sizeof(CK_BBOOL)), (CKA_DECRYPT, priv_cka_decrypt_ptr, sizeof(CK_BBOOL)), (CKA_DERIVE, priv_cka_derive_ptr, sizeof(CK_BBOOL)), (CKA_EXTRACTABLE, priv_cka_extractable_ptr, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, priv_cka_modifiable_ptr, sizeof(CK_BBOOL)), (CKA_PRIVATE, priv_cka_private_ptr, sizeof(CK_BBOOL)), (CKA_SENSITIVE, priv_cka_sensitive_ptr, sizeof(CK_BBOOL)), (CKA_SIGN, priv_cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_SIGN_RECOVER, priv_cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_UNWRAP, priv_cka_unwrap_ptr, sizeof(CK_BBOOL)), (CKA_WRAP_WITH_TRUSTED, priv_cka_wrap_with_trusted_ptr, sizeof(CK_BBOOL)), )) rv = self.p11.C_GenerateKeyPair(self.session, mechanism_ptr, publicKeyTemplate, (sizeof(publicKeyTemplate) // sizeof(CK_ATTRIBUTE)), privateKeyTemplate, 
(sizeof(privateKeyTemplate) // sizeof(CK_ATTRIBUTE)), public_key_ptr, private_key_ptr) check_return_value(rv, "generate key pair") return public_key_ptr[0], private_key_ptr[0] def find_keys(self, objclass=CKO_VENDOR_DEFINED, label=None, id=None, cka_wrap=None, cka_unwrap=None, uri=None): """ Find key """ if isinstance(id, unicode): id = id.encode() if isinstance(uri, unicode): uri = uri.encode() class_ = objclass class_ptr = new_ptr(CK_OBJECT_CLASS, class_) ckawrap = NULL ckaunwrap = NULL if id is not None: id_ = new_array(CK_BYTE, id) id_length = len(id) else: id_ = NULL id_length = 0 label_unicode, label = label, NULL cka_wrap_bool = cka_wrap cka_unwrap_bool = cka_unwrap label_length = 0 uri_str = uri uri = NULL template = new_array(CK_ATTRIBUTE, MAX_TEMPLATE_LEN) template_len_ptr = new_ptr(CK_ULONG, MAX_TEMPLATE_LEN) # TODO check long overflow if label_unicode is not None: label, label_length = unicode_to_char_array(label_unicode) if cka_wrap_bool is not None: if cka_wrap_bool: ckawrap = true_ptr else: ckawrap = false_ptr if cka_unwrap_bool is not None: if cka_unwrap_bool: ckaunwrap = true_ptr else: ckaunwrap = false_ptr if class_ == CKO_VENDOR_DEFINED: class_ptr = NULL try: if uri_str is None: _fill_template_from_parts(template, template_len_ptr, id_, id_length, label, label_length, class_ptr, ckawrap, ckaunwrap) else: uri = _parse_uri(uri_str) template = (p11_kit_uri_get_attributes(uri, template_len_ptr)) # Do not deallocate URI while you are using the template. # Template contains pointers to values inside URI! 
result_list = self._find_key(template, template_len_ptr[0]) return result_list finally: if uri != NULL: p11_kit_uri_free(uri) def delete_key(self, key_handle): """ delete key """ # TODO check long overflow rv = self.p11.C_DestroyObject(self.session, key_handle) check_return_value(rv, "object deletion") def _export_RSA_public_key(self, object): """ export RSA public key """ class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY) key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA) obj_template = new_array(CK_ATTRIBUTE, ( (CKA_MODULUS, NULL_PTR, 0), (CKA_PUBLIC_EXPONENT, NULL_PTR, 0), (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)), (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)), )) rv = self.p11.C_GetAttributeValue(self.session, object, obj_template, (sizeof(obj_template) // sizeof(CK_ATTRIBUTE))) check_return_value(rv, "get RSA public key values - prepare") # Set proper size for attributes modulus = new_array(CK_BYTE, obj_template[0].ulValueLen * sizeof(CK_BYTE)) obj_template[0].pValue = modulus exponent = new_array(CK_BYTE, obj_template[1].ulValueLen * sizeof(CK_BYTE)) obj_template[1].pValue = exponent rv = self.p11.C_GetAttributeValue(self.session, object, obj_template, (sizeof(obj_template) // sizeof(CK_ATTRIBUTE))) check_return_value(rv, "get RSA public key values") # Check if the key is RSA public key if class_ptr[0] != CKO_PUBLIC_KEY: raise Error("export_RSA_public_key: required public key class") if key_type_ptr[0] != CKK_RSA: raise Error("export_RSA_public_key: required RSA key type") try: n = bytes_to_int(string_to_pybytes_or_none( modulus, obj_template[0].ulValueLen)) except Exception: raise Error("export_RSA_public_key: internal error: unable to " "convert modulus") try: e = bytes_to_int(string_to_pybytes_or_none( exponent, obj_template[1].ulValueLen)) except Exception: raise Error("export_RSA_public_key: internal error: unable to " "convert exponent") # set modulus and exponent rsa_ = rsa.RSAPublicNumbers(e, n) try: pkey = rsa_.public_key(default_backend()) 
except Exception: raise Error("export_RSA_public_key: internal error: " "EVP_PKEY_set1_RSA failed") try: ret = pkey.public_bytes( format=serialization.PublicFormat.SubjectPublicKeyInfo, encoding=serialization.Encoding.DER, ) except Exception: ret = None return ret def export_public_key(self, key_handle): """ Export public key Export public key in SubjectPublicKeyInfo (RFC5280) DER encoded format """ object = key_handle class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY) key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA) # TODO check long overflow obj_template = new_array(CK_ATTRIBUTE, ( (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)), (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)), )) rv = self.p11.C_GetAttributeValue(self.session, object, obj_template, (sizeof(obj_template) // sizeof(CK_ATTRIBUTE))) check_return_value(rv, "export_public_key: get RSA public key values") if class_ptr[0] != CKO_PUBLIC_KEY: raise Error("export_public_key: required public key class") if key_type_ptr[0] == CKK_RSA: return self._export_RSA_public_key(object) else: raise Error("export_public_key: unsupported key type") def _import_RSA_public_key(self, label, label_length, id, id_length, pkey, cka_copyable, cka_derive, cka_encrypt, cka_modifiable, cka_private, cka_trusted, cka_verify, cka_verify_recover, cka_wrap): """ Import RSA public key """ class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY) keyType_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA) cka_token = true_ptr if not isinstance(pkey, rsa.RSAPublicKey): raise Error("Required RSA public key") rsa_ = pkey.public_numbers() # convert BIGNUM to binary array modulus = new_array(CK_BYTE, int_to_bytes(rsa_.n)) modulus_len = sizeof(modulus) - 1 if modulus_len == 0: raise Error("import_RSA_public_key: BN_bn2bin modulus error") exponent = new_array(CK_BYTE, int_to_bytes(rsa_.e)) exponent_len = sizeof(exponent) - 1 if exponent_len == 0: raise Error("import_RSA_public_key: BN_bn2bin exponent error") template = new_array(CK_ATTRIBUTE, ( (CKA_ID, id, 
id_length), (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)), (CKA_KEY_TYPE, keyType_ptr, sizeof(CK_KEY_TYPE)), (CKA_TOKEN, cka_token, sizeof(CK_BBOOL)), (CKA_LABEL, label, label_length), (CKA_MODULUS, modulus, modulus_len), (CKA_PUBLIC_EXPONENT, exponent, exponent_len), # TODO Softhsm doesn't support it # (CKA_COPYABLE, cka_copyable, sizeof(CK_BBOOL)), (CKA_DERIVE, cka_derive, sizeof(CK_BBOOL)), (CKA_ENCRYPT, cka_encrypt, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, cka_modifiable, sizeof(CK_BBOOL)), (CKA_PRIVATE, cka_private, sizeof(CK_BBOOL)), (CKA_TRUSTED, cka_trusted, sizeof(CK_BBOOL)), (CKA_VERIFY, cka_verify, sizeof(CK_BBOOL)), (CKA_VERIFY_RECOVER, cka_verify_recover, sizeof(CK_BBOOL)), (CKA_WRAP, cka_wrap, sizeof(CK_BBOOL)), )) object_ptr = new_ptr(CK_OBJECT_HANDLE) rv = self.p11.C_CreateObject(self.session, template, (sizeof(template) // sizeof(CK_ATTRIBUTE)), object_ptr) check_return_value(rv, "create public key object") return object_ptr[0] def import_public_key(self, label, id, data, cka_copyable=True, cka_derive=False, cka_encrypt=False, cka_modifiable=True, cka_private=True, cka_trusted=False, cka_verify=True, cka_verify_recover=True, cka_wrap=False): """ Import RSA public key """ if isinstance(id, unicode): id = id.encode() if isinstance(data, unicode): data = data.encode() label_unicode = label id_ = new_array(CK_BYTE, id) id_length = len(id) attrs_pub = ( cka_copyable, cka_derive, cka_encrypt, cka_modifiable, cka_private, cka_trusted, cka_verify, cka_verify_recover, cka_wrap, ) label, label_length = unicode_to_char_array(label_unicode) if self._id_exists(id_, id_length, CKO_PUBLIC_KEY): raise DuplicationError("Public key with same ID already exists") # Process keyword boolean arguments (cka_copyable_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_modifiable_ptr, cka_private_ptr, cka_trusted_ptr, cka_verify_ptr, cka_verify_recover_ptr, cka_wrap_ptr,) = convert_py2bool(attrs_pub) # decode from ASN1 DER try: pkey = serialization.load_der_public_key(data, 
default_backend()) except Exception: raise Error("import_public_key: d2i_PUBKEY error") if isinstance(pkey, rsa.RSAPublicKey): ret = self._import_RSA_public_key(label, label_length, id_, id_length, pkey, cka_copyable_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_modifiable_ptr, cka_private_ptr, cka_trusted_ptr, cka_verify_ptr, cka_verify_recover_ptr, cka_wrap_ptr) elif isinstance(pkey, dsa.DSAPublicKey): raise Error("DSA is not supported") elif isinstance(pkey, ec.EllipticCurvePublicKey): raise Error("EC is not supported") else: raise Error("Unsupported key type") return ret def export_wrapped_key(self, key, wrapping_key, wrapping_mech): """ Export wrapped key """ object_key = key object_wrapping_key = wrapping_key wrapped_key_len_ptr = new_ptr(CK_ULONG, 0) wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0)) # currently we don't support parameter in mechanism # TODO check long overflow # TODO export method # fill mech parameters _set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism, wrapping_mech_ptr) rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr, object_wrapping_key, object_key, NULL, wrapped_key_len_ptr) check_return_value(rv, "key wrapping: get buffer length") wrapped_key = new_array(CK_BYTE, wrapped_key_len_ptr[0]) rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr, object_wrapping_key, object_key, wrapped_key, wrapped_key_len_ptr) check_return_value(rv, "key wrapping: wrapping") result = string_to_pybytes_or_none(wrapped_key, wrapped_key_len_ptr[0]) return result def import_wrapped_secret_key(self, label, id, data, unwrapping_key, wrapping_mech, key_type, cka_copyable=True, cka_decrypt=False, cka_derive=False, cka_encrypt=False, cka_extractable=True, cka_modifiable=True, cka_private=True, cka_sensitive=True, cka_sign=False, cka_unwrap=True, cka_verify=False, cka_wrap=True, cka_wrap_with_trusted=False): """ Import wrapped secret key """ if isinstance(id, unicode): id = id.encode() if isinstance(data, unicode): data = data.encode() 
wrapped_key = new_array(CK_BYTE, data) wrapped_key_len = len(data) unwrapping_key_object = unwrapping_key unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0) label_unicode = label id_ = new_array(CK_BYTE, id) id_length = len(id) wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0)) key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY) key_type_ptr = new_ptr(CK_KEY_TYPE, key_type) attrs = ( cka_copyable, cka_decrypt, cka_derive, cka_encrypt, cka_extractable, cka_modifiable, cka_private, cka_sensitive, cka_sign, cka_unwrap, cka_verify, cka_wrap, cka_wrap_with_trusted, ) _set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism, wrapping_mech_ptr) label, label_length = unicode_to_char_array(label_unicode) if self._id_exists(id_, id_length, key_class_ptr[0]): raise DuplicationError("Secret key with same ID already exists") # Process keyword boolean arguments (cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr, cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr, cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs) template = new_array(CK_ATTRIBUTE, ( (CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)), (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)), (CKA_ID, id_, id_length), (CKA_LABEL, label, label_length), (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)), # TODO Softhsm doesn't support it # (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)), (CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)), (CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)), (CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)), (CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)), (CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)), (CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)), (CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)), (CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)), (CKA_WRAP, 
cka_wrap_ptr, sizeof(CK_BBOOL)), (CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr, sizeof(CK_BBOOL)), )) rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr, unwrapping_key_object, wrapped_key, wrapped_key_len, template, sizeof(template) // sizeof(CK_ATTRIBUTE), unwrapped_key_object_ptr) check_return_value(rv, "import_wrapped_key: key unwrapping") return unwrapped_key_object_ptr[0] def import_wrapped_private_key(self, label, id, data, unwrapping_key, wrapping_mech, key_type, cka_always_authenticate=False, cka_copyable=True, cka_decrypt=False, cka_derive=False, cka_extractable=True, cka_modifiable=True, cka_private=True, cka_sensitive=True, cka_sign=True, cka_sign_recover=True, cka_unwrap=False, cka_wrap_with_trusted=False): """ Import wrapped private key """ if isinstance(id, unicode): id = id.encode() if isinstance(data, unicode): data = data.encode() wrapped_key = new_array(CK_BYTE, data) wrapped_key_len = len(data) unwrapping_key_object = unwrapping_key unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0) label_unicode = label id_ = new_array(CK_BYTE, id) id_length = len(id) wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0)) key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PRIVATE_KEY) key_type_ptr = new_ptr(CK_KEY_TYPE, key_type) attrs_priv = ( cka_always_authenticate, cka_copyable, cka_decrypt, cka_derive, cka_extractable, cka_modifiable, cka_private, cka_sensitive, cka_sign, cka_sign_recover, cka_unwrap, cka_wrap_with_trusted, ) label, label_length = unicode_to_char_array(label_unicode) if self._id_exists(id_, id_length, CKO_SECRET_KEY): raise DuplicationError("Secret key with same ID already exists") # Process keyword boolean arguments (cka_always_authenticate_ptr, cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr, cka_sensitive_ptr, cka_sign_ptr, cka_sign_recover_ptr, cka_unwrap_ptr, cka_wrap_with_trusted_ptr, ) = convert_py2bool(attrs_priv) template = new_array(CK_ATTRIBUTE, ( 
(CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)), (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)), (CKA_ID, id_, id_length), (CKA_LABEL, label, label_length), (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)), (CKA_ALWAYS_AUTHENTICATE, cka_always_authenticate_ptr, sizeof(CK_BBOOL)), # TODO Softhsm doesn't support it # (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)), (CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)), (CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)), (CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)), (CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)), (CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)), (CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)), (CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_SIGN_RECOVER, cka_sign_ptr, sizeof(CK_BBOOL)), (CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)), (CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr, sizeof(CK_BBOOL)), )) rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr, unwrapping_key_object, wrapped_key, wrapped_key_len, template, sizeof(template) // sizeof(CK_ATTRIBUTE), unwrapped_key_object_ptr) check_return_value(rv, "import_wrapped_key: key unwrapping") return unwrapped_key_object_ptr[0] def set_attribute(self, key_object, attr, value): """ Set object attributes """ object = key_object attribute_ptr = new_ptr(CK_ATTRIBUTE) attribute_ptr.type = attr if attr in (CKA_ALWAYS_AUTHENTICATE, CKA_ALWAYS_SENSITIVE, CKA_COPYABLE, CKA_ENCRYPT, CKA_EXTRACTABLE, CKA_DECRYPT, CKA_DERIVE, CKA_LOCAL, CKA_MODIFIABLE, CKA_NEVER_EXTRACTABLE, CKA_PRIVATE, CKA_SENSITIVE, CKA_SIGN, CKA_SIGN_RECOVER, CKA_TOKEN, CKA_TRUSTED, CKA_UNWRAP, CKA_VERIFY, CKA_VERIFY_RECOVER, CKA_WRAP, CKA_WRAP_WITH_TRUSTED): attribute_ptr.pValue = true_ptr if value else false_ptr attribute_ptr.ulValueLen = sizeof(CK_BBOOL) elif attr == CKA_ID: if not isinstance(value, bytes): raise Error("Bytestring value expected") attribute_ptr.pValue = new_array(CK_BYTE, value) attribute_ptr.ulValueLen = len(value) elif attr == CKA_LABEL: if not 
isinstance(value, unicode): raise Error("Unicode value expected") label, label_length = unicode_to_char_array(value) attribute_ptr.pValue = label attribute_ptr.ulValueLen = label_length elif attr == CKA_KEY_TYPE: if not isinstance(value, int): raise Error("Integer value expected") attribute_ptr.pValue = new_ptr(unsigned_long, value) attribute_ptr.ulValueLen = sizeof(unsigned_long) else: raise Error("Unknown attribute") template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],)) rv = self.p11.C_SetAttributeValue(self.session, object, template, (sizeof(template) // sizeof(CK_ATTRIBUTE))) check_return_value(rv, "set_attribute") def get_attribute(self, key_object, attr): object = key_object attribute_ptr = new_ptr(CK_ATTRIBUTE) attribute_ptr.type = attr attribute_ptr.pValue = NULL_PTR attribute_ptr.ulValueLen = 0 template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],)) rv = self.p11.C_GetAttributeValue(self.session, object, template, (sizeof(template) // sizeof(CK_ATTRIBUTE))) if rv == CKR_ATTRIBUTE_TYPE_INVALID or template[0].ulValueLen == -1: raise NotFound("attribute does not exist") check_return_value(rv, "get_attribute init") value = new_array(unsigned_char, template[0].ulValueLen) template[0].pValue = value rv = self.p11.C_GetAttributeValue(self.session, object, template, (sizeof(template) // sizeof(CK_ATTRIBUTE))) check_return_value(rv, "get_attribute") if attr in (CKA_ALWAYS_AUTHENTICATE, CKA_ALWAYS_SENSITIVE, CKA_COPYABLE, CKA_ENCRYPT, CKA_EXTRACTABLE, CKA_DECRYPT, CKA_DERIVE, CKA_LOCAL, CKA_MODIFIABLE, CKA_NEVER_EXTRACTABLE, CKA_PRIVATE, CKA_SENSITIVE, CKA_SIGN, CKA_SIGN_RECOVER, CKA_TOKEN, CKA_TRUSTED, CKA_UNWRAP, CKA_VERIFY, CKA_VERIFY_RECOVER, CKA_WRAP, CKA_WRAP_WITH_TRUSTED): ret = bool(_ffi.cast(_ffi.getctype(CK_BBOOL, '*'), value)[0]) elif attr == CKA_LABEL: ret = char_array_to_unicode(value, template[0].ulValueLen) elif attr in (CKA_MODULUS, CKA_PUBLIC_EXPONENT, CKA_ID): ret = string_to_pybytes_or_none(value, template[0].ulValueLen) elif attr == 
CKA_KEY_TYPE:
            # CK_ULONG-valued attribute: cast the raw buffer to an unsigned
            # long (a CKK_* key-type constant such as CKK_RSA or CKK_AES)
            ret = _ffi.cast(_ffi.getctype(unsigned_long, '*'), value)[0]
        else:
            raise Error("Unknown attribute")

        return ret


# Key Classes
KEY_CLASS_PUBLIC_KEY = CKO_PUBLIC_KEY
KEY_CLASS_PRIVATE_KEY = CKO_PRIVATE_KEY
KEY_CLASS_SECRET_KEY = CKO_SECRET_KEY

# Key types
KEY_TYPE_RSA = CKK_RSA
KEY_TYPE_AES = CKK_AES

# Wrapping mech type
MECH_RSA_PKCS = CKM_RSA_PKCS
MECH_RSA_PKCS_OAEP = CKM_RSA_PKCS_OAEP
MECH_AES_KEY_WRAP = CKM_AES_KEY_WRAP
MECH_AES_KEY_WRAP_PAD = CKM_AES_KEY_WRAP_PAD


def generate_master_key(p11, keylabel=u"dnssec-master", key_length=16,
                        disable_old_keys=True):
    """Generate a new master (wrapping) secret key with a unique CKA_ID.

    Picks a random 16-byte ID not already used by a secret key with the
    given label, generates the key via the helper session, and optionally
    revokes wrapping capability on all older master keys with that label.

    :param p11: open P11_Helper session used for all token operations
    :param keylabel: CKA_LABEL shared by all master keys
    :param key_length: key length in bytes passed to generate_master_key
    :param disable_old_keys: when True, clear CKA_WRAP on every other key
        carrying the same label so only the new key wraps new material
    """
    assert isinstance(p11, P11_Helper)
    key_id = None
    while True:
        # check if key with this ID exist in LDAP or softHSM
        # id is 16 Bytes long
        # NOTE(review): built with the non-cryptographic `random` module;
        # presumably IDs only need uniqueness, not unpredictability — confirm.
        key_id = "".join(chr(random.randint(0, 255)) for _ in range(0, 16))
        keys = p11.find_keys(KEY_CLASS_SECRET_KEY, label=keylabel,
                             id=key_id)
        if not keys:
            break  # we found unique id

    p11.generate_master_key(keylabel, key_id, key_length=key_length,
                            cka_wrap=True, cka_unwrap=True)

    if disable_old_keys:
        # set CKA_WRAP=False for old master keys
        master_keys = p11.find_keys(KEY_CLASS_SECRET_KEY, label=keylabel,
                                    cka_wrap=True)
        for handle in master_keys:
            # don't disable wrapping for new key
            # compare IDs not handle
            if key_id != p11.get_attribute(handle, CKA_ID):
                p11.set_attribute(handle, CKA_WRAP, False)
unknown
codeparrot/codeparrot-clean
""" Views related to the video upload feature """ from boto import s3 import csv from uuid import uuid4 from django.conf import settings from django.contrib.auth.decorators import login_required from django.http import HttpResponse, HttpResponseNotFound from django.utils.translation import ugettext as _, ugettext_noop from django.views.decorators.http import require_GET, require_http_methods import rfc6266 from edxval.api import create_video, get_videos_for_course, SortDirection, VideoSortField from opaque_keys.edx.keys import CourseKey from contentstore.models import VideoUploadConfig from contentstore.utils import reverse_course_url from edxmako.shortcuts import render_to_response from util.json_request import expect_json, JsonResponse from .course import get_course_and_check_access __all__ = ["videos_handler", "video_encodings_download"] # Default expiration, in seconds, of one-time URLs used for uploading videos. KEY_EXPIRATION_IN_SECONDS = 86400 class StatusDisplayStrings(object): """ A class to map status strings as stored in VAL to display strings for the video upload page """ # Translators: This is the status of an active video upload _UPLOADING = ugettext_noop("Uploading") # Translators: This is the status for a video that the servers are currently processing _IN_PROGRESS = ugettext_noop("In Progress") # Translators: This is the status for a video that the servers have successfully processed _COMPLETE = ugettext_noop("Ready") # Translators: This is the status for a video that the servers have failed to process _FAILED = ugettext_noop("Failed") # Translators: This is the status for a video for which an invalid # processing token was provided in the course settings _INVALID_TOKEN = ugettext_noop("Invalid Token") # Translators: This is the status for a video that was included in a course import _IMPORTED = ugettext_noop("Imported") # Translators: This is the status for a video that is in an unknown state _UNKNOWN = ugettext_noop("Unknown") _STATUS_MAP = { 
"upload": _UPLOADING, "ingest": _IN_PROGRESS, "transcode_queue": _IN_PROGRESS, "transcode_active": _IN_PROGRESS, "file_delivered": _COMPLETE, "file_complete": _COMPLETE, "file_corrupt": _FAILED, "pipeline_error": _FAILED, "invalid_token": _INVALID_TOKEN, "imported": _IMPORTED, } @staticmethod def get(val_status): """Map a VAL status string to a localized display string""" return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN)) # pylint: disable=translation-of-non-string @expect_json @login_required @require_http_methods(("GET", "POST")) def videos_handler(request, course_key_string): """ The restful handler for video uploads. GET html: return an HTML page to display previous video uploads and allow new ones json: return json representing the videos that have been uploaded and their statuses POST json: create a new video upload; the actual files should not be provided to this endpoint but rather PUT to the respective upload_url values contained in the response """ course = _get_and_validate_course(course_key_string, request.user) if not course: return HttpResponseNotFound() if request.method == "GET": if "application/json" in request.META.get("HTTP_ACCEPT", ""): return videos_index_json(course) else: return videos_index_html(course) else: return videos_post(course, request) @login_required @require_GET def video_encodings_download(request, course_key_string): """ Returns a CSV report containing the encoded video URLs for video uploads in the following format: Video ID,Name,Status,Profile1 URL,Profile2 URL aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4 """ course = _get_and_validate_course(course_key_string, request.user) if not course: return HttpResponseNotFound() def get_profile_header(profile): """Returns the column header string for the given profile's URLs""" # Translators: This is the header for a CSV file column # containing URLs for video encodings for the 
named profile
        # (e.g. desktop, mobile high quality, mobile low quality)
        return _("{profile_name} URL").format(profile_name=profile)

    profile_whitelist = VideoUploadConfig.get_profile_whitelist()

    videos = list(_get_videos(course))
    name_col = _("Name")
    duration_col = _("Duration")
    added_col = _("Date Added")
    video_id_col = _("Video ID")
    status_col = _("Status")
    profile_cols = [get_profile_header(profile) for profile in profile_whitelist]

    def make_csv_dict(video):
        """
        Makes a dictionary suitable for writing CSV output. This involves
        extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
        because the CSV module doesn't play well with unicode objects.
        """
        # Translators: This is listed as the duration for a video that has not
        # yet reached the point in its processing by the servers where its
        # duration is determined.
        duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        # Only encodings whose profile is whitelisted get a column.
        ret = dict(
            [
                (name_col, video["client_video_id"]),
                (duration_col, duration_val),
                (added_col, video["created"].isoformat()),
                (video_id_col, video["edx_video_id"]),
                (status_col, video["status"]),
            ] +
            [
                (get_profile_header(encoded_video["profile"]), encoded_video["url"])
                for encoded_video in video["encoded_videos"]
                if encoded_video["profile"] in profile_whitelist
            ]
        )
        # Py2 csv writes byte strings, so encode every key and value.
        return {
            key.encode("utf-8"): value.encode("utf-8")
            for key, value in ret.items()
        }

    # Stream the CSV directly into the HttpResponse (it is file-like).
    response = HttpResponse(content_type="text/csv")
    # Translators: This is the suggested filename when downloading the URL
    # listing for videos uploaded through Studio
    filename = _("{course}_video_urls").format(course=course.id.course)
    # See https://tools.ietf.org/html/rfc6266#appendix-D
    response["Content-Disposition"] = rfc6266.build_header(
        filename + ".csv",
        filename_compat="video_urls.csv"
    )
    writer = csv.DictWriter(
        response,
        [
            col_name.encode("utf-8")
            for col_name in
            [name_col, duration_col, added_col, video_id_col, status_col] +
            profile_cols
        ],
        dialect=csv.excel
    )
    writer.writeheader()
    for video in videos:
        writer.writerow(make_csv_dict(video))
    return response


def _get_and_validate_course(course_key_string, user):
    """
    Given a course key, return the course if it exists, the given user has
    access to it, and it is properly configured for video uploads

    Returns None when any of those checks fails (callers turn that into 404).
    """
    course_key = CourseKey.from_string(course_key_string)

    # For now, assume all studio users that have access to the course can upload videos.
    # In the future, we plan to add a new org-level role for video uploaders.
    course = get_course_and_check_access(course_key, user)

    # The feature flag, the pipeline settings, the course itself, and the
    # course-level pipeline configuration must all be present.
    if (
            settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
            getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) and
            course and
            course.video_pipeline_configured
    ):
        return course
    else:
        return None


def _get_videos(course):
    """
    Retrieves the list of videos from VAL corresponding to this course.
    """
    videos = list(get_videos_for_course(course.id, VideoSortField.created, SortDirection.desc))

    # convert VAL's status to studio's Video Upload feature status.
    for video in videos:
        video["status"] = StatusDisplayStrings.get(video["status"])
    return videos


def _get_index_videos(course):
    """
    Returns the information about each video upload required for the video list

    Projects each VAL video dict down to just the fields the index page needs.
    """
    return list(
        {
            attr: video[attr]
            for attr in ["edx_video_id", "client_video_id", "created", "duration", "status"]
        }
        for video in _get_videos(course)
    )


def videos_index_html(course):
    """
    Returns an HTML page to display previous video uploads and allow new ones
    """
    return render_to_response(
        "videos_index.html",
        {
            "context_course": course,
            "post_url": reverse_course_url("videos_handler", unicode(course.id)),
            "encodings_download_url": reverse_course_url("video_encodings_download", unicode(course.id)),
            "previous_uploads": _get_index_videos(course),
            "concurrent_upload_limit": settings.VIDEO_UPLOAD_PIPELINE.get("CONCURRENT_UPLOAD_LIMIT", 0),
        }
    )


def videos_index_json(course):
    """
    Returns JSON in the following format:
    {
        "videos": [{
            "edx_video_id": "aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa",
            "client_video_id": "video.mp4",
            "created": "1970-01-01T00:00:00Z",
            "duration": 42.5,
            "status": "upload"
        }]
    }
    """
    return JsonResponse({"videos": _get_index_videos(course)}, status=200)


def videos_post(course, request):
    """
    Input (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "content_type": "video/mp4"
        }]
    }
    Returns (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "upload_url": "http://example.com/put_video"
        }]
    }
    The returned array corresponds exactly to the input array.
""" error = None if "files" not in request.json: error = "Request object is not JSON or does not contain 'files'" elif any( "file_name" not in file or "content_type" not in file for file in request.json["files"] ): error = "Request 'files' entry does not contain 'file_name' and 'content_type'" if error: return JsonResponse({"error": error}, status=400) bucket = storage_service_bucket() course_video_upload_token = course.video_upload_pipeline["course_video_upload_token"] req_files = request.json["files"] resp_files = [] for req_file in req_files: file_name = req_file["file_name"] edx_video_id = unicode(uuid4()) key = storage_service_key(bucket, file_name=edx_video_id) for metadata_name, value in [ ("course_video_upload_token", course_video_upload_token), ("client_video_id", file_name), ("course_key", unicode(course.id)), ]: key.set_metadata(metadata_name, value) upload_url = key.generate_url( KEY_EXPIRATION_IN_SECONDS, "PUT", headers={"Content-Type": req_file["content_type"]} ) # persist edx_video_id in VAL create_video({ "edx_video_id": edx_video_id, "status": "upload", "client_video_id": file_name, "duration": 0, "encoded_videos": [], "courses": [course.id] }) resp_files.append({"file_name": file_name, "upload_url": upload_url}) return JsonResponse({"files": resp_files}, status=200) def storage_service_bucket(): """ Returns an S3 bucket for video uploads. """ conn = s3.connection.S3Connection( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY ) return conn.get_bucket(settings.VIDEO_UPLOAD_PIPELINE["BUCKET"]) def storage_service_key(bucket, file_name): """ Returns an S3 key to the given file in the given bucket. """ key_name = "{}/{}".format( settings.VIDEO_UPLOAD_PIPELINE.get("ROOT_PATH", ""), file_name ) return s3.key.Key(bucket, key_name)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import json import argparse import requests import logging as log api_url = "https://api.cloudflare.com/client/v4/user/load_balancers/pools" api_key = "<your-apikey>" email = "<your-email>" headers = {'X-Auth-Email': email, 'X-Auth-Key': api_key, 'Content-Type': 'application/json'} def get_arguments(): """parse the argument provided to the script""" parser = argparse.ArgumentParser( description='Script to manage cloudflare load balancers \ through API', epilog='copyright © - 2017 Fastnetserv', usage='python %s -option') parser.add_argument('-a', '--add-pool', action='store_true', default=False, help='Add a Load Balancer Monitor') parser.add_argument('-l', '--list-pools', action='store_true', default=False, help='List all Load Balancer Monitors') parser.add_argument('-r', '--del-pool', type=str, default=None, help='specify a Load Balancer Monitor to \ be deleted') parser.add_argument('-e', '--edit-pool', type=str, default='endian', help='specify a Load Balancer Monitor to be edited') parser.add_argument('-d', '--pool-details', type=str, default=None, help='give more details about a specific Load \ Balancer monitor') return parser.parse_args() ''' List all balancer monitors ''' def get_pools(): pool_list = requests.get(api_url, headers=headers) print json.dumps(pool_list.json(), sort_keys=True, indent=2, separators=(',', ': ')) ''' Add a balancer monitor ''' def add_pool(data): pool_add = requests.post(api_url, data, headers=headers) print json.dumps(pool_add.json(), sort_keys=True, indent=2, separators=(',', ': ')) ''' List details of a monitor ''' def get_pool_details(pool_id): pool_detail = requests.get(api_url+"/"+pool_id, headers=headers) print json.dumps(pool_detail.json(), sort_keys=True, indent=2, separators=(',', ': ')) ''' Delede a monitor by id ''' def del_pool(pool_delete): pool_delete = requests.delete(api_url+"/"+pool_delete, headers=headers) print json.dumps(pool_delete.json(), sort_keys=True, 
indent=2, separators=(',', ': ')) def main(): args = get_arguments() if args.list_pools: log.info("Taking the load balancer pool lists, please wait...") get_pools() sys.exit() if args.pool_details is not None: log.info("Retrieving details for Pool id: " + args.pool_details) get_pool_details(args.pool_details) sys.exit() if args.del_pool is not None: log.info("Deleting Pool id: " + args.del_monitor) del_pool(args.del_pool) sys.exit() if args.add_pool: name = raw_input("Add a pool name\n") listen = raw_input("Insert the list of origins within this pool \ (only the address)\n") description = raw_input("Add a description for this pool\n") monitor_id = raw_input("Insert the Monitor id\n") notification_mail = raw_input("Insert an email address for \ notification") print "\nOk, got the following:\n", "Name: ", name, "\nOrigins: ", listen, "\nDescription: ", description, "\nMonitor id: ", monitor_id, "Notification mail: ", notification_mail, params = raw_input("Do you confir the following parameters? y/n: ") if params == 'y': data = json.dumps({"description": description, "\nname": name, "enabled": true, "\nmonitor": monitor_id, "\norigins": [{"name": name, "address": listen, "enabled": true}], "notification_email": notification_mail}) add_pool(data) sys.exit() elif params == 'n': sys.exit() else: print "Reply y/n" sys.exit() if __name__ == "__main__": log.basicConfig(stream=sys.stdout, level=log.INFO) main()
unknown
codeparrot/codeparrot-clean
#!/usr/local/bin/ruby # This program is contributed by Shin Nishiyama # modified by K.Sasada NP = 5 ROW = 8 + NP COL = 8 $p = [] $b = [] $no = 0 def piece(n, a, nb) nb.each{|x| a[n] = x if n == NP-1 $p << [a.sort] else nbc=nb.dup [-ROW, -1, 1, ROW].each{|d| if x+d > 0 and not a.include?(x+d) and not nbc.include?(x+d) nbc << x+d end } nbc.delete x piece(n+1,a[0..n],nbc) end } end def kikaku(a) a.collect {|x| x - a[0]} end def ud(a) kikaku(a.collect {|x| ((x+NP)%ROW)-ROW*((x+NP)/ROW) }.sort) end def rl(a) kikaku(a.collect {|x| ROW*((x+NP)/ROW)+ROW-((x+NP)%ROW)}.sort) end def xy(a) kikaku(a.collect {|x| ROW*((x+NP)%ROW) + (x+NP)/ROW }.sort) end def mkpieces piece(0,[],[0]) $p.each do |a| a0 = a[0] a[1] = ud(a0) a[2] = rl(a0) a[3] = ud(rl(a0)) a[4] = xy(a0) a[5] = ud(xy(a0)) a[6] = rl(xy(a0)) a[7] = ud(rl(xy(a0))) a.sort! a.uniq! end $p.uniq!.sort! {|x,y| x[0] <=> y[0] } end def mkboard (0...ROW*COL).each{|i| if i % ROW >= ROW-NP $b[i] = -2 else $b[i] = -1 end $b[3*ROW+3]=$b[3*ROW+4]=$b[4*ROW+3]=$b[4*ROW+4]=-2 } end def pboard return # skip print print "No. #$no\n" (0...COL).each{|i| print "|" (0...ROW-NP).each{|j| x = $b[i*ROW+j] if x < 0 print "..|" else printf "%2d|",x+1 end } print "\n" } print "\n" end $pnum=[] def setpiece(a,pos) if a.length == $p.length then $no += 1 pboard return end while $b[pos] != -1 pos += 1 end ($pnum - a).each do |i| $p[i].each do |x| f = 0 x.each{|s| if $b[pos+s] != -1 f=1 break end } if f == 0 then x.each{|s| $b[pos+s] = i } a << i setpiece(a.dup, pos) a.pop x.each{|s| $b[pos+s] = -1 } end end end end mkpieces mkboard $p[4] = [$p[4][0]] $pnum = (0...$p.length).to_a setpiece([],0)
ruby
github
https://github.com/ruby/ruby
benchmark/app_pentomino.rb
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from frappe.utils import cint from frappe.utils.jinja import validate_template from frappe import _ class AddressTemplate(Document): def validate(self): if not self.template: self.template = get_default_address_template() self.defaults = frappe.db.get_values("Address Template", {"is_default":1, "name":("!=", self.name)}) if not self.is_default: if not self.defaults: self.is_default = 1 if cint(frappe.db.get_single_value('System Settings', 'setup_complete')): frappe.msgprint(_("Setting this Address Template as default as there is no other default")) validate_template(self.template) def on_update(self): if self.is_default and self.defaults: for d in self.defaults: frappe.db.set_value("Address Template", d[0], "is_default", 0) def on_trash(self): if self.is_default: frappe.throw(_("Default Address Template cannot be deleted")) @frappe.whitelist() def get_default_address_template(): '''Get default address template (translated)''' return '''{{ address_line1 }}<br>{% if address_line2 %}{{ address_line2 }}<br>{% endif -%}\ {{ city }}<br> {% if state %}{{ state }}<br>{% endif -%} {% if pincode %}{{ pincode }}<br>{% endif -%} {{ country }}<br> {% if phone %}'''+_('Phone')+''': {{ phone }}<br>{% endif -%} {% if fax %}'''+_('Fax')+''': {{ fax }}<br>{% endif -%} {% if email_id %}'''+_('Email')+''': {{ email_id }}<br>{% endif -%}'''
unknown
codeparrot/codeparrot-clean
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1

package command

import (
	"bytes"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	"github.com/ghodss/yaml"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/healthcheck"
	"github.com/ryanuber/columnize"
)

// PKIVerifySignCommand implements `vault pki verify-sign`, which checks
// whether one issuer certificate validates another issued certificate.
type PKIVerifySignCommand struct {
	*BaseCommand

	flagConfig          string
	flagReturnIndicator string
	flagDefaultDisabled bool
	flagList            bool
}

// Synopsis returns the one-line description shown in command listings.
func (c *PKIVerifySignCommand) Synopsis() string {
	return "Check whether one certificate validates another specified certificate"
}

// Help returns the full usage text, including the meaning of the five
// result fields and the flag help.
func (c *PKIVerifySignCommand) Help() string {
	helpText := `
Usage: vault pki verify-sign POSSIBLE-ISSUER POSSIBLE-ISSUED

  Verifies whether the listed issuer has signed the listed issued certificate.

  POSSIBLE-ISSUER and POSSIBLE-ISSUED are the fully name-spaced path to
  an issuer certificate, for instance: 'ns1/mount1/issuer/issuerName/json'.

  Returns five fields of information:

  - signature_match: was the key of the issuer used to sign the issued.

  - path_match: the possible issuer appears in the valid certificate chain
    of the issued.

  - key_id_match: does the key-id of the issuer match the key_id of the
    subject.

  - subject_match: does the subject name of the issuer match the issuer
    subject of the issued.

  - trust_match: if someone trusted the parent issuer, is the chain
    provided sufficient to trust the child issued.

` + c.Flags().Help()
	return strings.TrimSpace(helpText)
}

// Flags returns the standard HTTP + output-format flag sets; the command
// defines no flags of its own.
func (c *PKIVerifySignCommand) Flags() *FlagSets {
	set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
	return set
}

// Run parses the two positional arguments (issuer path, issued path),
// fetches the issuer, runs the verification, and prints the results.
// Returns 0 on success, non-zero on usage or API errors.
func (c *PKIVerifySignCommand) Run(args []string) int {
	f := c.Flags()

	if err := f.Parse(args); err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	args = f.Args()

	if len(args) < 2 {
		if len(args) == 0 {
			c.UI.Error("Not enough arguments (expected potential issuer and issued, got nothing)")
		} else {
			c.UI.Error("Not enough arguments (expected both potential issuer and issued, got only one)")
		}
		return 1
	} else if len(args) > 2 {
		c.UI.Error(fmt.Sprintf("Too many arguments (expected only potential issuer and issued, got %d arguments)", len(args)))
		// Flags placed after positional arguments are silently treated as
		// positionals by the parser; warn about the likely mistake.
		for _, arg := range args {
			if strings.HasPrefix(arg, "-") {
				c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0]))
				break
			}
		}
		return 1
	}

	issuer := sanitizePath(args[0])
	issued := sanitizePath(args[1])

	client, err := c.Client()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err))
		return 1
	}

	issuerResp, err := readIssuer(client, issuer)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Failed to read issuer: %s: %s", issuer, err.Error()))
		return 1
	}

	results, err := verifySignBetween(client, issuerResp, issued)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Failed to run verification: %v", err))
		return pkiRetUsage
	}

	c.outputResults(results, issuer, issued)

	return 0
}

// verifySignBetween computes the five boolean relationship fields between
// the already-fetched issuer and the certificate at issuedPath:
// trust_match (chain verification against the issuer as a root),
// path_match (issuer appears in the issued cert's CA chain),
// signature_match (issuer key actually signed the issued cert),
// subject_match (issuer subject equals issued cert's issuer field), and
// key_id_match (authority key id equals issuer's subject key id).
func verifySignBetween(client *api.Client, issuerResp *issuerResponse, issuedPath string) (map[string]bool, error) {
	// Note that this eats warnings

	issuerCert := issuerResp.certificate
	issuerKeyId := issuerCert.SubjectKeyId

	// Fetch and Parse the Potential Issued Cert
	issuedCertBundle, err := readIssuer(client, issuedPath)
	if err != nil {
		return nil, fmt.Errorf("error: unable to fetch issuer %v: %w", issuedPath, err)
	}
	parentKeyId := issuedCertBundle.certificate.AuthorityKeyId

	// Check the Chain-Match
	rootCertPool := x509.NewCertPool()
	rootCertPool.AddCert(issuerCert)
	checkTrustPathOptions := x509.VerifyOptions{
		Roots: rootCertPool,
	}
	trust := false
	trusts, err := issuedCertBundle.certificate.Verify(checkTrustPathOptions)
	// "unknown authority" simply means no trust path exists; any other
	// verification error is a real failure.
	if err != nil && !strings.Contains(err.Error(), "certificate signed by unknown authority") {
		return nil, err
	} else if err == nil {
		for _, chain := range trusts {
			// Output of this Should Only Have One Trust with Chain of Length Two (Child followed by Parent)
			for _, cert := range chain {
				if issuedCertBundle.certificate.Equal(cert) {
					trust = true
					break
				}
			}
		}
	}

	// path_match: raw byte comparison of the issuer cert against each
	// entry of the issued cert's reported CA chain.
	pathMatch := false
	for _, cert := range issuedCertBundle.caChain {
		if bytes.Equal(cert.Raw, issuerCert.Raw) {
			pathMatch = true
			break
		}
	}

	signatureMatch := false
	err = issuedCertBundle.certificate.CheckSignatureFrom(issuerCert)
	if err == nil {
		signatureMatch = true
	}

	result := map[string]bool{
		// This comparison isn't strictly correct, despite a standard ordering these are sets
		"subject_match":   bytes.Equal(issuerCert.RawSubject, issuedCertBundle.certificate.RawIssuer),
		"path_match":      pathMatch,
		"trust_match":     trust, // TODO: Refactor into a reasonable function
		"key_id_match":    bytes.Equal(parentKeyId, issuerKeyId),
		"signature_match": signatureMatch,
	}

	return result, nil
}

// issuerResponse is the parsed form of a PKI issuer read: its optional
// key id, the issuer certificate itself, and its parsed CA chain.
type issuerResponse struct {
	keyId       string
	certificate *x509.Certificate
	caChain     []*x509.Certificate
}

// readIssuer reads an issuer at issuerPath via the Vault API and parses
// the "certificate" and "ca_chain" fields into x509 certificates; blank
// chain entries are skipped and "key_id" is optional.
func readIssuer(client *api.Client, issuerPath string) (*issuerResponse, error) {
	issuerResp, err := client.Logical().Read(issuerPath)
	if err != nil {
		return nil, err
	}
	issuerCertPem, err := requireStrRespField(issuerResp, "certificate")
	if err != nil {
		return nil, err
	}
	issuerCert, err := healthcheck.ParsePEMCert(issuerCertPem)
	if err != nil {
		return nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuerPath, err)
	}

	caChainPem, err := requireStrListRespField(issuerResp, "ca_chain")
	if err != nil {
		return nil, fmt.Errorf("unable to parse issuer %v's CA chain: %w", issuerPath, err)
	}

	var caChain []*x509.Certificate
	for _, pem := range caChainPem {
		trimmedPem := strings.TrimSpace(pem)
		if trimmedPem == "" {
			continue
		}
		cert, err := healthcheck.ParsePEMCert(trimmedPem)
		if err != nil {
			return nil, err
		}
		caChain = append(caChain, cert)
	}

	keyId := optStrRespField(issuerResp, "key_id")

	return &issuerResponse{
		keyId:       keyId,
		certificate: issuerCert,
		caChain:     caChain,
	}, nil
}

// optStrRespField returns resp.Data[reqField] as a string, or "" when the
// response, field, or string value is missing/blank.
func optStrRespField(resp *api.Secret, reqField string) string {
	if resp == nil || resp.Data == nil {
		return ""
	}
	if val, present := resp.Data[reqField]; !present {
		return ""
	} else if strVal, castOk := val.(string); !castOk || strVal == "" {
		return ""
	} else {
		return strVal
	}
}

// requireStrRespField returns resp.Data[reqField] as a non-empty string,
// or an error describing which precondition failed.
func requireStrRespField(resp *api.Secret, reqField string) (string, error) {
	if resp == nil || resp.Data == nil {
		return "", fmt.Errorf("nil response received, %s field unavailable", reqField)
	}
	if val, present := resp.Data[reqField]; !present {
		return "", fmt.Errorf("response did not contain field: %s", reqField)
	} else if strVal, castOk := val.(string); !castOk || strVal == "" {
		return "", fmt.Errorf("field %s value was blank or not a string: %v", reqField, val)
	} else {
		return strVal, nil
	}
}

// requireStrListRespField returns resp.Data[reqField] coerced to a string
// list via healthcheck.StringList, or an error when absent.
func requireStrListRespField(resp *api.Secret, reqField string) ([]string, error) {
	if resp == nil || resp.Data == nil {
		return nil, fmt.Errorf("nil response received, %s field unavailable", reqField)
	}
	if val, present := resp.Data[reqField]; !present {
		return nil, fmt.Errorf("response did not contain field: %s", reqField)
	} else {
		return healthcheck.StringList(val)
	}
}

// outputResults dispatches result rendering based on the UI's configured
// output format (table by default, or json/yaml).
func (c *PKIVerifySignCommand) outputResults(results map[string]bool, potentialParent, potentialChild string) error {
	switch Format(c.UI) {
	case "", "table":
		return c.outputResultsTable(results, potentialParent, potentialChild)
	case "json":
		return c.outputResultsJSON(results)
	case "yaml":
		return c.outputResultsYAML(results)
	default:
		return fmt.Errorf("unknown output format: %v", Format(c.UI))
	}
}

// outputResultsTable prints the issuer/issued paths followed by a
// two-column field/value table of the boolean results.
func (c *PKIVerifySignCommand) outputResultsTable(results map[string]bool, potentialParent, potentialChild string) error {
	c.UI.Output("issuer:" + potentialParent)
	c.UI.Output("issued:" + potentialChild + "\n")
	data := []string{"field" + hopeDelim + "value"}
	for field, finding := range results {
		row := field + hopeDelim + strconv.FormatBool(finding)
		data = append(data, row)
	}
	c.UI.Output(tableOutput(data, &columnize.Config{
		Delim: hopeDelim,
	}))
	c.UI.Output("\n")

	return nil
}

// outputResultsJSON prints the results map as indented JSON.
func (c *PKIVerifySignCommand) outputResultsJSON(results map[string]bool) error {
	bytes, err := json.MarshalIndent(results, "", "  ")
	if err != nil {
		return err
	}

	c.UI.Output(string(bytes))
	return nil
}

// outputResultsYAML prints the results map as YAML.
func (c *PKIVerifySignCommand) outputResultsYAML(results map[string]bool) error {
	bytes, err := yaml.Marshal(results)
	if err != nil {
		return err
	}

	c.UI.Output(string(bytes))
	return nil
}
go
github
https://github.com/hashicorp/vault
command/pki_verify_sign.go
#
# Author: Pearu Peterson, March 2002
#
# additions by Travis Oliphant, March 2002
# additions by Eric Jones, June 2002
# additions by Johannes Loehnert, June 2006
# additions by Bart Vandereycken, June 2006
# additions by Andrew D Straw, May 2007
# additions by Tiziano Zito, November 2008
#
# April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were
# moved to their own files. Still in this file are functions for eigenstuff
# and for the Hessenberg form.

from __future__ import division, print_function, absolute_import

__all__ = ['eig','eigh','eig_banded','eigvals','eigvalsh', 'eigvals_banded',
           'hessenberg']

import numpy
import numpy as np
from numpy import array, asarray_chkfinite, asarray, diag, zeros, ones, \
        isfinite, inexact, nonzero, iscomplexobj, cast, flatnonzero, conj

# Local imports
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
from .misc import LinAlgError, _datacopied, norm
from .lapack import get_lapack_funcs
from .blas import get_blas_funcs

# Imaginary unit in single-precision complex, used to assemble complex
# eigenvalues from the real/imaginary parts returned by real LAPACK routines.
_I = cast['F'](1j)


def _make_complex_eigvecs(w, vin, dtype):
    """
    Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output
    """
    # - see LAPACK man page DGGEV at ALPHAI
    v = numpy.array(vin, dtype=dtype)
    # m marks columns belonging to complex-conjugate eigenvalue pairs
    m = (w.imag > 0)
    m[:-1] |= (w.imag[1:] < 0)  # workaround for LAPACK bug, cf. ticket #709
    for i in flatnonzero(m):
        # column i and i+1 hold Re/Im of a conjugate pair: combine them
        v.imag[:,i] = vin[:,i+1]
        conj(v[:,i], v[:,i+1])
    return v


def _geneig(a1, b1, left, right, overwrite_a, overwrite_b):
    """Solve the generalized eigenvalue problem a vr = w b vr via LAPACK ggev."""
    ggev, = get_lapack_funcs(('ggev',), (a1, b1))
    cvl, cvr = left, right
    # workspace query: lwork=-1 returns the optimal work array size
    res = ggev(a1, b1, lwork=-1)
    lwork = res[-2][0].real.astype(numpy.int)
    if ggev.typecode in 'cz':
        alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,
                                               overwrite_a, overwrite_b)
        w = alpha / beta
    else:
        # real routines return eigenvalues as (alphar + i*alphai) / beta
        alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,
                                                        lwork, overwrite_a,
                                                        overwrite_b)
        w = (alphar + _I * alphai) / beta
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal ggev'
                         % -info)
    if info > 0:
        raise LinAlgError("generalized eig algorithm did not converge (info=%d)"
                          % info)

    only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0))
    if not (ggev.typecode in 'cz' or only_real):
        # real-routine output with complex eigenvalues: rebuild complex vectors
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)

    # the eigenvectors returned by the lapack function are NOT normalized
    for i in xrange(vr.shape[0]):
        if right:
            vr[:, i] /= norm(vr[:, i])
        if left:
            vl[:, i] /= norm(vl[:, i])

    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr


def eig(a, b=None, left=False, right=True, overwrite_a=False,
        overwrite_b=False, check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem of a square matrix.

    Find eigenvalues w and right or left eigenvectors of a general matrix::

        a   vr[:,i] = w[i]        b   vr[:,i]
        a.H vl[:,i] = w[i].conj() b.H vl[:,i]

    where ``.H`` is the Hermitian conjugation.

    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        Default is None, identity matrix is assumed.
    left : bool, optional
        Whether to calculate and return left eigenvectors.  Default is False.
    right : bool, optional
        Whether to calculate and return right eigenvectors.  Default is True.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.  Default is False.
    overwrite_b : bool, optional
        Whether to overwrite `b`; may improve performance.  Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) double or complex ndarray
        The eigenvalues, each repeated according to its multiplicity.
    vl : (M, M) double or complex ndarray
        The normalized left eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.
    vr : (M, M) double or complex ndarray
        The normalized right eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column ``vr[:,i]``.  Only returned if ``right=True``.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if b is not None:
        # generalized problem: validate b and delegate to the ggev path
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')
        if b1.shape != a1.shape:
            raise ValueError('a and b must have the same shape')
        return _geneig(a1, b1, left, right, overwrite_a, overwrite_b)

    geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))
    compute_vl, compute_vr = left, right

    lwork, info = geev_lwork(a1.shape[0],
                             compute_vl=compute_vl,
                             compute_vr=compute_vr)
    if info != 0:
        raise LinAlgError("internal *geev work array calculation failed: %d"
                          % (info,))
    lwork = int(lwork.real)

    if geev.typecode in 'cz':
        w, vl, vr, info = geev(a1, lwork=lwork,
                               compute_vl=compute_vl,
                               compute_vr=compute_vr,
                               overwrite_a=overwrite_a)
    else:
        # real routine returns separate real/imaginary eigenvalue parts
        wr, wi, vl, vr, info = geev(a1, lwork=lwork,
                                    compute_vl=compute_vl,
                                    compute_vr=compute_vr,
                                    overwrite_a=overwrite_a)
        t = {'f':'F','d':'D'}[wr.dtype.char]
        w = wr + _I * wi
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal geev'
                         % -info)
    if info > 0:
        raise LinAlgError("eig algorithm did not converge (only eigenvalues "
                          "with order >= %d have converged)" % info)

    only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0))
    if not (geev.typecode in 'cz' or only_real):
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)
    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr


def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
         overwrite_b=False, turbo=True, eigvals=None, type=1,
         check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w and optionally eigenvectors v of matrix `a`, where
    `b` is positive definite::

                      a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix in.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:

           type = 1: a   v[:,i] = w[i] b v[:,i]

           type = 2: a b v[:,i] = w[i]   v[:,i]

           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) complex ndarray
        (if eigvals_only == False)

        The normalized selected eigenvector corresponding to the
        eigenvalue w[i] is the column v[:,i].

        Normalization:

            type 1 and 3: v.conj() a      v  = w

            type 2: inv(v).conj() a  inv(v) = w

            type = 1 or 2: v.conj() b      v  = I

            type = 3: v.conj() inv(b) v  = I

    Raises
    ------
    LinAlgError :
        If eigenvalue computation does not converge,
        an error occurred, or b matrix is not definite positive. Note that
        if input matrices are not symmetric or hermitian, no error is reported
        but results will be wrong.

    See Also
    --------
    eig : eigenvalues and right eigenvectors for non-symmetric arrays

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if iscomplexobj(a1):
        cplx = True
    else:
        cplx = False
    if b is not None:
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')

        if b1.shape != a1.shape:
            raise ValueError("wrong b dimensions %s, should "
                             "be %s" % (str(b1.shape), str(a1.shape)))
        if iscomplexobj(b1):
            cplx = True
        else:
            cplx = cplx or False
    else:
        b1 = None

    # Set job for fortran routines
    _job = (eigvals_only and 'N') or 'V'

    # port eigenvalue range from python to fortran convention
    if eigvals is not None:
        lo, hi = eigvals
        if lo < 0 or hi >= a1.shape[0]:
            raise ValueError('The eigenvalue range specified is not valid.\n'
                             'Valid range is [%s,%s]' % (0, a1.shape[0]-1))
        lo += 1
        hi += 1
        eigvals = (lo, hi)

    # set lower
    if lower:
        uplo = 'L'
    else:
        uplo = 'U'

    # fix prefix for lapack routines
    if cplx:
        pfx = 'he'
    else:
        pfx = 'sy'

    #  Standard Eigenvalue Problem
    #  Use '*evr' routines
    # FIXME: implement calculation of optimal lwork
    #        for all lapack routines
    if b1 is None:
        (evr,) = get_lapack_funcs((pfx+'evr',), (a1,))
        if eigvals is None:
            w, v, info = evr(a1, uplo=uplo, jobz=_job, range="A", il=1,
                             iu=a1.shape[0], overwrite_a=overwrite_a)
        else:
            (lo, hi) = eigvals
            w_tot, v, info = evr(a1, uplo=uplo, jobz=_job, range="I",
                                 il=lo, iu=hi, overwrite_a=overwrite_a)
            w = w_tot[0:hi-lo+1]

    # Generalized Eigenvalue Problem
    else:
        # Use '*gvx' routines if range is specified
        if eigvals is not None:
            (gvx,) = get_lapack_funcs((pfx+'gvx',), (a1,b1))
            (lo, hi) = eigvals
            w_tot, v, ifail, info = gvx(a1, b1, uplo=uplo, iu=hi,
                                        itype=type,jobz=_job, il=lo,
                                        overwrite_a=overwrite_a,
                                        overwrite_b=overwrite_b)
            w = w_tot[0:hi-lo+1]
        # Use '*gvd' routine if turbo is on and no eigvals are specified
        elif turbo:
            (gvd,) = get_lapack_funcs((pfx+'gvd',), (a1,b1))
            v, w, info = gvd(a1, b1, uplo=uplo, itype=type, jobz=_job,
                             overwrite_a=overwrite_a,
                             overwrite_b=overwrite_b)
        # Use '*gv' routine if turbo is off and no eigvals are specified
        else:
            (gv,) = get_lapack_funcs((pfx+'gv',), (a1,b1))
            v, w, info = gv(a1, b1, uplo=uplo, itype=type, jobz=_job,
                            overwrite_a=overwrite_a,
                            overwrite_b=overwrite_b)

    # Check if we had a  successful exit
    if info == 0:
        if eigvals_only:
            return w
        else:
            return w, v

    elif info < 0:
        raise LinAlgError("illegal value in %i-th argument of internal"
                          " fortran routine." % (-info))
    elif info > 0 and b1 is None:
        raise LinAlgError("unrecoverable internal error.")

    # The algorithm failed to converge.
    elif info > 0 and info <= b1.shape[0]:
        if eigvals is not None:
            # NOTE(review): ``nonzero(ifail)-1`` looks like an
            # operator-precedence bug (`%` binds before `-`); confirm
            # against upstream before relying on this message.
            raise LinAlgError("the eigenvectors %s failed to"
                              " converge." % nonzero(ifail)-1)
        else:
            raise LinAlgError("internal fortran routine failed to converge: "
                              "%i off-diagonal elements of an "
                              "intermediate tridiagonal form did not converge"
                              " to zero." % info)

    # This occurs when b is not positive definite
    else:
        raise LinAlgError("the leading minor of order %i"
                          " of 'b' is not positive definite. The"
                          " factorization of 'b' could not be completed"
                          " and no eigenvalues or eigenvectors were"
                          " computed." % (info-b1.shape[0]))


def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
               select='a', select_range=None, max_ev=0, check_finite=True):
    """
    Solve real symmetric or complex hermitian band matrix eigenvalue problem.

    Find eigenvalues w and optionally right eigenvectors v of a::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:

        a_band[u + i - j, j] == a[i,j]  (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]  (if lower form; i >= j)

    where u is the number of bands above the diagonal.

    Example of a_band (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    eigvals_only : bool, optional
        Compute only the eigenvalues and no eigenvectors.
        (Default: calculate also eigenvectors)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    max_ev : int, optional
        For select=='v', maximum number of eigenvalues expected.
        For other values of select, has no meaning.

        In doubt, leave this parameter untouched.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) float or complex ndarray
        The normalized eigenvector corresponding to the eigenvalue w[i] is
        the column v[:,i].

    Raises LinAlgError if eigenvalue computation does not converge

    """
    if eigvals_only or overwrite_a_band:
        a1 = _asarray_validated(a_band, check_finite=check_finite)
        overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
    else:
        a1 = array(a_band)
        if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
            raise ValueError("array must not contain infs or NaNs")
        overwrite_a_band = 1

    if len(a1.shape) != 2:
        raise ValueError('expected two-dimensional array')
    if select.lower() not in [0, 1, 2, 'a', 'v', 'i', 'all', 'value', 'index']:
        raise ValueError('invalid argument for select')
    if select.lower() in [0, 'a', 'all']:
        # all eigenvalues: use the divide-and-conquer *bevd routines
        if a1.dtype.char in 'GFD':
            bevd, = get_lapack_funcs(('hbevd',), (a1,))
            # FIXME: implement this somewhen, for now go with builtin values
            # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
            #        or by using calc_lwork.f ???
            # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'hbevd'
        else:  # a1.dtype.char in 'fd':
            bevd, = get_lapack_funcs(('sbevd',), (a1,))
            # FIXME: implement this somewhen, for now go with builtin values
            #         see above
            # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'sbevd'
        w,v,info = bevd(a1, compute_v=not eigvals_only,
                        lower=lower, overwrite_ab=overwrite_a_band)
    if select.lower() in [1, 2, 'i', 'v', 'index', 'value']:
        # calculate certain range only
        if select.lower() in [2, 'i', 'index']:
            select = 2
            vl, vu, il, iu = 0.0, 0.0, min(select_range), max(select_range)
            if min(il, iu) < 0 or max(il, iu) >= a1.shape[1]:
                raise ValueError('select_range out of bounds')
            max_ev = iu - il + 1
        else:  # 1, 'v', 'value'
            select = 1
            vl, vu, il, iu = min(select_range), max(select_range), 0, 0
            if max_ev == 0:
                max_ev = a_band.shape[1]
        if eigvals_only:
            max_ev = 1
        # calculate optimal abstol for dsbevx (see manpage)
        if a1.dtype.char in 'fF':  # single precision
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
        else:
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
        abstol = 2 * lamch('s')
        if a1.dtype.char in 'GFD':
            bevx, = get_lapack_funcs(('hbevx',), (a1,))
            internal_name = 'hbevx'
        else:  # a1.dtype.char in 'gfd'
            bevx, = get_lapack_funcs(('sbevx',), (a1,))
            internal_name = 'sbevx'
        # il+1, iu+1: translate python indexing (0 ... N-1) into Fortran
        # indexing (1 ... N)
        w, v, m, ifail, info = bevx(a1, vl, vu, il+1, iu+1,
                                    compute_v=not eigvals_only,
                                    mmax=max_ev,
                                    range=select, lower=lower,
                                    overwrite_ab=overwrite_a_band,
                                    abstol=abstol)
        # crop off w and v
        w = w[:m]
        if not eigvals_only:
            v = v[:, :m]
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal %s'
                         % (-info, internal_name))
    if info > 0:
        raise LinAlgError("eig algorithm did not converge")

    if eigvals_only:
        return w
    return w, v


def eigvals(a, b=None, overwrite_a=False, check_finite=True):
    """
    Compute eigenvalues from an ordinary or generalized eigenvalue problem.

    Find eigenvalues of a general matrix::

        a   vr[:,i] = w[i]        b   vr[:,i]

    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        If omitted, identity matrix is assumed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    w : (M,) double or complex ndarray
        The eigenvalues, each repeated according to its multiplicity,
        but not in any specific order.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays,
    eig : eigenvalues and right eigenvectors of general arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    """
    # Thin wrapper: eig with both eigenvector flags off returns only w.
    return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a,
               check_finite=check_finite)


def eigvalsh(a, b=None, lower=True, overwrite_a=False,
             overwrite_b=False, turbo=True, eigvals=None, type=1,
             check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w of matrix a, where b is positive definite::

                      a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix in.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:

           type = 1: a   v[:,i] = w[i] b v[:,i]

           type = 2: a b v[:,i] = w[i]   v[:,i]

           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.

    Raises
    ------
    LinAlgError :
        If eigenvalue computation does not converge,
        an error occurred, or b matrix is not definite positive. Note that
        if input matrices are not symmetric or hermitian, no error is reported
        but results will be wrong.

    See Also
    --------
    eigvals : eigenvalues of general arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays

    """
    # Thin wrapper around eigh with eigvals_only=True.
    return eigh(a, b=b, lower=lower, eigvals_only=True,
                overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                turbo=turbo, eigvals=eigvals, type=type,
                check_finite=check_finite)


def eigvals_banded(a_band, lower=False, overwrite_a_band=False,
                   select='a', select_range=None, check_finite=True):
    """
    Solve real symmetric or complex hermitian band matrix eigenvalue problem.

    Find eigenvalues w of a::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:

        a_band[u + i - j, j] == a[i,j]  (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]  (if lower form; i >= j)

    where u is the number of bands above the diagonal.

    Example of a_band (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.

    Raises LinAlgError if eigenvalue computation does not converge

    See Also
    --------
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices
    eigvals : eigenvalues of general arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays

    """
    # Thin wrapper around eig_banded with eigvals_only on.
    return eig_banded(a_band, lower=lower, eigvals_only=1,
                      overwrite_a_band=overwrite_a_band, select=select,
                      select_range=select_range, check_finite=check_finite)

_double_precision = ['i','l','d']


def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
    """
    Compute Hessenberg form of a matrix.

    The Hessenberg decomposition is::

        A = Q H Q^H

    where `Q` is unitary/orthogonal and `H` has only zero elements below
    the first sub-diagonal.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to bring into Hessenberg form.
    calc_q : bool, optional
        Whether to compute the transformation matrix.  Default is False.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.
        Default is False.
check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- H : (M, M) ndarray Hessenberg form of `a`. Q : (M, M) ndarray Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``. Only returned if ``calc_q=True``. """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): raise ValueError('expected square matrix') overwrite_a = overwrite_a or (_datacopied(a1, a)) # if 2x2 or smaller: already in Hessenberg if a1.shape[0] <= 2: if calc_q: return a1, numpy.eye(a1.shape[0]) return a1 gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd','gebal', 'gehrd_lwork'), (a1,)) ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a) if info < 0: raise ValueError('illegal value in %d-th argument of internal gebal ' '(hessenberg)' % -info) n = len(a1) lwork, info = gehrd_lwork(ba.shape[0], lo=lo, hi=hi) if info != 0: raise ValueError('failed to compute internal gehrd work array size. ' 'LAPACK info = %d ' % info) lwork = int(lwork.real) hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) if info < 0: raise ValueError('illegal value in %d-th argument of internal gehrd ' '(hessenberg)' % -info) h = numpy.triu(hq, -1) if not calc_q: return h # use orghr/unghr to compute q orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,)) lwork, info = orghr_lwork(n, lo=lo, hi=hi) if info != 0: raise ValueError('failed to compute internal orghr work array size. ' 'LAPACK info = %d ' % info) lwork = int(lwork.real) q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) if info < 0: raise ValueError('illegal value in %d-th argument of internal orghr ' '(hessenberg)' % -info) return h, q
unknown
codeparrot/codeparrot-clean
import json from .utils import identity, compose, true class Storage(object): r""" >>> import tempfile >>> import operator >>> tablename = tempfile.NamedTemporaryFile(delete=False).name >>> storage = Storage() >>> data = {1: 'foo'} >>> storage._to_fmt(None)(data) {1: 'foo'} >>> storage._to_fmt('json')(data) {u'1': u'foo'} >>> storage._format('foo') 'foo\n' >>> storage._format(['foo', 'bar']) 'foo\nbar\n' >>> storage.dumps(data, fmt='json') '{"1": "foo"}\n' >>> object_id = lambda x: operator.__getitem__(x, 'id') >>> storage.dump([{'id': 1, 'x': 'a'}, {'id': 2, 'x': 'a'}], tablename, ... fmt='json', unique=object_id) >>> storage.load(tablename, fmt='json') [{u'x': u'a', u'id': 1}, {u'x': u'a', u'id': 2}] >>> storage.load(tablename, fmt='json', func=object_id) [1, 2] >>> storage.dump([{'id': 1, 'x': 'b'}, {'id': 3, 'x': 'b'}], tablename, ... fmt='json', unique=object_id) >>> storage.load(tablename, fmt='json', func=lambda x: (x['id'], x['x'])) [(1, u'a'), (2, u'a'), (3, u'b')] """ def _serializer(self, fmt): return { None: identity, 'json': json.dumps }.get(fmt, identity) def _deserializer(self, fmt): return { None: identity, 'json': json.loads }.get(fmt, identity) def _to_fmt(self, fmt): serializer = self._serializer(fmt) deserializer = self._deserializer(fmt) return lambda x: deserializer(serializer(x)) def load(self, table, fmt, func=None): deserializer = self._deserializer(fmt) functor = compose(deserializer, func or identity) try: with open(table, 'r') as storage: return map(functor, storage.readlines()) except (IOError, ValueError): # if file is # * not existing # * empty/corrupted # return no data return [] def _filter(self, table, fmt, unique): if unique: uniques = set(self.load(table, fmt, func=unique) if unique else []) return compose(compose(self._to_fmt(fmt), unique), lambda x: x not in uniques) else: return true def _format(self, content): if not isinstance(content, basestring): content = '\n'.join(content) # always end table storage by a newline for 
least surprise principle if content and not content.endswith('\n'): content += '\n' return content def dumps(self, content, fmt, exclude=None): if not isinstance(content, (list, tuple)): content = [content] content = map(self._serializer(fmt), filter(exclude, content)) return self._format(content) def mode(self, override): return 'w' if override else 'a' def dump(self, content, table, fmt=None, unique=None, override=False): content = self.dumps(content, fmt, exclude=self._filter(table, fmt, unique)) with open(table, self.mode(override)) as table_storage: table_storage.write(content)
unknown
codeparrot/codeparrot-clean
## Input ```javascript // @validateMemoizedEffectDependencies function Component(props) { // Items cannot be memoized bc its mutation spans a hook call const items = [props.value]; const [state, _setState] = useState(null); mutate(items); // Items is no longer mutable here, but it hasn't been memoized useEffect(() => { console.log(items); }, [items]); return [items, state]; } ``` ## Error ``` Found 1 error: Compilation Skipped: React Compiler has skipped optimizing this component because the effect dependencies could not be memoized. Unmemoized effect dependencies can trigger an infinite loop or other unexpected behavior error.invalid-useEffect-dep-not-memoized-bc-range-overlaps-hook.ts:9:2 7 | 8 | // Items is no longer mutable here, but it hasn't been memoized > 9 | useEffect(() => { | ^^^^^^^^^^^^^^^^^ > 10 | console.log(items); | ^^^^^^^^^^^^^^^^^^^^^^^ > 11 | }, [items]); | ^^^^^^^^^^^^^^ React Compiler has skipped optimizing this component because the effect dependencies could not be memoized. Unmemoized effect dependencies can trigger an infinite loop or other unexpected behavior 12 | 13 | return [items, state]; 14 | } ```
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.invalid-useEffect-dep-not-memoized-bc-range-overlaps-hook.expect.md
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert FastSpeech2Conformer checkpoint.""" import argparse import torch from transformers import ( FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerModel, FastSpeech2ConformerWithHifiGan, FastSpeech2ConformerWithHifiGanConfig, logging, ) from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import ( convert_espnet_state_dict_to_hf, remap_model_yaml_config, ) from .convert_hifigan import load_weights, remap_hifigan_yaml_config logging.set_verbosity_info() logger = logging.get_logger("transformers.models.FastSpeech2Conformer") def convert_FastSpeech2ConformerWithHifiGan_checkpoint( checkpoint_path, yaml_config_path, pytorch_dump_folder_path, repo_id=None, ): # Prepare the model model_params, *_ = remap_model_yaml_config(yaml_config_path) model_config = FastSpeech2ConformerConfig(**model_params) model = FastSpeech2ConformerModel(model_config) espnet_checkpoint = torch.load(checkpoint_path, weights_only=True) hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint) model.load_state_dict(hf_compatible_state_dict) # Prepare the vocoder config_kwargs = remap_hifigan_yaml_config(yaml_config_path) vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs) vocoder = FastSpeech2ConformerHifiGan(vocoder_config) load_weights(espnet_checkpoint, vocoder, 
vocoder_config) # Prepare the model + vocoder config = FastSpeech2ConformerWithHifiGanConfig(model_config, vocoder_config) with_hifigan_model = FastSpeech2ConformerWithHifiGan(config) with_hifigan_model.model = model with_hifigan_model.vocoder = vocoder with_hifigan_model.save_pretrained(pytorch_dump_folder_path) if repo_id: print("Pushing to the hub...") with_hifigan_model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument( "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert" ) parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output `FastSpeech2ConformerModel` PyTorch model.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub." ) args = parser.parse_args() convert_FastSpeech2ConformerWithHifiGan_checkpoint( args.checkpoint_path, args.yaml_config_path, args.pytorch_dump_folder_path, args.push_to_hub, )
python
github
https://github.com/huggingface/transformers
src/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py
__all__ = [ "assert_nodes_equal", "assert_edges_equal", "assert_graphs_equal", "almost_equal", ] def almost_equal(x, y, places=7): return round(abs(x - y), places) == 0 def assert_nodes_equal(nodes1, nodes2): # Assumes iterables of nodes, or (node,datadict) tuples nlist1 = list(nodes1) nlist2 = list(nodes2) try: d1 = dict(nlist1) d2 = dict(nlist2) except (ValueError, TypeError): d1 = dict.fromkeys(nlist1) d2 = dict.fromkeys(nlist2) assert d1 == d2 def assert_edges_equal(edges1, edges2): # Assumes iterables with u,v nodes as # edge tuples (u,v), or # edge tuples with data dicts (u,v,d), or # edge tuples with keys and data dicts (u,v,k, d) from collections import defaultdict d1 = defaultdict(dict) d2 = defaultdict(dict) c1 = 0 for c1, e in enumerate(edges1): u, v = e[0], e[1] data = [e[2:]] if v in d1[u]: data = d1[u][v] + data d1[u][v] = data d1[v][u] = data c2 = 0 for c2, e in enumerate(edges2): u, v = e[0], e[1] data = [e[2:]] if v in d2[u]: data = d2[u][v] + data d2[u][v] = data d2[v][u] = data assert c1 == c2 # can check one direction because lengths are the same. for n, nbrdict in d1.items(): for nbr, datalist in nbrdict.items(): assert n in d2 assert nbr in d2[n] d2datalist = d2[n][nbr] for data in datalist: assert datalist.count(data) == d2datalist.count(data) def assert_graphs_equal(graph1, graph2): assert graph1.adj == graph2.adj assert graph1.nodes == graph2.nodes assert graph1.graph == graph2.graph
unknown
codeparrot/codeparrot-clean
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PERMUTER_H_ #define TENSORFLOW_CORE_COMMON_RUNTIME_PERMUTER_H_ #include <deque> #include <memory> #include <string> #include <vector> #include "tensorflow/core/common_runtime/base_collective_executor.h" #include "tensorflow/core/framework/collective.h" namespace tensorflow { class Device; // Implementation of collective permute. // // Permute takes // - a list of devices participating in the collective // - a permutation as a list of integers. // - a tensor // // The list of devices replaces the need for group_key and group_size. The // number of inputs only scales with the number of devices within one group. // // The integers in the permutation are based on indices of the list of devices. // E.g. devices = {"GPU:0", "GPU:1"} and permutation = {1,0} means // - devices[0] sends to devices[permutation[0]] and // - devices[1] sends to devices[permutation[1]]. // // Each device sends exactly one tensor and receives exactly one tensor. class Permuter : public CollectiveImplementationInterface { public: Permuter(); ~Permuter() override = default; void Run(StatusCallback done) override; absl::Status InitializeCollectiveParams( CollectiveParams* col_params) override { return absl::OkStatus(); } // Initializes members of CollectiveContext not yet initialized, i.e. 
device // and device_locality. Also saves the CollectiveContext in this object. absl::Status InitializeCollectiveContext( std::shared_ptr<CollectiveContext> col_ctx) override; private: std::shared_ptr<CollectiveContext> col_ctx_; const CollectiveParams* col_params_; // Not owned StatusCallback done_; mutex mu_; absl::Status status_ TF_GUARDED_BY(mu_); int counter_ TF_GUARDED_BY(mu_); void DispatchSend(int src_rank, int target_rank, const Tensor* tensor, const StatusCallback& done); void DispatchRecv(int src_rank, int target_rank, Tensor* tensor, const StatusCallback& done); // Atomically increments counter_ by one for sending, one for receiving. // Invokes done when counter_ reaches 2. // The purpose of checking counter_ is to ensure that done_ is called once. StatusCallback CheckCounterAndCallDone(); }; } // namespace tensorflow #endif // TENSORFLOW_CORE_COMMON_RUNTIME_PERMUTER_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/common_runtime/permuter.h
// Copyright(c) 2015-present, Gabi Melman & spdlog contributors. // Distributed under the MIT License (http://opensource.org/licenses/MIT) #pragma once #include <spdlog/details/log_msg.h> #include <spdlog/formatter.h> namespace spdlog { namespace sinks { class SPDLOG_API sink { public: virtual ~sink() = default; virtual void log(const details::log_msg &msg) = 0; virtual void flush() = 0; virtual void set_pattern(const std::string &pattern) = 0; virtual void set_formatter(std::unique_ptr<spdlog::formatter> sink_formatter) = 0; void set_level(level::level_enum log_level); level::level_enum level() const; bool should_log(level::level_enum msg_level) const; protected: // sink log level - default is all level_t level_{level::trace}; }; } // namespace sinks } // namespace spdlog #ifdef SPDLOG_HEADER_ONLY #include "sink-inl.h" #endif
c
github
https://github.com/nodejs/node
deps/LIEF/third-party/spdlog/include/spdlog/sinks/sink.h
/*------------------------------------------------------------------------- * * jsonbsubs.c * Subscripting support functions for jsonb. * * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/utils/adt/jsonbsubs.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "executor/execExpr.h" #include "nodes/nodeFuncs.h" #include "nodes/subscripting.h" #include "parser/parse_coerce.h" #include "parser/parse_expr.h" #include "utils/builtins.h" #include "utils/jsonb.h" /* SubscriptingRefState.workspace for jsonb subscripting execution */ typedef struct JsonbSubWorkspace { bool expectArray; /* jsonb root is expected to be an array */ Oid *indexOid; /* OID of coerced subscript expression, could * be only integer or text */ Datum *index; /* Subscript values in Datum format */ } JsonbSubWorkspace; /* * Finish parse analysis of a SubscriptingRef expression for a jsonb. * * Transform the subscript expressions, coerce them to text, * and determine the result type of the SubscriptingRef node. */ static void jsonb_subscript_transform(SubscriptingRef *sbsref, List *indirection, ParseState *pstate, bool isSlice, bool isAssignment) { List *upperIndexpr = NIL; ListCell *idx; /* * Transform and convert the subscript expressions. Jsonb subscripting * does not support slices, look only at the upper index. */ foreach(idx, indirection) { A_Indices *ai = lfirst_node(A_Indices, idx); Node *subExpr; if (isSlice) { Node *expr = ai->uidx ? 
ai->uidx : ai->lidx; ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("jsonb subscript does not support slices"), parser_errposition(pstate, exprLocation(expr)))); } if (ai->uidx) { Oid subExprType = InvalidOid, targetType = UNKNOWNOID; subExpr = transformExpr(pstate, ai->uidx, pstate->p_expr_kind); subExprType = exprType(subExpr); if (subExprType != UNKNOWNOID) { Oid targets[2] = {INT4OID, TEXTOID}; /* * Jsonb can handle multiple subscript types, but cases when a * subscript could be coerced to multiple target types must be * avoided, similar to overloaded functions. It could be * possibly extend with jsonpath in the future. */ for (int i = 0; i < 2; i++) { if (can_coerce_type(1, &subExprType, &targets[i], COERCION_IMPLICIT)) { /* * One type has already succeeded, it means there are * two coercion targets possible, failure. */ if (targetType != UNKNOWNOID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("subscript type %s is not supported", format_type_be(subExprType)), errhint("jsonb subscript must be coercible to only one type, integer or text."), parser_errposition(pstate, exprLocation(subExpr)))); targetType = targets[i]; } } /* * No suitable types were found, failure. */ if (targetType == UNKNOWNOID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("subscript type %s is not supported", format_type_be(subExprType)), errhint("jsonb subscript must be coercible to either integer or text."), parser_errposition(pstate, exprLocation(subExpr)))); } else targetType = TEXTOID; /* * We known from can_coerce_type that coercion will succeed, so * coerce_type could be used. Note the implicit coercion context, * which is required to handle subscripts of different types, * similar to overloaded functions. 
*/ subExpr = coerce_type(pstate, subExpr, subExprType, targetType, -1, COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); if (subExpr == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("jsonb subscript must have text type"), parser_errposition(pstate, exprLocation(subExpr)))); } else { /* * Slice with omitted upper bound. Should not happen as we already * errored out on slice earlier, but handle this just in case. */ Assert(isSlice && ai->is_slice); ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("jsonb subscript does not support slices"), parser_errposition(pstate, exprLocation(ai->uidx)))); } upperIndexpr = lappend(upperIndexpr, subExpr); } /* store the transformed lists into the SubscriptingRef node */ sbsref->refupperindexpr = upperIndexpr; sbsref->reflowerindexpr = NIL; /* Determine the result type of the subscripting operation; always jsonb */ sbsref->refrestype = JSONBOID; sbsref->reftypmod = -1; } /* * During execution, process the subscripts in a SubscriptingRef expression. * * The subscript expressions are already evaluated in Datum form in the * SubscriptingRefState's arrays. Check and convert them as necessary. * * If any subscript is NULL, we throw error in assignment cases, or in fetch * cases set result to NULL and return false (instructing caller to skip the * rest of the SubscriptingRef sequence). */ static bool jsonb_subscript_check_subscripts(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { SubscriptingRefState *sbsrefstate = op->d.sbsref_subscript.state; JsonbSubWorkspace *workspace = (JsonbSubWorkspace *) sbsrefstate->workspace; /* * In case if the first subscript is an integer, the source jsonb is * expected to be an array. This information is not used directly, all * such cases are handled within corresponding jsonb assign functions. But * if the source jsonb is NULL the expected type will be used to construct * an empty source. 
*/ if (sbsrefstate->numupper > 0 && sbsrefstate->upperprovided[0] && !sbsrefstate->upperindexnull[0] && workspace->indexOid[0] == INT4OID) workspace->expectArray = true; /* Process upper subscripts */ for (int i = 0; i < sbsrefstate->numupper; i++) { if (sbsrefstate->upperprovided[i]) { /* If any index expr yields NULL, result is NULL or error */ if (sbsrefstate->upperindexnull[i]) { if (sbsrefstate->isassignment) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("jsonb subscript in assignment must not be null"))); *op->resnull = true; return false; } /* * For jsonb fetch and assign functions we need to provide path in * text format. Convert if it's not already text. */ if (workspace->indexOid[i] == INT4OID) { Datum datum = sbsrefstate->upperindex[i]; char *cs = DatumGetCString(DirectFunctionCall1(int4out, datum)); workspace->index[i] = CStringGetTextDatum(cs); } else workspace->index[i] = sbsrefstate->upperindex[i]; } } return true; } /* * Evaluate SubscriptingRef fetch for a jsonb element. * * Source container is in step's result variable (it's known not NULL, since * we set fetch_strict to true). */ static void jsonb_subscript_fetch(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { SubscriptingRefState *sbsrefstate = op->d.sbsref.state; JsonbSubWorkspace *workspace = (JsonbSubWorkspace *) sbsrefstate->workspace; Jsonb *jsonbSource; /* Should not get here if source jsonb (or any subscript) is null */ Assert(!(*op->resnull)); jsonbSource = DatumGetJsonbP(*op->resvalue); *op->resvalue = jsonb_get_element(jsonbSource, workspace->index, sbsrefstate->numupper, op->resnull, false); } /* * Evaluate SubscriptingRef assignment for a jsonb element assignment. * * Input container (possibly null) is in result area, replacement value is in * SubscriptingRefState's replacevalue/replacenull. 
*/ static void jsonb_subscript_assign(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { SubscriptingRefState *sbsrefstate = op->d.sbsref.state; JsonbSubWorkspace *workspace = (JsonbSubWorkspace *) sbsrefstate->workspace; Jsonb *jsonbSource; JsonbValue replacevalue; if (sbsrefstate->replacenull) replacevalue.type = jbvNull; else JsonbToJsonbValue(DatumGetJsonbP(sbsrefstate->replacevalue), &replacevalue); /* * In case if the input container is null, set up an empty jsonb and * proceed with the assignment. */ if (*op->resnull) { JsonbValue newSource; /* * To avoid any surprising results, set up an empty jsonb array in * case of an array is expected (i.e. the first subscript is integer), * otherwise jsonb object. */ if (workspace->expectArray) { newSource.type = jbvArray; newSource.val.array.nElems = 0; newSource.val.array.rawScalar = false; } else { newSource.type = jbvObject; newSource.val.object.nPairs = 0; } jsonbSource = JsonbValueToJsonb(&newSource); *op->resnull = false; } else jsonbSource = DatumGetJsonbP(*op->resvalue); *op->resvalue = jsonb_set_element(jsonbSource, workspace->index, sbsrefstate->numupper, &replacevalue); /* The result is never NULL, so no need to change *op->resnull */ } /* * Compute old jsonb element value for a SubscriptingRef assignment * expression. Will only be called if the new-value subexpression * contains SubscriptingRef or FieldStore. This is the same as the * regular fetch case, except that we have to handle a null jsonb, * and the value should be stored into the SubscriptingRefState's * prevvalue/prevnull fields. 
*/ static void jsonb_subscript_fetch_old(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { SubscriptingRefState *sbsrefstate = op->d.sbsref.state; if (*op->resnull) { /* whole jsonb is null, so any element is too */ sbsrefstate->prevvalue = (Datum) 0; sbsrefstate->prevnull = true; } else { Jsonb *jsonbSource = DatumGetJsonbP(*op->resvalue); sbsrefstate->prevvalue = jsonb_get_element(jsonbSource, sbsrefstate->upperindex, sbsrefstate->numupper, &sbsrefstate->prevnull, false); } } /* * Set up execution state for a jsonb subscript operation. Opposite to the * arrays subscription, there is no limit for number of subscripts as jsonb * type itself doesn't have nesting limits. */ static void jsonb_exec_setup(const SubscriptingRef *sbsref, SubscriptingRefState *sbsrefstate, SubscriptExecSteps *methods) { JsonbSubWorkspace *workspace; ListCell *lc; int nupper = sbsref->refupperindexpr->length; char *ptr; /* Allocate type-specific workspace with space for per-subscript data */ workspace = palloc0(MAXALIGN(sizeof(JsonbSubWorkspace)) + nupper * (sizeof(Datum) + sizeof(Oid))); workspace->expectArray = false; ptr = ((char *) workspace) + MAXALIGN(sizeof(JsonbSubWorkspace)); /* * This coding assumes sizeof(Datum) >= sizeof(Oid), else we might * misalign the indexOid pointer */ workspace->index = (Datum *) ptr; ptr += nupper * sizeof(Datum); workspace->indexOid = (Oid *) ptr; sbsrefstate->workspace = workspace; /* Collect subscript data types necessary at execution time */ foreach(lc, sbsref->refupperindexpr) { Node *expr = lfirst(lc); int i = foreach_current_index(lc); workspace->indexOid[i] = exprType(expr); } /* * Pass back pointers to appropriate step execution functions. */ methods->sbs_check_subscripts = jsonb_subscript_check_subscripts; methods->sbs_fetch = jsonb_subscript_fetch; methods->sbs_assign = jsonb_subscript_assign; methods->sbs_fetch_old = jsonb_subscript_fetch_old; } /* * jsonb_subscript_handler * Subscripting handler for jsonb. 
* */ Datum jsonb_subscript_handler(PG_FUNCTION_ARGS) { static const SubscriptRoutines sbsroutines = { .transform = jsonb_subscript_transform, .exec_setup = jsonb_exec_setup, .fetch_strict = true, /* fetch returns NULL for NULL inputs */ .fetch_leakproof = true, /* fetch returns NULL for bad subscript */ .store_leakproof = false /* ... but assignment throws error */ }; PG_RETURN_POINTER(&sbsroutines); }
c
github
https://github.com/postgres/postgres
src/backend/utils/adt/jsonbsubs.c
import datetime

from parsley import makeGrammar as make_grammar, _GrammarWrapper

__version__ = "0.1.0"


class Duplicated(Exception):
    """
    A key group was duplicated, or contained a duplicate value.
    """

    @classmethod
    def in_group(cls, key, key_group=None):
        # key_group is None for keys that live at document (top) level.
        group_str = "the document" if key_group is None else repr(key_group)
        return cls("%r already appears in %s." % (key, group_str))


# Parsley (OMeta) grammar for TOML.
# NOTE(review): the `datetime` rule binds a `microsecond` fragment but the
# `datetime(...)` action never passes it along, so fractional seconds are
# parsed and then silently dropped -- confirm whether that is intentional.
toml_grammar = r"""
document = key_group*:groups -> document(groups)
key_group = (header_line:header value_line*:values
            | (-> []):header value_line+:values) ignore -> header, values
header_line = ignore '[' key_name:name ']' line_end -> name
key_name = key_segment:first ('.' key_segment)*:rest -> [first] + rest
key_segment = <(~('[' | ']' | '.') anything)+>
value_line = ~header_line ignore name:k ws '=' ws value:v line_end -> (k, v)
name = <(~(space | '=' | nl) anything)+>
value = string | datetime | float | integer | boolean | array
array = '[' ignore elements:members ignore ']' -> self.array(members)
elements = (value:first (ignore ',' ignore value)*:rest ','? -> [first] + rest) | -> []
string = '"' (escape_char | ~('"' | '\\') anything)*:c '"' -> ''.join(c).decode("utf-8")
escape_char = '\\' (('0' -> '\0')
                   |('b' -> '\b')
                   |('t' -> '\t')
                   |('n' -> '\n')
                   |('f' -> '\f')
                   |('r' -> '\r')
                   |('"' -> '"')
                   |('\\' -> '\\')
                   |('/' -> '/')
                   |escape_unichar)
escape_unichar = 'u' <hexdigit{4}>:hs -> unichr(int(hs, 16)).encode("utf-8")
integer = ('-' | -> ''):sign digit1_9:first <digit*>:rest -> int(sign + first + rest)
float = integer:whole '.' <digit+>:frac -> float(str(whole) + "." + frac)
boolean = ('true' -> True) | ('false' -> False)
datetime = (digit1_9:first digit{3}:rest -> "".join([first] + rest)):year '-'
           digit{2}:month '-' digit{2}:day
           'T' digit{2}:hour ':' digit{2}:minute ':' digit{2}:second
           (('.' digit+) | -> 0):microsecond 'Z' -> datetime(
               year=int("".join(year)),
               month=int("".join(month)),
               day=int("".join(day)),
               hour=int("".join(hour)),
               minute=int("".join(minute)),
               second=int("".join(second)),
           )
line_end = ws comment? nl
ignore = (comment | space | nl)*
comment = '#' (~'\n' anything)*
ws = space*
space = ' ' | '\t'
nl = '\r\n' | '\r' | '\n'
digit1_9 = :x ?(x in '123456789') -> x
hexdigit = :x ?(x in '0123456789abcdefABCDEF') -> x
"""


def document(groups):
    """
    Assemble parsed ``(header, values)`` groups into a nested dict.

    Raises :class:`Duplicated` when a table or key appears twice within
    the same group.
    """
    doc = dict()
    for header, values in sorted(groups):
        key_group, subgroup = doc, None
        if header:
            path, key = header[:-1], header[-1]
            # Walk (creating as needed) the intermediate tables of the header.
            for subgroup in path:
                key_group = key_group.setdefault(subgroup, {})
            if key in key_group:
                raise Duplicated.in_group(key, subgroup)
            # Create the table for this header and descend into it.
            key_group[key] = key_group = {}
        for key, value in values:
            if key in key_group:
                raise Duplicated.in_group(key, subgroup)
            key_group[key] = value
    return doc


_TOMLParser = make_grammar(
    toml_grammar,
    bindings={"document" : document, "datetime" : datetime.datetime},
    name="TOMLParser",
    unwrap=True,
)


class TOMLParser(_TOMLParser):
    """
    A TOML parser.
    """

    def __init__(self, toml, homogeneous_arrays=True):
        """
        Initialize me.

        :argument str toml: some TOML
        :argument bool homogeneous_arrays: enforce homogeneity of array members
        """
        super(TOMLParser, self).__init__(toml)
        self.homogeneous_arrays = homogeneous_arrays

    def array(self, members):
        # Called from the grammar's `array` rule action.
        if self.homogeneous_arrays and len(set(type(e) for e in members)) > 1:
            raise TypeError("%r is not homogeneous." % (members,))
        return members


def loads(toml, **kwargs):
    """
    Load some ``TOML`` from a string.

    :argument kwargs: passed along to :class:`TOMLParser`
    """
    return _GrammarWrapper(TOMLParser(toml, **kwargs), toml).document()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """Module providing JSON storage for static asset assignanvts""" from plone.autoform.interfaces import IFormFieldProvider from plone.dexterity.interfaces import IDexterityContent from plone.supermodel import model from zope.component import adapter from zope.interface import implementer from zope.interface import provider from zope import schema from anv.sitecontent import MessageFactory as _ @provider(IFormFieldProvider) class IGalleryEnabled(model.Schema): """Behavior providing a checkbox to toggle gallery display""" model.fieldset( 'display', label=u"Display", fields=['displayGallery', 'displayPreviewCards'] ) displayGallery = schema.Bool( title=_(u"Check to enable gallery display"), description=_(u"When activated the view will attempt to display a " u"gallery of all contained images"), required=False, ) displayPreviewCards = schema.Bool( title=_(u"Check to enable contained pages preview"), description=_(u"When activated the view will display a " u"list of all contained pages as content cards"), required=False, ) @implementer(IGalleryEnabled) @adapter(IDexterityContent) class GalleryEnabled(object): def __init__(self, context): self.context = context
unknown
codeparrot/codeparrot-clean
# flake8: noqa
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.core.exceptions import ObjectDoesNotExist
from django.db import models


class Migration(DataMigration):
    """Move each GoodPracticeExample's 'practice_description' placeholder from
    the M2M ``placeholders`` field onto the dedicated ``practice_description``
    ForeignKey, copying the plugins into a freshly created placeholder."""

    depends_on = (
        ('cms', '0058_placeholderref_table_rename'),
    )

    def migrate_placeholder(self, orm, goodpracticeexample, old_slot, new_slot, new_field):
        # Find the old placeholder via the M2M through table; missing rows are
        # not an error (the instance simply has nothing to migrate).
        placeholder = None
        try:
            placeholder_m2m_object = goodpracticeexample.placeholders.through.objects.get(
                goodpracticeexample=goodpracticeexample,
                placeholder__slot=old_slot)
            placeholder = placeholder_m2m_object.placeholder
        except ObjectDoesNotExist:
            pass
        if placeholder:
            new_placeholder = orm['cms.Placeholder'].objects.create(
                slot=new_slot)
            # Re-point every plugin at the new placeholder before switching
            # the FK, so no content is lost.
            for plugin in placeholder.get_plugins():
                plugin.placeholder_id = new_placeholder.pk
                plugin.save()
            setattr(goodpracticeexample, new_field, new_placeholder)
            goodpracticeexample.save()
            try:
                # Drop the now-empty old placeholder and its M2M link.
                placeholder_m2m_object.delete()
                placeholder.delete()
            except ObjectDoesNotExist:
                pass

    def forwards(self, orm):
        for goodpracticeexample in orm['good_practice_examples.GoodPracticeExample'].objects.all():
            self.migrate_placeholder(
                orm, goodpracticeexample,
                'practice_description',
                'good_practice_example_practice_description',
                'practice_description')

    def backwards(self, orm):
        # Data cannot be safely moved back into the M2M field.
        raise RuntimeError('No backwards migration provided.')

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'good_practice_examples.country': {
            'Meta': {'object_name': 'Country'},
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'good_practice_examples.goal': {
            'Meta': {'object_name': 'Goal'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'good_practice_examples.goaltranslation': {
            'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'GoalTranslation', 'db_table': "u'good_practice_examples_goal_translation'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['good_practice_examples.Goal']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        u'good_practice_examples.goodpracticeexample': {
            'Meta': {'object_name': 'GoodPracticeExample'},
            'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['good_practice_examples.Country']", 'symmetrical': 'False'}),
            'goals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['good_practice_examples.Goal']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'placeholders': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'practice_description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'goodpracticeexamples'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
            'sectors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['good_practice_examples.Sector']", 'symmetrical': 'False'})
        },
        u'good_practice_examples.goodpracticeexampletranslation': {
            'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'GoodPracticeExampleTranslation', 'db_table': "u'good_practice_examples_goodpracticeexample_translation'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['good_practice_examples.GoodPracticeExample']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        u'good_practice_examples.sector': {
            'Meta': {'object_name': 'Sector'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'good_practice_examples.sectortranslation': {
            'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'SectorTranslation', 'db_table': "u'good_practice_examples_sector_translation'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['good_practice_examples.Sector']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }

    complete_apps = ['good_practice_examples']
    symmetrical = True
unknown
codeparrot/codeparrot-clean
from __future__ import absolute_import

from celery import shared_task
from django.core import management
from leonardo.decorators import catch_result
from django.apps import apps
from django.utils import six

from .models import Package

CounterWidget = apps.get_model('web', 'CounterWidget')


def _get_counters(name):
    """Return the CounterWidget instances whose label mentions ``name``."""
    return CounterWidget.objects.filter(label__icontains=name)


def update_counter_modules():
    '''Update the "modules" counters with the total package count.'''
    count = Package.objects.count()
    for counter in _get_counters('modules'):
        counter.number = count
        counter.save()
    return count


def update_counter_downloads():
    '''Update the "downloads" counters with the summed download count.'''
    # BUGFIX: the original used ``reduce``, which is a builtin only on
    # Python 2 and was never imported, so this raised NameError on Python 3
    # (and TypeError on an empty queryset).  ``sum`` handles both cases.
    total = sum(p.total_downloads for p in Package.objects.all())
    for counter in _get_counters('downloads'):
        counter.number = total
        counter.save()
    return total


def update_counter_stars():
    '''Update the "stars" counters with the summed watcher count.'''
    total = sum(p.repo_watchers for p in Package.objects.all())
    for counter in _get_counters('stars'):
        counter.number = total
        counter.save()
    return total


def update_counter_forks():
    '''Update the "forks" counters with the summed fork count.'''
    total = sum(p.repo_forks for p in Package.objects.all())
    for counter in _get_counters('forks'):
        counter.number = total
        counter.save()
    return total


def update_counter_participants():
    '''Update the "participants" counters with the distinct contributor count.'''
    # A set replaces the original hand-rolled "append if not present" list,
    # turning the dedupe from O(n^2) into O(n) with the same resulting count.
    participants = set()
    for p in Package.objects.all():
        participants.update(p.participants.split(','))
    count = len(participants)
    for counter in _get_counters('participants'):
        counter.number = count
        counter.save()
    return count


# Maps counter label fragments to their refresh functions.
COUNTERS_CONFIG = {
    'modules': update_counter_modules,
    'downloads': update_counter_downloads,
    'stars': update_counter_stars,
    'forks': update_counter_forks,
    'participants': update_counter_participants,
}


@shared_task
@catch_result
def update_counters():
    """Refresh all counter widgets, collecting per-counter results or errors."""
    result = []
    for counter, update_fn in six.iteritems(COUNTERS_CONFIG):
        try:
            res = update_fn()
        except Exception as e:
            # Best-effort: record the failure and keep updating the rest.
            result.append({counter: str(e)})
        else:
            result.append({counter: res})
    return {'result': result}


@shared_task
@catch_result
def update_all_packages():
    """Run the ``update_all_packages`` management command."""
    management.call_command('update_all_packages')
    return {'result': 'Update packages OK'}
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from ansiblelint import AnsibleLintRule


class MismatchedBracketRule(AnsibleLintRule):
    """Flag any line whose counts of '{' and '}' differ."""

    id = 'ANSIBLE0003'
    shortdesc = 'Mismatched { and }'
    description = ('If lines contain more { than } or vice '
                   'versa then templating can fail nastily')
    tags = ['templating']

    def match(self, file, line):
        # An imbalance in either direction is suspicious for Jinja templating.
        opening = line.count("{")
        closing = line.count("}")
        return opening != closing
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.model
import frappe.utils
import json, os
from six import iteritems

'''
Handle RESTful requests that are mapped to the `/api/resource` route.

Requests via FrappeClient are also handled here.
'''

@frappe.whitelist()
def get_list(doctype, fields=None, filters=None, order_by=None,
	limit_start=None, limit_page_length=20):
	'''Returns a list of records by filters, fields, ordering and limit

	:param doctype: DocType of the data to be queried
	:param fields: fields to be returned. Default is `name`
	:param filters: filter list by this dict
	:param order_by: Order by this fieldname
	:param limit_start: Start at this index
	:param limit_page_length: Number of records to be returned (default 20)'''
	# ignore_permissions=False: permission checks are enforced inside get_list.
	return frappe.get_list(doctype, fields=fields, filters=filters, order_by=order_by,
		limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=False)

@frappe.whitelist()
def get(doctype, name=None, filters=None):
	'''Returns a document by name or filters

	:param doctype: DocType of the document to be returned
	:param name: return document of this `name`
	:param filters: If name is not set, filter by these values and return the first match'''
	if filters and not name:
		name = frappe.db.get_value(doctype, json.loads(filters))
		if not name:
			frappe.throw(_("No document found for given filters"))

	doc = frappe.get_doc(doctype, name)
	# Explicit read-permission check before serializing the document.
	if not doc.has_permission("read"):
		raise frappe.PermissionError

	return frappe.get_doc(doctype, name).as_dict()

@frappe.whitelist()
def get_value(doctype, fieldname, filters=None, as_dict=True, debug=False):
	'''Returns a value from a document

	:param doctype: DocType to be queried
	:param fieldname: Field to be returned (default `name`)
	:param filters: dict or string for identifying the record'''
	if not frappe.has_permission(doctype):
		frappe.throw(_("Not permitted"), frappe.PermissionError)

	try:
		filters = json.loads(filters)
	except ValueError:
		# name passed, not json
		pass

	try:
		fieldname = json.loads(fieldname)
	except ValueError:
		# name passed, not json
		pass

	return frappe.db.get_value(doctype, filters, fieldname, as_dict=as_dict, debug=debug)

@frappe.whitelist()
def set_value(doctype, name, fieldname, value=None):
	'''Set a value using get_doc, group of values

	:param doctype: DocType of the document
	:param name: name of the document
	:param fieldname: fieldname string or JSON / dict with key value pair
	:param value: value if fieldname is JSON / dict'''
	if fieldname!="idx" and fieldname in frappe.model.default_fields:
		frappe.throw(_("Cannot edit standard fields"))

	# NOTE(review): `basestring` exists only on Python 2; this module imports
	# six, so py3 support may be intended -- confirm.  Also note `if not value`
	# treats falsy values (0, False, "") as "no value", switching into the
	# JSON-fieldname branch -- verify that is the intended contract.
	if not value:
		values = fieldname
		if isinstance(fieldname, basestring):
			try:
				values = json.loads(fieldname)
			except ValueError:
				values = {fieldname: ''}
	else:
		values = {fieldname: value}

	doc = frappe.db.get_value(doctype, name, ["parenttype", "parent"], as_dict=True)
	if doc and doc.parent and doc.parenttype:
		# Child-table row: update it via its parent document so that the
		# parent's validations and save hooks run.
		doc = frappe.get_doc(doc.parenttype, doc.parent)
		child = doc.getone({"doctype": doctype, "name": name})
		child.update(values)
	else:
		doc = frappe.get_doc(doctype, name)
		doc.update(values)

	doc.save()

	return doc.as_dict()

@frappe.whitelist()
def insert(doc=None):
	'''Insert a document

	:param doc: JSON or dict object to be inserted'''
	if isinstance(doc, basestring):
		doc = json.loads(doc)

	if doc.get("parent") and doc.get("parenttype"):
		# inserting a child record
		parent = frappe.get_doc(doc.get("parenttype"), doc.get("parent"))
		parent.append(doc.get("parentfield"), doc)
		parent.save()
		return parent.as_dict()
	else:
		doc = frappe.get_doc(doc).insert()
		return doc.as_dict()

@frappe.whitelist()
def insert_many(docs=None):
	'''Insert multiple documents

	:param docs: JSON or list of dict objects to be inserted in one request'''
	if isinstance(docs, basestring):
		docs = json.loads(docs)

	out = []

	# Guard against unbounded request sizes.
	if len(docs) > 200:
		frappe.throw(_('Only 200 inserts allowed in one request'))

	for doc in docs:
		if doc.get("parent") and doc.get("parenttype"):
			# inserting a child record
			parent = frappe.get_doc(doc.get("parenttype"), doc.get("parent"))
			parent.append(doc.get("parentfield"), doc)
			parent.save()
			out.append(parent.name)
		else:
			doc = frappe.get_doc(doc).insert()
			out.append(doc.name)

	return out

@frappe.whitelist()
def save(doc):
	'''Update (save) an existing document

	:param doc: JSON or dict object with the properties of the document to be updated'''
	if isinstance(doc, basestring):
		doc = json.loads(doc)

	doc = frappe.get_doc(doc).save()

	return doc.as_dict()

@frappe.whitelist()
def rename_doc(doctype, old_name, new_name, merge=False):
	'''Rename document

	:param doctype: DocType of the document to be renamed
	:param old_name: Current `name` of the document to be renamed
	:param new_name: New `name` to be set'''
	new_name = frappe.rename_doc(doctype, old_name, new_name, merge=merge)
	return new_name

@frappe.whitelist()
def submit(doc):
	'''Submit a document

	:param doc: JSON or dict object to be submitted remotely'''
	if isinstance(doc, basestring):
		doc = json.loads(doc)

	doc = frappe.get_doc(doc)
	doc.submit()

	return doc.as_dict()

@frappe.whitelist()
def cancel(doctype, name):
	'''Cancel a document

	:param doctype: DocType of the document to be cancelled
	:param name: name of the document to be cancelled'''
	wrapper = frappe.get_doc(doctype, name)
	wrapper.cancel()

	return wrapper.as_dict()

@frappe.whitelist()
def delete(doctype, name):
	'''Delete a remote document

	:param doctype: DocType of the document to be deleted
	:param name: name of the document to be deleted'''
	frappe.delete_doc(doctype, name)

@frappe.whitelist()
def set_default(key, value, parent=None):
	"""set a user default value"""
	frappe.db.set_default(key, value, parent or frappe.session.user)
	# Defaults are cached per user; invalidate so the change is visible.
	frappe.clear_cache(user=frappe.session.user)

@frappe.whitelist()
def make_width_property_setter(doc):
	'''Set width Property Setter

	:param doc: Property Setter document with `width` property'''
	if isinstance(doc, basestring):
		doc = json.loads(doc)
	# Only a Property Setter that sets `width` may bypass permissions here.
	if doc["doctype"]=="Property Setter" and doc["property"]=="width":
		frappe.get_doc(doc).insert(ignore_permissions = True)

@frappe.whitelist()
def bulk_update(docs):
	'''Bulk update documents

	:param docs: JSON list of documents to be updated remotely. Each document must have `docname` property'''
	docs = json.loads(docs)
	failed_docs = []
	for doc in docs:
		try:
			# Strip routing keys; the remainder are the field updates.
			ddoc = {key: val for key, val in iteritems(doc) if key not in ['doctype', 'docname']}
			doctype = doc['doctype']
			docname = doc['docname']
			doc = frappe.get_doc(doctype, docname)
			doc.update(ddoc)
			doc.save()
		except:
			# Best-effort: report per-document failures instead of aborting.
			failed_docs.append({
				'doc': doc,
				'exc': frappe.utils.get_traceback()
			})
	return {'failed_docs': failed_docs}

@frappe.whitelist()
def has_permission(doctype, docname, perm_type="read"):
	'''Returns a JSON with data whether the document has the requested permission

	:param doctype: DocType of the document to be checked
	:param docname: `name` of the document to be checked
	:param perm_type: one of `read`, `write`, `create`, `submit`, `cancel`, `report`. Default is `read`'''
	# perm_type can be one of read, write, create, submit, cancel, report
	return {"has_permission": frappe.has_permission(doctype, perm_type.lower(), docname)}

@frappe.whitelist()
def get_password(doctype, name, fieldname):
	'''Return a password type property. Only applicable for System Managers

	:param doctype: DocType of the document that holds the password
	:param name: `name` of the document that holds the password
	:param fieldname: `fieldname` of the password property
	'''
	frappe.only_for("System Manager")
	return frappe.get_doc(doctype, name).get_password(fieldname)

@frappe.whitelist()
def get_js(items):
	'''Load JS code files.  Will also append translations
		and extend `frappe._messages`

	:param items: JSON list of paths of the js files to be loaded.'''
	items = json.loads(items)
	out = []
	for src in items:
		src = src.strip("/").split("/")

		# Path-traversal guard: only paths under assets/ are servable.
		if ".." in src or src[0] != "assets":
			frappe.throw(_("Invalid file path: {0}").format("/".join(src)))

		contentpath = os.path.join(frappe.local.sites_path, *src)
		with open(contentpath, "r") as srcfile:
			code = frappe.utils.cstr(srcfile.read())

		if frappe.local.lang != "en":
			messages = frappe.get_lang_dict("jsfile", contentpath)
			messages = json.dumps(messages)
			code += "\n\n$.extend(frappe._messages, {})".format(messages)

		out.append(code)

	return out
unknown
codeparrot/codeparrot-clean
""" Test Iterator Length Transparency Some functions or methods which accept general iterable arguments have optional, more efficient code paths if they know how many items to expect. For instance, map(func, iterable), will pre-allocate the exact amount of space required whenever the iterable can report its length. The desired invariant is: len(it)==len(list(it)). A complication is that an iterable and iterator can be the same object. To maintain the invariant, an iterator needs to dynamically update its length. For instance, an iterable such as xrange(10) always reports its length as ten, but it=iter(xrange(10)) starts at ten, and then goes to nine after it.next(). Having this capability means that map() can ignore the distinction between map(func, iterable) and map(func, iter(iterable)). When the iterable is immutable, the implementation can straight-forwardly report the original length minus the cumulative number of calls to next(). This is the case for tuples, xrange objects, and itertools.repeat(). Some containers become temporarily immutable during iteration. This includes dicts, sets, and collections.deque. Their implementation is equally simple though they need to permantently set their length to zero whenever there is an attempt to iterate after a length mutation. The situation slightly more involved whenever an object allows length mutation during iteration. Lists and sequence iterators are dynanamically updatable. So, if a list is extended during iteration, the iterator will continue through the new items. If it shrinks to a point before the most recent iteration, then no further items are available and the length is reported at zero. Reversed objects can also be wrapped around mutable objects; however, any appends after the current position are ignored. Any other approach leads to confusion and possibly returning the same item more than once. 
The iterators not listed above, such as enumerate and the other itertools, are not length transparent because they have no way to distinguish between iterables that report static length and iterators whose length changes with each call (i.e. the difference between enumerate('abc') and enumerate(iter('abc')). """ import unittest from test import test_support from itertools import repeat from collections import deque from __builtin__ import len as _len n = 10 def len(obj): try: return _len(obj) except TypeError: try: # note: this is an internal undocumented API, # don't rely on it in your own programs return obj.__length_hint__() except AttributeError: raise TypeError class TestInvariantWithoutMutations(unittest.TestCase): def test_invariant(self): it = self.it for i in reversed(xrange(1, n+1)): self.assertEqual(len(it), i) it.next() self.assertEqual(len(it), 0) self.assertRaises(StopIteration, it.next) self.assertEqual(len(it), 0) class TestTemporarilyImmutable(TestInvariantWithoutMutations): def test_immutable_during_iteration(self): # objects such as deques, sets, and dictionaries enforce # length immutability during iteration it = self.it self.assertEqual(len(it), n) it.next() self.assertEqual(len(it), n-1) self.mutate() self.assertRaises(RuntimeError, it.next) self.assertEqual(len(it), 0) ## ------- Concrete Type Tests ------- class TestRepeat(TestInvariantWithoutMutations): def setUp(self): self.it = repeat(None, n) def test_no_len_for_infinite_repeat(self): # The repeat() object can also be infinite self.assertRaises(TypeError, len, repeat(None)) class TestXrange(TestInvariantWithoutMutations): def setUp(self): self.it = iter(xrange(n)) class TestXrangeCustomReversed(TestInvariantWithoutMutations): def setUp(self): self.it = reversed(xrange(n)) class TestTuple(TestInvariantWithoutMutations): def setUp(self): self.it = iter(tuple(xrange(n))) ## ------- Types that should not be mutated during iteration ------- class TestDeque(TestTemporarilyImmutable): def 
setUp(self): d = deque(xrange(n)) self.it = iter(d) self.mutate = d.pop class TestDequeReversed(TestTemporarilyImmutable): def setUp(self): d = deque(xrange(n)) self.it = reversed(d) self.mutate = d.pop class TestDictKeys(TestTemporarilyImmutable): def setUp(self): d = dict.fromkeys(xrange(n)) self.it = iter(d) self.mutate = d.popitem class TestDictItems(TestTemporarilyImmutable): def setUp(self): d = dict.fromkeys(xrange(n)) self.it = d.iteritems() self.mutate = d.popitem class TestDictValues(TestTemporarilyImmutable): def setUp(self): d = dict.fromkeys(xrange(n)) self.it = d.itervalues() self.mutate = d.popitem class TestSet(TestTemporarilyImmutable): def setUp(self): d = set(xrange(n)) self.it = iter(d) self.mutate = d.pop ## ------- Types that can mutate during iteration ------- class TestList(TestInvariantWithoutMutations): def setUp(self): self.it = iter(range(n)) def test_mutation(self): d = range(n) it = iter(d) it.next() it.next() self.assertEqual(len(it), n-2) d.append(n) self.assertEqual(len(it), n-1) # grow with append d[1:] = [] self.assertEqual(len(it), 0) self.assertEqual(list(it), []) d.extend(xrange(20)) self.assertEqual(len(it), 0) class TestListReversed(TestInvariantWithoutMutations): def setUp(self): self.it = reversed(range(n)) def test_mutation(self): d = range(n) it = reversed(d) it.next() it.next() self.assertEqual(len(it), n-2) d.append(n) self.assertEqual(len(it), n-2) # ignore append d[1:] = [] self.assertEqual(len(it), 0) self.assertEqual(list(it), []) # confirm invariant d.extend(xrange(20)) self.assertEqual(len(it), 0) ## -- Check to make sure exceptions are not suppressed by __length_hint__() class BadLen(object): def __iter__(self): return iter(range(10)) def __len__(self): raise RuntimeError('hello') class BadLengthHint(object): def __iter__(self): return iter(range(10)) def __length_hint__(self): raise RuntimeError('hello') class NoneLengthHint(object): def __iter__(self): return iter(range(10)) def __length_hint__(self): return 
None class TestLengthHintExceptions(unittest.TestCase): def test_issue1242657(self): self.assertRaises(RuntimeError, list, BadLen()) self.assertRaises(RuntimeError, list, BadLengthHint()) self.assertRaises(RuntimeError, [].extend, BadLen()) self.assertRaises(RuntimeError, [].extend, BadLengthHint()) self.assertRaises(RuntimeError, zip, BadLen()) self.assertRaises(RuntimeError, zip, BadLengthHint()) self.assertRaises(RuntimeError, filter, None, BadLen()) self.assertRaises(RuntimeError, filter, None, BadLengthHint()) self.assertRaises(RuntimeError, map, chr, BadLen()) self.assertRaises(RuntimeError, map, chr, BadLengthHint()) b = bytearray(range(10)) self.assertRaises(RuntimeError, b.extend, BadLen()) self.assertRaises(RuntimeError, b.extend, BadLengthHint()) def test_invalid_hint(self): # Make sure an invalid result doesn't muck-up the works self.assertEqual(list(NoneLengthHint()), list(range(10))) def test_main(): unittests = [ TestRepeat, TestXrange, TestXrangeCustomReversed, TestTuple, TestDeque, TestDequeReversed, TestDictKeys, TestDictItems, TestDictValues, TestSet, TestList, TestListReversed, TestLengthHintExceptions, ] test_support.run_unittest(*unittests) if __name__ == "__main__": test_main()
unknown
codeparrot/codeparrot-clean
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * This is the interface for flush/sync operations.
 * Consult the Hadoop filesystem specification for the definition of the
 * semantics of these operations.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Syncable {

  /** Flush out the data in client's user buffer. After the return of
   * this call, new readers will see the data.
   * @throws IOException if any error occurs
   */
  void hflush() throws IOException;

  /** Similar to posix fsync, flush out the data in client's user buffer
   * all the way to the disk device (but the disk may have it in its cache).
   * @throws IOException if error occurs
   */
  void hsync() throws IOException;
}
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java
//! Manages the low-level pushing and popping of stack frames and the (de)allocation of local variables.
//! For handling of argument passing and return values, see the `call` module.
use std::cell::Cell;
use std::{fmt, mem};

use either::{Either, Left, Right};
use rustc_hir as hir;
use rustc_hir::definitions::DefPathData;
use rustc_index::IndexVec;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_mir_dataflow::impls::always_storage_live_locals;
use rustc_span::Span;
use tracing::field::Empty;
use tracing::{info_span, instrument, trace};

use super::{
    AllocId, CtfeProvenance, Immediate, InterpCx, InterpResult, Machine, MemPlace, MemPlaceMeta,
    MemoryKind, Operand, PlaceTy, Pointer, Provenance, ReturnAction, Scalar, from_known_layout,
    interp_ok, throw_ub, throw_unsup,
};
use crate::{enter_trace_span, errors};

// The Phantomdata exists to prevent this type from being `Send`. If it were sent across a thread
// boundary and dropped in the other thread, it would exit the span in the other thread.
struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);

impl SpanGuard {
    /// By default a `SpanGuard` does nothing.
    fn new() -> Self {
        Self(tracing::Span::none(), std::marker::PhantomData)
    }

    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
    /// `Frame` by creating a dummy span to being with and then entering it once the frame has
    /// been pushed.
    fn enter(&mut self, span: tracing::Span) {
        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
        // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
        // can't protect the tracing stack, but that'll just lead to weird logging, no actual
        // problems.
        *self = Self(span, std::marker::PhantomData);
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.enter(id);
        });
    }
}

impl Drop for SpanGuard {
    fn drop(&mut self) {
        // Exit the span (if any) when the guard — and hence its frame — goes away.
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.exit(id);
        });
    }
}

/// A stack frame.
pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub(super) body: &'tcx mir::Body<'tcx>,

    /// The def_id and args of the current function.
    pub(super) instance: ty::Instance<'tcx>,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Where to continue when returning from this function.
    return_cont: ReturnContinuation,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller. This place is to be interpreted relative to the
    /// *caller's* stack frame. We use a `PlaceTy` instead of an `MPlaceTy` since this
    /// avoids having to move *all* return places into Miri's memory.
    pub return_place: PlaceTy<'tcx, Prov>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
    ///
    /// Do *not* access this directly; always go through the machine hook!
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,

    /// The span of the `tracing` crate is stored here.
    /// When the guard is dropped, the span is exited. This gives us
    /// a full stack trace on all tracing statements.
    tracing_span: SpanGuard,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// If this is `Right`, we are not currently executing any particular statement in
    /// this frame (can happen e.g. during frame initialization, and during unwinding on
    /// frames without cleanup code).
    ///
    /// Needs to be public because ConstProp does unspeakable things to it.
    pub(super) loc: Either<mir::Location, Span>,
}

/// Where and how to continue when returning/unwinding from the current function.
#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
pub enum ReturnContinuation {
    /// Jump to the next block in the caller, or cause UB if None (that's a function
    /// that may never return).
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
    /// The root frame of the stack: nowhere else to jump to, so we stop.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throw away
    /// the entire `ecx` when it is done).
    Stop { cleanup: bool },
}

/// Return type of [`InterpCx::pop_stack_frame_raw`].
pub struct StackPopInfo<'tcx, Prov: Provenance> {
    /// Additional information about the action to be performed when returning from the popped
    /// stack frame.
    pub return_action: ReturnAction,

    /// [`return_cont`](Frame::return_cont) of the popped stack frame.
    pub return_cont: ReturnContinuation,

    /// [`return_place`](Frame::return_place) of the popped stack frame.
    pub return_place: PlaceTy<'tcx, Prov>,
}

/// State of a local variable including a memoized layout
#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
    // Current liveness/value of the local.
    value: LocalValue<Prov>,
    /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
    /// Avoids computing the layout of locals that are never actually initialized.
    layout: Cell<Option<TyAndLayout<'tcx>>>,
}

impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LocalState")
            .field("value", &self.value)
            .field("ty", &self.layout.get().map(|l| l.ty))
            .finish()
    }
}

/// Current value of a local variable
///
/// This does not store the type of the local; the type is given by `body.local_decls` and can never
/// change, so by not storing here we avoid having to maintain that as an invariant.
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Prov>),
}

impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
    /// Mark the local live with an uninitialized immediate value.
    pub fn make_live_uninit(&mut self) {
        self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
    }

    /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
    /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
    /// private.
    pub fn as_mplace_or_imm(
        &self,
    ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
        match self.value {
            LocalValue::Dead => None,
            LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
            LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
        }
    }

    /// Read the local's value or error if the local is not yet live or not live anymore.
    #[inline(always)]
    pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
        match &self.value {
            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
            LocalValue::Live(val) => interp_ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    #[inline(always)]
    pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
        match &mut self.value {
            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
            LocalValue::Live(val) => interp_ok(val),
        }
    }
}

/// What we store about a frame in an interpreter backtrace.
#[derive(Clone, Debug)]
pub struct FrameInfo<'tcx> {
    pub instance: ty::Instance<'tcx>,
    pub span: Span,
}

// FIXME: only used by miri, should be removed once translatable.
impl<'tcx> fmt::Display for FrameInfo<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        ty::tls::with(|tcx| {
            if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
                write!(f, "inside closure")
            } else {
                // Note: this triggers a `must_produce_diag` state, which means that if we ever
                // get here we must emit a diagnostic. We should never display a `FrameInfo` unless
                // we actually want to emit a warning or error to the user.
                write!(f, "inside `{}`", self.instance)
            }
        })
    }
}

impl<'tcx> FrameInfo<'tcx> {
    /// Convert this backtrace frame into a diagnostic note.
    pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
        let span = self.span;
        if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
            errors::FrameNote {
                where_: "closure",
                span,
                instance: String::new(),
                times: 0,
                has_label: false,
            }
        } else {
            let instance = format!("{}", self.instance);
            // Note: this triggers a `must_produce_diag` state, which means that if we ever get
            // here we must emit a diagnostic. We should never display a `FrameInfo` unless we
            // actually want to emit a warning or error to the user.
            errors::FrameNote { where_: "instance", span, instance, times: 0, has_label: false }
        }
    }
}

impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
    /// Attach machine-specific `extra` data, converting a plain frame into the machine's frame type.
    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
        Frame {
            body: self.body,
            instance: self.instance,
            return_cont: self.return_cont,
            return_place: self.return_place,
            locals: self.locals,
            loc: self.loc,
            extra,
            tracing_span: self.tracing_span,
        }
    }
}

impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
    /// Get the current location within the Frame.
    ///
    /// If this is `Right`, we are not currently executing any particular statement in
    /// this frame (can happen e.g. during frame initialization, and during unwinding on
    /// frames without cleanup code).
    ///
    /// Used by [priroda](https://github.com/oli-obk/priroda).
    pub fn current_loc(&self) -> Either<mir::Location, Span> {
        self.loc
    }

    pub fn body(&self) -> &'tcx mir::Body<'tcx> {
        self.body
    }

    pub fn instance(&self) -> ty::Instance<'tcx> {
        self.instance
    }

    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
        self.loc.left().map(|loc| self.body.source_info(loc))
    }

    /// Span of the current instruction, or of the whole frame if not at a statement.
    pub fn current_span(&self) -> Span {
        match self.loc {
            Left(loc) => self.body.source_info(loc).span,
            Right(span) => span,
        }
    }

    pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> {
        // We first try to get a HirId via the current source scope,
        // and fall back to `body.source`.
        self.current_source_info()
            .and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data {
                mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                mir::ClearCrossCrate::Clear => None,
            })
            .or_else(|| {
                let def_id = self.body.source.def_id().as_local();
                def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id))
            })
    }

    /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
    /// sanity check to detect bugs where we mix up which stack frame a place refers to.
    #[inline(always)]
    pub(super) fn locals_addr(&self) -> usize {
        self.locals.raw.as_ptr().addr()
    }

    #[must_use]
    pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
        let mut frames = Vec::new();
        // This deliberately does *not* honor `requires_caller_location` since it is used for much
        // more than just panics.
        for frame in stack.iter().rev() {
            let span = match frame.loc {
                Left(loc) => {
                    // If the stacktrace passes through MIR-inlined source scopes, add them.
                    let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
                    let mut scope_data = &frame.body.source_scopes[scope];
                    while let Some((instance, call_span)) = scope_data.inlined {
                        frames.push(FrameInfo { span, instance });
                        span = call_span;
                        scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
                    }
                    span
                }
                Right(span) => span,
            };
            frames.push(FrameInfo { span, instance: frame.instance });
        }
        trace!("generate stacktrace: {:#?}", frames);
        frames
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Very low-level helper that pushes a stack frame without initializing
    /// the arguments or local variables.
    ///
    /// The high-level version of this is `init_stack_frame`.
    #[instrument(skip(self, body, return_place, return_cont), level = "debug")]
    pub(crate) fn push_stack_frame_raw(
        &mut self,
        instance: ty::Instance<'tcx>,
        body: &'tcx mir::Body<'tcx>,
        return_place: &PlaceTy<'tcx, M::Provenance>,
        return_cont: ReturnContinuation,
    ) -> InterpResult<'tcx> {
        trace!("body: {:#?}", body);

        // We can push a `Root` frame if and only if the stack is empty.
        debug_assert_eq!(
            self.stack().is_empty(),
            matches!(return_cont, ReturnContinuation::Stop { .. })
        );

        // First push a stack frame so we have access to `instantiate_from_current_frame` and other
        // `self.frame()`-based functions.
        let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
        let locals = IndexVec::from_elem(dead_local, &body.local_decls);
        let pre_frame = Frame {
            body,
            loc: Right(body.span), // Span used for errors caused during preamble.
            return_cont,
            return_place: return_place.clone(),
            locals,
            instance,
            tracing_span: SpanGuard::new(),
            extra: (),
        };
        let frame = M::init_frame(self, pre_frame)?;
        self.stack_mut().push(frame);

        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
        for &const_ in body.required_consts() {
            // We can't use `eval_mir_constant` here as that assumes that all required consts have
            // already been checked, so we need a separate tracing call.
            let _trace = enter_trace_span!(M, const_eval::required_consts, ?const_.const_);
            let c =
                self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
            c.eval(*self.tcx, self.typing_env, const_.span).map_err(|err| {
                err.emit_note(*self.tcx);
                err
            })?;
        }

        // Finish things up.
        M::after_stack_push(self)?;
        self.frame_mut().loc = Left(mir::Location::START);
        // `tracing_separate_thread` is used to instruct the tracing_chrome [tracing::Layer] in Miri
        // to put the "frame" span on a separate trace thread/line than other spans, to make the
        // visualization in <https://ui.perfetto.dev> easier to interpret. It is set to a value of
        // [tracing::field::Empty] so that other tracing layers (e.g. the logger) will ignore it.
        let span = info_span!("frame", tracing_separate_thread = Empty, frame = %instance);
        self.frame_mut().tracing_span.enter(span);

        interp_ok(())
    }

    /// Low-level helper that pops a stack frame from the stack and returns some information about
    /// it.
    ///
    /// This also deallocates locals, if necessary.
    /// `copy_ret_val` gets called after the frame has been taken from the stack but before the locals have been deallocated.
    ///
    /// [`M::before_stack_pop`] and [`M::after_stack_pop`] are called by this function
    /// automatically.
    ///
    /// The high-level version of this is `return_from_current_stack_frame`.
    ///
    /// [`M::before_stack_pop`]: Machine::before_stack_pop
    /// [`M::after_stack_pop`]: Machine::after_stack_pop
    pub(super) fn pop_stack_frame_raw(
        &mut self,
        unwinding: bool,
        copy_ret_val: impl FnOnce(&mut Self, &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> {
        M::before_stack_pop(self)?;

        let frame =
            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");

        // Copy return value (unless we are unwinding).
        if !unwinding {
            copy_ret_val(self, &frame.return_place)?;
        }

        let return_cont = frame.return_cont;
        let return_place = frame.return_place.clone();

        // Cleanup: deallocate locals.
        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // We do this while the frame is still on the stack, so errors point to the callee.
        let cleanup = match return_cont {
            ReturnContinuation::Goto { .. } => true,
            ReturnContinuation::Stop { cleanup, .. } => cleanup,
        };

        let return_action = if cleanup {
            // We need to take the locals out, since we need to mutate while iterating.
            for local in &frame.locals {
                self.deallocate_local(local.value)?;
            }

            // Call the machine hook, which determines the next steps.
            let return_action = M::after_stack_pop(self, frame, unwinding)?;
            assert_ne!(return_action, ReturnAction::NoCleanup);
            return_action
        } else {
            // We also skip the machine hook when there's no cleanup. This not a real "pop" anyway.
            ReturnAction::NoCleanup
        };

        interp_ok(StackPopInfo { return_action, return_cont, return_place })
    }

    /// In the current stack frame, mark all locals as live that are not arguments and don't have
    /// `Storage*` annotations (this includes the return place).
    pub(crate) fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
        self.storage_live(mir::RETURN_PLACE)?;

        let body = self.body();
        let always_live = always_storage_live_locals(body);
        for local in body.vars_and_temps_iter() {
            if always_live.contains(local) {
                self.storage_live(local)?;
            }
        }
        interp_ok(())
    }

    pub fn storage_live_dyn(
        &mut self,
        local: mir::Local,
        meta: MemPlaceMeta<M::Provenance>,
    ) -> InterpResult<'tcx> {
        trace!("{:?} is now live", local);

        // We avoid `ty.is_trivially_sized` since that does something expensive for ADTs.
        fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
            match ty.kind() {
                ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
                | ty::Uint(_)
                | ty::Int(_)
                | ty::Bool
                | ty::Float(_)
                | ty::FnDef(..)
                | ty::FnPtr(..)
                | ty::RawPtr(..)
                | ty::Char
                | ty::Ref(..)
                | ty::Coroutine(..)
                | ty::CoroutineWitness(..)
                | ty::Array(..)
                | ty::Closure(..)
                | ty::CoroutineClosure(..)
                | ty::Never
                | ty::Error(_) => true,

                ty::Str | ty::Slice(_) | ty::Dynamic(_, _) | ty::Foreign(..) => false,

                ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)),

                ty::Pat(ty, ..) => is_very_trivially_sized(*ty),

                // We don't want to do any queries, so there is not much we can do with ADTs.
                ty::Adt(..) => false,

                ty::UnsafeBinder(ty) => is_very_trivially_sized(ty.skip_binder()),

                ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,

                ty::Infer(ty::TyVar(_)) => false,

                ty::Bound(..)
                | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
                    bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
                }
            }
        }

        // This is a hot function, we avoid computing the layout when possible.
        // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
        let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
            None
        } else {
            // We need the layout.
            let layout = self.layout_of_local(self.frame(), local, None)?;
            if layout.is_sized() { None } else { Some(layout) }
        };

        let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
            if !meta.has_meta() {
                throw_unsup!(UnsizedLocal);
            }
            // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
            let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
            Operand::Indirect(*dest_place.mplace())
        } else {
            // Just make this an efficient immediate.
            assert!(!meta.has_meta()); // we're dropping the metadata
            // Make sure the machine knows this "write" is happening. (This is important so that
            // races involving local variable allocation can be detected by Miri.)
            M::after_local_write(self, local, /*storage_live*/ true)?;
            // Note that not calling `layout_of` here does have one real consequence:
            // if the type is too big, we'll only notice this when the local is actually initialized,
            // which is a bit too late -- we should ideally notice this already here, when the memory
            // is conceptually allocated. But given how rare that error is and that this is a hot function,
            // we accept this downside for now.
            Operand::Immediate(Immediate::Uninit)
        });

        // If the local is already live, deallocate its old memory.
        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
        self.deallocate_local(old)?;
        interp_ok(())
    }

    /// Mark a storage as live, killing the previous content.
    #[inline(always)]
    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
        self.storage_live_dyn(local, MemPlaceMeta::None)
    }

    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        // If the local is already dead, this is a NOP.
        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
        self.deallocate_local(old)?;
        interp_ok(())
    }

    // Free the backing allocation of an indirect local, if it has one.
    fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            // All locals have a backing allocation, even if the allocation is empty
            // due to the local having ZST type. Hence we can `unwrap`.
            trace!(
                "deallocating local {:?}: {:?}",
                local,
                // Locals always have a `alloc_id` (they are never the result of a int2ptr).
                self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
            );
            self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
        };
        interp_ok(())
    }

    /// This is public because it is used by [Aquascope](https://github.com/cognitive-engineering-lab/aquascope/)
    /// to analyze all the locals in a stack frame.
    #[inline(always)]
    pub fn layout_of_local(
        &self,
        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        let state = &frame.locals[local];
        if let Some(layout) = state.layout.get() {
            return interp_ok(layout);
        }

        let layout = from_known_layout(self.tcx, self.typing_env, layout, || {
            let local_ty = frame.body.local_decls[local].ty;
            let local_ty =
                self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
            self.layout_of(local_ty).into()
        })?;

        // Layouts of locals are requested a lot, so we cache them.
        state.layout.set(Some(layout));
        interp_ok(layout)
    }
}

impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
    /// Debug-print this local's state, collecting referenced allocation IDs into `allocs`.
    pub(super) fn print(
        &self,
        allocs: &mut Vec<Option<AllocId>>,
        fmt: &mut std::fmt::Formatter<'_>,
    ) -> std::fmt::Result {
        match self.value {
            LocalValue::Dead => write!(fmt, " is dead")?,
            LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
                write!(fmt, " is uninitialized")?
            }
            LocalValue::Live(Operand::Indirect(mplace)) => {
                write!(
                    fmt,
                    " by {} ref {:?}:",
                    match mplace.meta {
                        MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
                        MemPlaceMeta::None => String::new(),
                    },
                    mplace.ptr,
                )?;
                allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
            }
            LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                write!(fmt, " {val:?}")?;
                if let Scalar::Ptr(ptr, _size) = val {
                    allocs.push(ptr.provenance.get_alloc_id());
                }
            }
            LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                write!(fmt, " ({val1:?}, {val2:?})")?;
                if let Scalar::Ptr(ptr, _size) = val1 {
                    allocs.push(ptr.provenance.get_alloc_id());
                }
                if let Scalar::Ptr(ptr, _size) = val2 {
                    allocs.push(ptr.provenance.get_alloc_id());
                }
            }
        }

        Ok(())
    }
}
rust
github
https://github.com/rust-lang/rust
compiler/rustc_const_eval/src/interpret/stack.rs
# util/queue.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object.  The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.

This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely rare cases be
invoked within the ``get()`` method of the Queue itself, producing a
``put()`` inside the ``get()`` and therefore a reentrant condition.

"""

from collections import deque
from time import time as _time

from .compat import threading

__all__ = ['Empty', 'Full', 'Queue']


class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."

    pass


class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."

    pass


class Queue:
    def __init__(self, maxsize=0):
        """Initialize a queue object with a given maximum size.

        If `maxsize` is <= 0, the queue size is infinite.
        """
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the two conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        # An RLock (rather than a plain Lock) is used so that a weakref
        # callback firing inside get() can reentrantly call put().
        self.mutex = threading.RLock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        # Using the lock as a context manager guarantees release even if
        # the body raises, unlike manual acquire()/release() pairs.
        with self.mutex:
            return self._qsize()

    def empty(self):
        """Return True if the queue is empty, False otherwise (not
        reliable!)."""
        with self.mutex:
            return self._empty()

    def full(self):
        """Return True if the queue is full, False otherwise (not
        reliable!)."""
        with self.mutex:
            return self._full()

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until a free slot is
        available. If `timeout` is a positive number, it blocks at
        most `timeout` seconds and raises the ``Full`` exception if no
        free slot was available within that time.  Otherwise (`block`
        is false), put an item on the queue if a free slot is
        immediately available, else raise the ``Full`` exception
        (`timeout` is ignored in that case).

        :raises Full: if no free slot becomes available in time.
        :raises ValueError: if `timeout` is negative.
        """
        # `with` on a Condition acquires/releases the shared mutex.
        with self.not_full:
            if not block:
                if self._full():
                    raise Full
            elif timeout is None:
                while self._full():
                    self.not_full.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._full():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
            self._put(item)
            self.not_empty.notify()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the ``Full`` exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until an item is available. If
        `timeout` is a positive number, it blocks at most `timeout`
        seconds and raises the ``Empty`` exception if no item was
        available within that time.  Otherwise (`block` is false),
        return an item if one is immediately available, else raise the
        ``Empty`` exception (`timeout` is ignored in that case).

        :raises Empty: if no item becomes available in time.
        :raises ValueError: if `timeout` is negative.
        """
        with self.not_empty:
            if not block:
                if self._empty():
                    raise Empty
            elif timeout is None:
                while self._empty():
                    self.not_empty.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._empty():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the ``Empty`` exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()

    def _qsize(self):
        return len(self.queue)

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Check whether the queue is full
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
unknown
codeparrot/codeparrot-clean
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # インストール 使用しているDeep Learningライブラリに対して、🤗 Transformersをインストールしてキャッシュを設定、そしてオプションでオフラインで実行できるように 🤗 Transformersを設定します。 🤗 TransformersはPython 3.10+とPyTorch 2.4+で動作確認しています。 使用しているDeep Learningライブラリに合わせて、以下のインストール方法に従ってください: * [PyTorch](https://pytorch.org/get-started/locally/)のインストール手順。 ## pipでのインストール 🤗 Transformersを[仮想環境](https://docs.python.org/3/library/venv.html)にインストールする必要があります。 もし、Pythonの仮想環境に馴染みがない場合は、この[ガイド](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)をご覧ください。仮想環境によって異なるプロジェクトの管理がより簡単になり、依存関係間の互換性の問題を回避できます。 まず、プロジェクトディレクトリに仮想環境を作成することから始めましょう: ```bash python -m venv .env ``` 仮想環境を起動しましょう。LinuxとMacOsの場合は以下のコマンドで起動します: ```bash source .env/bin/activate ``` Windowsで仮想環境を起動します ```bash .env/Scripts/activate ``` これで、次のコマンドで🤗 Transformersをインストールする準備が整いました: ```bash pip install transformers ``` CPU対応のみ必要な場合、🤗 TransformersとDeep Learningライブラリを1行でインストールできるようになっていて便利です。例えば、🤗 TransformersとPyTorchを以下のように一緒にインストールできます: ```bash pip install transformers[torch] ``` 最後に、以下のコマンドを実行することで🤗 Transformersが正しくインストールされているかを確認します。学習済みモデルがダウンロードされます: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` その後、ラベルとスコアが出力されます: ```bash [{'label': 'POSITIVE', 'score': 
0.9998704791069031}] ``` ## ソースからのインストール 以下のコマンドでソースから🤗 Transformersをインストールします: ```bash pip install git+https://github.com/huggingface/transformers ``` このコマンドは最新の安定版ではなく、開発における最新の`main`バージョンをインストールします。`main`バージョンは最新の開発状況に対応するのに便利です。例えば、最後の公式リリース以降にバグが修正されたが、新しいリリースがまだ展開されていない場合などです。しかし、これは`main`バージョンが常に安定しているとは限らないことを意味します。私たちは`main`バージョンの運用を維持するよう努め、ほとんどの問題は通常、数時間から1日以内に解決されます。もし問題に遭遇した場合は、より早く修正できるように[Issue](https://github.com/huggingface/transformers/issues)を作成してください! 以下のコマンドを実行して、🤗 Transformersが正しくインストールされているかどうかを確認します: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 編集可能なインストール 必要に応じて、編集可能なインストールをします: * ソースコードの`main`バージョンを使います。 * 🤗 Transformersにコントリビュートし、コードの変更をテストする必要があります。 以下のコマンドでレポジトリをクローンして、🤗 Transformersをインストールします: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 上記のコマンドは、レポジトリをクローンしたフォルダとPythonのライブラリをパスをリンクします。Pythonは通常のライブラリパスに加えて、あなたがクローンしたフォルダの中も見るようになります。例えば、Pythonパッケージが通常、`~/anaconda3/envs/main/lib/python3.10/site-packages/`にインストールされている場合、Pythonはクローンしたフォルダも検索するようになります: `~/transformers/`. <Tip warning={true}> ライブラリーを使い続けたい場合は、transformersフォルダーを保持しつづける必要があります。 </Tip> これで、次のコマンドで簡単にクローンを🤗 Transformersの最新版に更新できます: ```bash cd ~/transformers/ git pull ``` Python環境は次回の実行時に🤗 Transformersの`main`バージョンを見つけるようになります。 ## condaでのインストール `conda-forge`のcondaチャンネルからインストールします: ```bash conda install conda-forge::transformers ``` ## キャッシュの設定 学習済みモデルはダウンロードされ、ローカルにキャッシュされます: `~/.cache/huggingface/hub`. これはシェル環境変数`HF_HUB_CACHE`で指定されるデフォルトのディレクトリです。Windowsでは、デフォルトのディレクトリは`C:\Users\username\.cache\huggingface\hub`になっています。異なるキャッシュディレクトリを指定するために、以下のシェル環境変数を変更することが可能です。優先度は以下の順番に対応します: 1. シェル環境変数 (デフォルト): `HF_HUB_CACHE`. 2. シェル環境変数: `HF_HOME`. 3. シェル環境変数: `XDG_CACHE_HOME` + `/huggingface`. 
## オフラインモード 🤗 Transformersはローカルファイルのみを使用することでファイアウォールやオフラインの環境でも動作させることができます。この動作を有効にするためには、環境変数`HF_HUB_OFFLINE=1`を設定します。 <Tip> 環境変数`HF_DATASETS_OFFLINE=1`を設定し、オフライントレーニングワークフローに[🤗 Datasets](https://huggingface.co/docs/datasets/)を追加します。 </Tip> 例えば、外部インスタンスに対してファイアウォールで保護された通常のネットワーク上でプログラムを実行する場合、通常以下のようなコマンドで実行することになります: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` オフラインインスタンスでこの同じプログラムを実行します: ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` このスクリプトは、ローカルファイルのみを検索することが分かっているので、ハングアップしたりタイムアウトを待ったりすることなく実行されるはずです。 ### オフラインで使用するためにモデルやトークナイザーを取得する オフラインで🤗 Transformersを使用するもう1つの方法は、前もってファイルをダウンロードしておき、オフラインで使用する必要があるときにそのローカルパスを指定することです。これには3つの方法があります: * [Model Hub](https://huggingface.co/models)のユーザーインターフェース上から↓アイコンをクリックしてファイルをダウンロードする方法。 ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * [`PreTrainedModel.from_pretrained`]および[`PreTrainedModel.save_pretrained`]のワークフローを使用する方法: 1. [`PreTrainedModel.from_pretrained`]で前もってファイルをダウンロードします: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. [`PreTrainedModel.save_pretrained`]で指定されたディレクトリにファイルを保存しておきます: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. 
オフラインにある時、[`PreTrainedModel.from_pretrained`]に指定したディレクトリからファイルをリロードします: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * プログラム的に[huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub)ライブラリを用いて、ファイルをダウンロードする方法: 1. 仮想環境に`huggingface_hub`ライブラリをインストールします: ```bash python -m pip install huggingface_hub ``` 2. 指定のパスにファイルをダウンロードするために、[`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub)関数を使用します。例えば、以下のコマンドで、[T0](https://huggingface.co/bigscience/T0_3B)モデルの`config.json`ファイルを指定のパスにダウンロードできます: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` ファイルがダウンロードされ、ローカルにキャッシュされたら、そのローカルパスを指定してファイルをロードして使用します: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Hubに保存されているファイルをダウンロードする方法の詳細については、[How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream)セクションを参照してください。 </Tip>
unknown
github
https://github.com/huggingface/transformers
docs/source/ja/installation.md
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package sql import ( "context" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/hints" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" "github.com/cockroachdb/cockroach/pkg/sql/prep" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/fsm" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" "github.com/lib/pq/oid" ) func (ex *connExecutor) execPrepare( ctx context.Context, parseCmd PrepareStmt, ) (fsm.Event, fsm.EventPayload) { retErr := func(err error) (fsm.Event, fsm.EventPayload) { return ex.makeErrEvent(err, parseCmd.AST) } // Preparing needs a transaction because it needs to retrieve db/table // descriptors for type checking. This implicit txn will be open until // the Sync message is handled. if _, isNoTxn := ex.machine.CurState().(stateNoTxn); isNoTxn { // The one exception is that we must not open a new transaction when // preparing SHOW COMMIT TIMESTAMP. If we did, it would destroy the // information about the previous transaction. We expect to execute // this command in NoTxn. 
if _, ok := parseCmd.AST.(*tree.ShowCommitTimestamp); !ok { return ex.beginImplicitTxn(ctx, parseCmd.AST, ex.QualityOfService()) } } else if _, isAbortedTxn := ex.machine.CurState().(stateAborted); isAbortedTxn { if !ex.isAllowedInAbortedTxn(parseCmd.AST) { return retErr(sqlerrors.NewTransactionAbortedError("" /* customMsg */)) } } // Check if we need to auto-commit the transaction due to DDL. if ev, payload := ex.maybeAutoCommitBeforeDDL(ctx, parseCmd.AST); ev != nil { return ev, payload } ctx, sp := tracing.ChildSpan(ctx, "prepare stmt") defer sp.Finish() // The anonymous statement can be overwritten. if parseCmd.Name != "" { if ex.extraTxnState.prepStmtsNamespace.prepStmts.Has(parseCmd.Name) { err := pgerror.Newf( pgcode.DuplicatePreparedStatement, "prepared statement %q already exists", parseCmd.Name, ) return retErr(err) } } else { // Deallocate the unnamed statement, if it exists. ex.deletePreparedStmt(ctx, "") } var statementHintsCache *hints.StatementHintsCache if ex.executorType != executorTypeInternal { statementHintsCache = ex.server.cfg.StatementHintsCache } stmt := makeStatement( ctx, parseCmd.Statement, ex.server.cfg.GenerateID(), tree.FmtFlags(tree.QueryFormattingForFingerprintsMask.Get(ex.server.cfg.SV())), statementHintsCache, ) _, err := ex.addPreparedStmt( ctx, parseCmd.Name, stmt, parseCmd.TypeHints, parseCmd.RawTypeHints, prep.StatementOriginWire, ) if err != nil { return retErr(err) } return nil, nil } // addPreparedStmt creates a new prep.Statement with the provided name using // the given query. The new prepared statement is added to the connExecutor and // also returned. It is illegal to call this when a statement with that name // already exists (even for anonymous prepared statements). // // placeholderHints are used to assist in inferring placeholder types. The // rawTypeHints are optional and represent OIDs indicated for the placeholders // coming from the client via the wire protocol. 
func (ex *connExecutor) addPreparedStmt( ctx context.Context, name string, stmt Statement, placeholderHints tree.PlaceholderTypes, rawTypeHints []oid.Oid, origin prep.StatementOrigin, ) (*prep.Statement, error) { if ex.extraTxnState.prepStmtsNamespace.prepStmts.Has(name) { return nil, pgerror.Newf( pgcode.DuplicatePreparedStatement, "prepared statement %q already exists", name, ) } // Prepare the query. This completes the typing of placeholders. prepared, err := ex.prepare(ctx, stmt, placeholderHints, rawTypeHints, origin) if err != nil { return nil, err } if len(prepared.TypeHints) > pgwirebase.MaxPreparedStatementArgs { prepared.MemAcc().Close(ctx) return nil, pgwirebase.NewProtocolViolationErrorf( "more than %d arguments to prepared statement: %d", pgwirebase.MaxPreparedStatementArgs, len(prepared.TypeHints)) } if err := prepared.MemAcc().Grow(ctx, int64(len(name))); err != nil { prepared.MemAcc().Close(ctx) return nil, err } ex.extraTxnState.prepStmtsNamespace.prepStmts.Add(name, prepared, prepared.MemAcc().Allocated()) // Remember the inferred placeholder types so they can be reported on // Describe. First, try to preserve the hints sent by the client. prepared.InferredTypes = make([]oid.Oid, len(prepared.Types)) copy(prepared.InferredTypes, rawTypeHints) for i, it := range prepared.InferredTypes { // If the client did not provide an OID type hint, then infer the OID. if it == 0 || it == oid.T_unknown { if t, ok := prepared.ValueType(tree.PlaceholderIdx(i)); ok { prepared.InferredTypes[i] = t.Oid() } } } return prepared, nil } // prepare prepares the given statement. This is used to create the plan in the // "extended" pgwire protocol. // // placeholderHints may contain partial type information for placeholders. // prepare will populate the missing types. It can be nil. 
func (ex *connExecutor) prepare( ctx context.Context, stmt Statement, placeholderHints tree.PlaceholderTypes, rawTypeHints []oid.Oid, origin prep.StatementOrigin, ) (_ *prep.Statement, retErr error) { prepared := prep.NewStatement(origin, ex.sessionPreparedMon.MakeBoundAccount()) defer func() { // Make sure to close the memory account if an error is returned. if retErr != nil { prepared.MemAcc().Close(ctx) } }() if stmt.AST == nil { return prepared, nil } origNumPlaceholders := stmt.NumPlaceholders switch stmt.AST.(type) { case *tree.Prepare, *tree.CopyTo: // Special cases: // - We're preparing a SQL-level PREPARE using the // wire protocol. There's an ambiguity from the perspective of this code: // any placeholders that are inside of the statement that we're preparing // shouldn't be treated as placeholders to the PREPARE statement. So, we // edit the NumPlaceholders field to be 0 here. // - We're preparing a COPY ... TO statement. We match the Postgres // behavior, which is to treat the statement as if it had no placeholders. stmt.NumPlaceholders = 0 } var flags planFlags var udts []*types.T prepare := func(ctx context.Context, txn *kv.Txn) (err error) { p := &ex.planner if origin == prep.StatementOriginWire { // If the PREPARE command was issued as a SQL statement or through // deserialize_session, then we have already reset the planner at the very // beginning of the execution (in execStmtInOpenState). We might have also // instrumented the planner to collect execution statistics, and resetting // the planner here would break the assumptions of the instrumentation. 
ex.statsCollector.Reset(ex.applicationStats, ex.phaseTimes) ex.resetPlanner(ctx, p, txn, ex.server.cfg.Clock.PhysicalTime()) ex.execMon.StartNoReserved(ctx, ex.state.txnMon) defer ex.execMon.Stop(ctx) } if err := ex.maybeAdjustTxnForDDL(ctx, stmt); err != nil { return err } if placeholderHints == nil { placeholderHints = make(tree.PlaceholderTypes, stmt.NumPlaceholders) } else if rawTypeHints != nil { // If we were provided any type hints, attempt to resolve any user defined // type OIDs into types.T's. for i := range placeholderHints { if placeholderHints[i] == nil { if i >= len(rawTypeHints) { break } if types.IsOIDUserDefinedType(rawTypeHints[i]) { var err error placeholderHints[i], err = ex.planner.ResolveTypeByOID(ctx, rawTypeHints[i]) if err != nil { return err } } } } } prepared.Metadata = prep.Metadata{ PlaceholderTypesInfo: tree.PlaceholderTypesInfo{ TypeHints: placeholderHints, Types: placeholderHints, }, } prepared.Statement = stmt.Statement // When we set our prepared statement, we need to make sure to propagate // the original NumPlaceholders if we're preparing a PREPARE. prepared.Statement.NumPlaceholders = origNumPlaceholders prepared.StatementNoConstants = stmt.StmtNoConstants prepared.StatementSummary = stmt.StmtSummary prepared.Hints = stmt.Hints prepared.HintIDs = stmt.HintIDs prepared.HintsGeneration = stmt.HintsGeneration prepared.ASTWithInjectedHints = stmt.ASTWithInjectedHints // Point to the prepared state, which can be further populated during query // preparation. 
stmt.Prepared = prepared if stmt.NumPlaceholders > 0 { if err := tree.ProcessPlaceholderAnnotations(&ex.planner.semaCtx, stmt.AST, placeholderHints); err != nil { return err } } p.stmt = stmt p.semaCtx.Annotations = tree.MakeAnnotations(stmt.NumAnnotations) p.extendedEvalCtx.Annotations = &p.semaCtx.Annotations flags, udts, err = ex.populatePrepared(ctx, txn, placeholderHints, p, origin) // Copy the udts slice to ensure prepared statements and memos // maintain independent references. The reason for the separation is // that, when UDT versions change, we update only the prepared // statement's udts while keeping the memo's udts unchanged, so that // the memo is properly invalidated during plan generation. prepared.UDTs = make([]*types.T, len(udts)) copy(prepared.UDTs, udts) return err } // Use the existing transaction. if err := prepare(ctx, ex.state.mu.txn); err != nil { if origin != prep.StatementOriginSessionMigration { return nil, err } else { f := tree.NewFmtCtx(tree.FmtMarkRedactionNode | tree.FmtOmitNameRedaction | tree.FmtSimple) f.FormatNode(stmt.AST) redactableStmt := redact.RedactableString(f.CloseAndGetString()) log.Dev.Warningf(ctx, "could not prepare statement during session migration (%s): %v", redactableStmt, err) } } // Account for the memory used by this prepared statement. if err := prepared.MemAcc().Grow(ctx, prepared.MemoryEstimate()); err != nil { return nil, err } ex.updateOptCounters(flags) return prepared, nil } // populatePrepared analyzes and type-checks the query and populates // stmt.Prepared. 
func (ex *connExecutor) populatePrepared( ctx context.Context, txn *kv.Txn, placeholderHints tree.PlaceholderTypes, p *planner, origin prep.StatementOrigin, ) (planFlags, []*types.T, error) { if before := ex.server.cfg.TestingKnobs.BeforePrepare; before != nil { if err := before(ctx, ex.planner.stmt.String(), txn); err != nil { return 0, nil, err } } stmt := &p.stmt p.semaCtx.Placeholders.Init(stmt.NumPlaceholders, placeholderHints) p.extendedEvalCtx.PrepareOnly = true // If the statement is being prepared by a session migration, then we should // not evaluate the AS OF SYSTEM TIME timestamp. During session migration, // there is no way for the statement being prepared to be executed in this // transaction, so there's no need to fix the timestamp, unlike how we must // for pgwire- or SQL-level prepared statements. if origin != prep.StatementOriginSessionMigration { if err := ex.handleAOST(ctx, p.stmt.AST); err != nil { return 0, nil, err } } // PREPARE has a limited subset of statements it can be run with. Postgres // only allows SELECT, INSERT, UPDATE, DELETE and VALUES statements to be // prepared. // See: https://www.postgresql.org/docs/current/static/sql-prepare.html // However, we must be able to handle every type of statement below because // the Postgres extended protocol requires running statements via the prepare // and execute paths. flags, udts, err := p.prepareUsingOptimizer(ctx, origin) if err != nil { log.VEventf(ctx, 1, "optimizer prepare failed: %v", err) return 0, nil, err } log.VEvent(ctx, 2, "optimizer prepare succeeded") // stmt.Prepared fields have been populated. 
return flags, udts, nil } func (ex *connExecutor) execBind( ctx context.Context, bindCmd BindStmt, ) (fsm.Event, fsm.EventPayload) { var ps *prep.Statement retErr := func(err error) (fsm.Event, fsm.EventPayload) { if bindCmd.PreparedStatementName != "" { err = errors.WithDetailf(err, "statement name %q", bindCmd.PreparedStatementName) } if bindCmd.PortalName != "" { err = errors.WithDetailf(err, "portal name %q", bindCmd.PortalName) } if ps != nil && ps.StatementSummary != "" { err = errors.WithDetailf(err, "statement summary %q", ps.StatementSummary) } return eventNonRetryableErr{IsCommit: fsm.False}, eventNonRetryableErrPayload{err: err} } var ok bool ps, ok = ex.extraTxnState.prepStmtsNamespace.prepStmts.Get(bindCmd.PreparedStatementName) if !ok { return retErr(newPreparedStmtDNEError(ex.sessionData(), bindCmd.PreparedStatementName)) } // We need to make sure type resolution happens within a transaction. // Otherwise, for user-defined types we won't take the correct leases and // will get back out of date type information. // This code path is only used by the wire-level Bind command. The // SQL EXECUTE command (which also needs to bind and resolve types) is // handled separately in conn_executor_exec. if _, isNoTxn := ex.machine.CurState().(stateNoTxn); isNoTxn { // The one critical exception is that we must not open a transaction when // executing SHOW COMMIT TIMESTAMP as it would destroy the information // about the previously committed transaction. if _, ok := ps.AST.(*tree.ShowCommitTimestamp); !ok { return ex.beginImplicitTxn(ctx, ps.AST, ex.QualityOfService()) } } else if _, isAbortedTxn := ex.machine.CurState().(stateAborted); isAbortedTxn { if !ex.isAllowedInAbortedTxn(ps.AST) { return retErr(sqlerrors.NewTransactionAbortedError("" /* customMsg */)) } } // Check if we need to auto-commit the transaction due to DDL. 
if ev, payload := ex.maybeAutoCommitBeforeDDL(ctx, ps.AST); ev != nil { return ev, payload } portalName := bindCmd.PortalName // The unnamed portal can be freely overwritten. if portalName != "" { if _, ok := ex.extraTxnState.prepStmtsNamespace.portals[portalName]; ok { return retErr(pgerror.Newf( pgcode.DuplicateCursor, "portal %q already exists", portalName)) } if cursor := ex.getCursorAccessor().getCursor(tree.Name(portalName)); cursor != nil { return retErr(pgerror.Newf( pgcode.DuplicateCursor, "portal %q already exists as cursor", portalName)) } } else { // Deallocate the unnamed portal, if it exists. ex.deletePortal(ctx, "") } numQArgs := uint16(len(ps.InferredTypes)) // Decode the arguments, except for internal queries for which we just verify // that the arguments match what's expected. qargs := make(tree.QueryArguments, numQArgs) if bindCmd.InternalArgs != nil { if len(bindCmd.InternalArgs) != int(numQArgs) { return retErr( pgwirebase.NewProtocolViolationErrorf( "expected %d arguments, got %d", numQArgs, len(bindCmd.InternalArgs))) } for i, datum := range bindCmd.InternalArgs { t := ps.InferredTypes[i] if oid := datum.ResolvedType().Oid(); datum != tree.DNull && oid != t { return retErr( pgwirebase.NewProtocolViolationErrorf( "for argument %d expected OID %d, got %d", i, t, oid)) } qargs[i] = datum } } else { qArgFormatCodes := bindCmd.ArgFormatCodes // If there is only one format code, then that format code is used to decode all the // arguments. But if the number of format codes provided does not match the number of // arguments AND it's not a single format code then we cannot infer what format to use to // decode all of the arguments. 
if len(qArgFormatCodes) != 1 && len(qArgFormatCodes) != int(numQArgs) { return retErr(pgwirebase.NewProtocolViolationErrorf( "wrong number of format codes specified: %d for %d arguments", len(qArgFormatCodes), numQArgs)) } // If a single format code is provided and there is more than one argument to be decoded, // then expand qArgFormatCodes to the number of arguments provided. // If the number of format codes matches the number of arguments then nothing needs to be // done. if len(qArgFormatCodes) == 1 && numQArgs > 1 { fmtCode := qArgFormatCodes[0] qArgFormatCodes = make([]pgwirebase.FormatCode, numQArgs) for i := range qArgFormatCodes { qArgFormatCodes[i] = fmtCode } } if len(bindCmd.Args) != int(numQArgs) { return retErr( pgwirebase.NewProtocolViolationErrorf( "bind message supplies %d parameters, but requires %d", len(bindCmd.Args), numQArgs)) } resolve := func(ctx context.Context, txn *kv.Txn) (err error) { ex.statsCollector.Reset(ex.applicationStats, ex.phaseTimes) p := &ex.planner ex.resetPlanner(ctx, p, txn, ex.server.cfg.Clock.PhysicalTime() /* stmtTS */) ex.execMon.StartNoReserved(ctx, ex.state.txnMon) defer ex.execMon.Stop(ctx) if err := ex.handleAOST(ctx, ps.AST); err != nil { return err } for i, arg := range bindCmd.Args { k := tree.PlaceholderIdx(i) t := ps.InferredTypes[i] if arg == nil { // nil indicates a NULL argument value. qargs[k] = tree.DNull } else { typ, ok := types.OidToType[t] if !ok { // These special cases for json, json[] is here so we can // support decoding parameters with oid=json/json[] without // adding full support for these type. // TODO(sql-exp): Remove this if we support JSON. 
if t == oid.T_json { typ = types.Json } else if t == oid.T__json { typ = types.JSONArrayForDecodingOnly } else { var err error typ, err = ex.planner.ResolveTypeByOID(ctx, t) if err != nil { return err } } } d, err := pgwirebase.DecodeDatum( ctx, ex.planner.EvalContext(), typ, qArgFormatCodes[i], arg, p.datumAlloc, ) if err != nil { return pgerror.Wrapf(err, pgcode.ProtocolViolation, "error in argument for %s", k) } qargs[k] = d } } return nil } // Use the existing transaction. if err := resolve(ctx, ex.state.mu.txn); err != nil { return retErr(err) } } numCols := len(ps.Columns) if (len(bindCmd.OutFormats) > 1) && (len(bindCmd.OutFormats) != numCols) { err := pgwirebase.NewProtocolViolationErrorf( "expected 1 or %d for number of format codes, got %d", numCols, len(bindCmd.OutFormats)) // A user is hitting this error unexpectedly and rarely, dump extra info, // should be okay since this should be a very rare error. log.Dev.Infof(ctx, "%s outformats: %v, AST: %T, prepared statements: %s", err.Error(), bindCmd.OutFormats, ps.AST, ex.extraTxnState.prepStmtsNamespace.String()) return retErr(err) } columnFormatCodes := bindCmd.OutFormats if len(bindCmd.OutFormats) == 1 && numCols > 1 { // Apply the format code to every column. columnFormatCodes = make([]pgwirebase.FormatCode, numCols) for i := 0; i < numCols; i++ { columnFormatCodes[i] = bindCmd.OutFormats[0] } } // Create the new PreparedPortal. if err := ex.addPortal(ctx, portalName, ps, qargs, columnFormatCodes); err != nil { return retErr(err) } if log.V(2) { log.Dev.Infof(ctx, "portal: %q for %q, args %q, formats %q", portalName, ps.Statement, qargs, columnFormatCodes) } return nil, nil } // addPortal creates a new PreparedPortal on the connExecutor. // // It is illegal to call this when a portal with that name already exists (even // for anonymous portals). 
func (ex *connExecutor) addPortal( ctx context.Context, portalName string, stmt *prep.Statement, qargs tree.QueryArguments, outFormats []pgwirebase.FormatCode, ) error { if _, ok := ex.extraTxnState.prepStmtsNamespace.portals[portalName]; ok { panic(errors.AssertionFailedf("portal already exists: %q", portalName)) } if cursor := ex.getCursorAccessor().getCursor(tree.Name(portalName)); cursor != nil { panic(errors.AssertionFailedf("portal already exists as cursor: %q", portalName)) } portal, err := ex.makePreparedPortal(ctx, portalName, stmt, qargs, outFormats) if err != nil { return err } ex.extraTxnState.prepStmtsNamespace.portals[portalName] = portal return nil } // exhaustPortal marks a portal with the provided name as "exhausted" and // panics if there is no portal with such name. func (ex *connExecutor) exhaustPortal(portalName string) { portal, ok := ex.extraTxnState.prepStmtsNamespace.portals[portalName] if !ok { panic(errors.AssertionFailedf("portal %s doesn't exist", portalName)) } portal.exhausted = true ex.extraTxnState.prepStmtsNamespace.portals[portalName] = portal } func (ex *connExecutor) deletePreparedStmt(ctx context.Context, name string) { ex.getPrepStmtsAccessor().Delete(ctx, name) } func (ex *connExecutor) deletePortal(ctx context.Context, name string) { portal, ok := ex.extraTxnState.prepStmtsNamespace.portals[name] if !ok { return } portal.close(ctx, &ex.extraTxnState.prepStmtsNamespaceMemAcc, name) delete(ex.extraTxnState.prepStmtsNamespace.portals, name) } func (ex *connExecutor) execDelPrepStmt( ctx context.Context, delCmd DeletePreparedStmt, ) (fsm.Event, fsm.EventPayload) { switch delCmd.Type { case pgwirebase.PrepareStatement: if !ex.extraTxnState.prepStmtsNamespace.prepStmts.Has(delCmd.Name) { // The spec says "It is not an error to issue Close against a nonexistent // statement or portal name". See // https://www.postgresql.org/docs/current/static/protocol-flow.html. 
break } ex.deletePreparedStmt(ctx, delCmd.Name) case pgwirebase.PreparePortal: _, ok := ex.extraTxnState.prepStmtsNamespace.portals[delCmd.Name] if !ok { break } ex.deletePortal(ctx, delCmd.Name) default: panic(errors.AssertionFailedf("unknown del type: %s", delCmd.Type)) } return nil, nil } func (ex *connExecutor) execDescribe( ctx context.Context, descCmd DescribeStmt, res DescribeResult, ) (fsm.Event, fsm.EventPayload) { retErr := func(err error) (fsm.Event, fsm.EventPayload) { return eventNonRetryableErr{IsCommit: fsm.False}, eventNonRetryableErrPayload{err: err} } _, isAbortedTxn := ex.machine.CurState().(stateAborted) switch descCmd.Type { case pgwirebase.PrepareStatement: prepStmts := ex.extraTxnState.prepStmtsNamespace.prepStmts ps, ok := prepStmts.Get(string(descCmd.Name)) if !ok { return retErr(newPreparedStmtDNEError(ex.sessionData(), string(descCmd.Name))) } ast := ps.AST if execute, ok := ast.(*tree.Execute); ok { // If we're describing an EXECUTE, we need to look up the statement type // of the prepared statement that the EXECUTE refers to, or else we'll // return the wrong information for describe. innerPs, found := prepStmts.Get(string(execute.Name)) if !found { return retErr(newPreparedStmtDNEError(ex.sessionData(), string(execute.Name))) } ast = innerPs.AST } if isAbortedTxn && !ex.isAllowedInAbortedTxn(ast) { return retErr(sqlerrors.NewTransactionAbortedError("" /* customMsg */)) } res.SetInferredTypes(ps.InferredTypes) if stmtHasNoData(ast, ps.Columns) { res.SetNoDataRowDescription() } else { res.SetPrepStmtOutput(ctx, ps.Columns) } case pgwirebase.PreparePortal: // TODO(rimadeodhar): prepStmtsNamespace should also be updated to use tree.Name instead of string // for indexing internal maps. portal, ok := ex.extraTxnState.prepStmtsNamespace.portals[string(descCmd.Name)] if !ok { // Check SQL-level cursors. 
cursor := ex.getCursorAccessor().getCursor(descCmd.Name) if cursor == nil { return retErr(pgerror.Newf( pgcode.InvalidCursorName, "unknown portal %q", descCmd.Name)) } if isAbortedTxn { return retErr(sqlerrors.NewTransactionAbortedError("" /* customMsg */)) } // Sending a nil formatCodes is equivalent to sending all text format // codes. res.SetPortalOutput(ctx, cursor.Rows.Types(), nil /* formatCodes */) return nil, nil } ast := portal.Stmt.AST if isAbortedTxn && !ex.isAllowedInAbortedTxn(ast) { return retErr(sqlerrors.NewTransactionAbortedError("" /* customMsg */)) } if stmtHasNoData(ast, portal.Stmt.Columns) { res.SetNoDataRowDescription() } else { res.SetPortalOutput(ctx, portal.Stmt.Columns, portal.OutFormats) } default: return retErr(pgerror.Newf( pgcode.ProtocolViolation, "invalid DESCRIBE message subtype %d", errors.Safe(byte(descCmd.Type)), )) } return nil, nil } // isAllowedInAbortedTxn returns true if the statement is allowed to be // prepared and executed inside of an aborted transaction. func (ex *connExecutor) isAllowedInAbortedTxn(ast tree.Statement) bool { switch s := ast.(type) { case *tree.CommitTransaction, *tree.PrepareTransaction, *tree.RollbackTransaction, *tree.RollbackToSavepoint: return true case *tree.Savepoint: if ex.isCommitOnReleaseSavepoint(s.Name) { return true } return false default: return false } } // newPreparedStmtDNEError creates an InvalidSQLStatementName error for when a // prepared statement does not exist. func newPreparedStmtDNEError(sd *sessiondata.SessionData, name string) error { err := pgerror.Newf( pgcode.InvalidSQLStatementName, "prepared statement %q does not exist", name, ) cacheSize := sd.PreparedStatementsCacheSize if cacheSize != 0 { err = errors.WithHintf( err, "note that prepared_statements_cache_size is set to %s", string(humanizeutil.IBytes(cacheSize)), ) } return err }
go
github
https://github.com/cockroachdb/cockroach
pkg/sql/conn_executor_prepare.go
""" codecs -- Python Codec Registry, API and helpers. Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import __builtin__, sys ### Registry and builtin stateless codec functions try: from _codecs import * except ImportError, why: raise SystemError('Failed to load the builtin codecs: %s' % why) __all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", "StreamReader", "StreamWriter", "StreamReaderWriter", "StreamRecoder", "getencoder", "getdecoder", "getincrementalencoder", "getincrementaldecoder", "getreader", "getwriter", "encode", "decode", "iterencode", "iterdecode", "strict_errors", "ignore_errors", "replace_errors", "xmlcharrefreplace_errors", "backslashreplace_errors", "register_error", "lookup_error"] ### Constants # # Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) # and its possible byte string values # for UTF8/UTF16/UTF32 output and little/big endian machines # # UTF-8 BOM_UTF8 = '\xef\xbb\xbf' # UTF-16, little endian BOM_LE = BOM_UTF16_LE = '\xff\xfe' # UTF-16, big endian BOM_BE = BOM_UTF16_BE = '\xfe\xff' # UTF-32, little endian BOM_UTF32_LE = '\xff\xfe\x00\x00' # UTF-32, big endian BOM_UTF32_BE = '\x00\x00\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_LE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_LE else: # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_BE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_BE # Old broken names (don't use in new code) BOM32_LE = BOM_UTF16_LE BOM32_BE = BOM_UTF16_BE BOM64_LE = BOM_UTF32_LE BOM64_BE = BOM_UTF32_BE ### Codec base classes (defining the API) class CodecInfo(tuple): """Codec details when looking up the codec registry""" # Private API to 
allow Python to blacklist the known non-Unicode # codecs in the standard library. A more general mechanism to # reliably distinguish test encodings from other codecs will hopefully # be defined for Python 3.5 # # See http://bugs.python.org/issue19619 _is_text_encoding = True # Assume codecs are text encodings by default def __new__(cls, encode, decode, streamreader=None, streamwriter=None, incrementalencoder=None, incrementaldecoder=None, name=None, _is_text_encoding=None): self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) self.name = name self.encode = encode self.decode = decode self.incrementalencoder = incrementalencoder self.incrementaldecoder = incrementaldecoder self.streamwriter = streamwriter self.streamreader = streamreader if _is_text_encoding is not None: self._is_text_encoding = _is_text_encoding return self def __repr__(self): return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self)) class Codec: """ Defines the interface for stateless encoders/decoders. The .encode()/.decode() methods may use different error handling schemes by providing the errors argument. These string values are predefined: 'strict' - raise a ValueError error (or a subclass) 'ignore' - ignore the character and continue with the next 'replace' - replace with a suitable replacement character; Python will use the official U+FFFD REPLACEMENT CHARACTER for the builtin Unicode codecs on decoding and '?' on encoding. 'xmlcharrefreplace' - Replace with the appropriate XML character reference (only for encoding). 'backslashreplace' - Replace with backslashed escape sequences (only for encoding). The set of allowed values can be extended via register_error. """ def encode(self, input, errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling. 
The method may not store state in the Codec instance. Use StreamWriter for codecs which have to keep state in order to make encoding efficient. The encoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError def decode(self, input, errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling. The method may not store state in the Codec instance. Use StreamReader for codecs which have to keep state in order to make decoding efficient. The decoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError class IncrementalEncoder(object): """ An IncrementalEncoder encodes an input in multiple steps. The input can be passed piece by piece to the encode() method. The IncrementalEncoder remembers the state of the Encoding process between calls to encode(). """ def __init__(self, errors='strict'): """ Creates an IncrementalEncoder instance. The IncrementalEncoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors self.buffer = "" def encode(self, input, final=False): """ Encodes input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Resets the encoder to the initial state. """ def getstate(self): """ Return the current state of the encoder. """ return 0 def setstate(self, state): """ Set the current state of the encoder. state must have been returned by getstate(). 
""" class BufferedIncrementalEncoder(IncrementalEncoder): """ This subclass of IncrementalEncoder can be used as the baseclass for an incremental encoder if the encoder must keep some of the output in a buffer between calls to encode(). """ def __init__(self, errors='strict'): IncrementalEncoder.__init__(self, errors) self.buffer = "" # unencoded input that is kept between calls to encode() def _buffer_encode(self, input, errors, final): # Overwrite this method in subclasses: It must encode input # and return an (output, length consumed) tuple raise NotImplementedError def encode(self, input, final=False): # encode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_encode(data, self.errors, final) # keep unencoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalEncoder.reset(self) self.buffer = "" def getstate(self): return self.buffer or 0 def setstate(self, state): self.buffer = state or "" class IncrementalDecoder(object): """ An IncrementalDecoder decodes an input in multiple steps. The input can be passed piece by piece to the decode() method. The IncrementalDecoder remembers the state of the decoding process between calls to decode(). """ def __init__(self, errors='strict'): """ Creates a IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors def decode(self, input, final=False): """ Decodes input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Resets the decoder to the initial state. """ def getstate(self): """ Return the current state of the decoder. This must be a (buffered_input, additional_state_info) tuple. buffered_input must be a bytes object containing bytes that were passed to decode() that have not yet been converted. 
additional_state_info must be a non-negative integer representing the state of the decoder WITHOUT yet having processed the contents of buffered_input. In the initial state and after reset(), getstate() must return (b"", 0). """ return (b"", 0) def setstate(self, state): """ Set the current state of the decoder. state must have been returned by getstate(). The effect of setstate((b"", 0)) must be equivalent to reset(). """ class BufferedIncrementalDecoder(IncrementalDecoder): """ This subclass of IncrementalDecoder can be used as the baseclass for an incremental decoder if the decoder must be able to handle incomplete byte sequences. """ def __init__(self, errors='strict'): IncrementalDecoder.__init__(self, errors) self.buffer = "" # undecoded input that is kept between calls to decode() def _buffer_decode(self, input, errors, final): # Overwrite this method in subclasses: It must decode input # and return an (output, length consumed) tuple raise NotImplementedError def decode(self, input, final=False): # decode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_decode(data, self.errors, final) # keep undecoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalDecoder.reset(self) self.buffer = "" def getstate(self): # additional state info is always 0 return (self.buffer, 0) def setstate(self, state): # ignore additional state info self.buffer = state[0] # # The StreamWriter and StreamReader class provide generic working # interfaces which can be used to implement new encoding submodules # very easily. See encodings/utf_8.py for an example on how this is # done. # class StreamWriter(Codec): def __init__(self, stream, errors='strict'): """ Creates a StreamWriter instance. stream must be a file-like object open for writing (binary) data. The StreamWriter may use different error handling schemes by providing the errors keyword argument. 
These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character 'xmlcharrefreplace' - Replace with the appropriate XML character reference. 'backslashreplace' - Replace with backslashed escape sequences (only for encoding). The set of allowed parameter values can be extended via register_error. """ self.stream = stream self.errors = errors def write(self, object): """ Writes the object's contents encoded to self.stream. """ data, consumed = self.encode(object, self.errors) self.stream.write(data) def writelines(self, list): """ Writes the concatenated list of strings to the stream using .write(). """ self.write(''.join(list)) def reset(self): """ Flushes and resets the codec buffers used for keeping state. Calling this method should ensure that the data on the output is put into a clean state, that allows appending of new fresh data without having to rescan the whole stream to recover state. """ pass def seek(self, offset, whence=0): self.stream.seek(offset, whence) if whence == 0 and offset == 0: self.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReader(Codec): def __init__(self, stream, errors='strict'): """ Creates a StreamReader instance. stream must be a file-like object open for reading (binary) data. The StreamReader may use different error handling schemes by providing the errors keyword argument. These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character; The set of allowed parameter values can be extended via register_error. 
""" self.stream = stream self.errors = errors self.bytebuffer = "" # For str->str decoding this will stay a str # For str->unicode decoding the first read will promote it to unicode self.charbuffer = "" self.linebuffer = None def decode(self, input, errors='strict'): raise NotImplementedError def read(self, size=-1, chars=-1, firstline=False): """ Decodes data from the stream self.stream and returns the resulting object. chars indicates the number of characters to read from the stream. read() will never return more than chars characters, but it might return less, if there are not enough characters available. size indicates the approximate maximum number of bytes to read from the stream for decoding purposes. The decoder can modify this setting as appropriate. The default value -1 indicates to read and decode as much as possible. size is intended to prevent having to decode huge files in one step. If firstline is true, and a UnicodeDecodeError happens after the first line terminator in the input only the first line will be returned, the rest of the input will be kept until the next call to read(). The method should use a greedy read strategy meaning that it should read as much data as is allowed within the definition of the encoding and the given size, e.g. if optional encoding endings or state markers are available on the stream, these should be read too. """ # If we have lines cached, first merge them back into characters if self.linebuffer: self.charbuffer = "".join(self.linebuffer) self.linebuffer = None # read until we get the required number of characters (if available) while True: # can the request be satisfied from the character buffer? 
if chars >= 0: if len(self.charbuffer) >= chars: break elif size >= 0: if len(self.charbuffer) >= size: break # we need more data if size < 0: newdata = self.stream.read() else: newdata = self.stream.read(size) # decode bytes (those remaining from the last call included) data = self.bytebuffer + newdata try: newchars, decodedbytes = self.decode(data, self.errors) except UnicodeDecodeError, exc: if firstline: newchars, decodedbytes = self.decode(data[:exc.start], self.errors) lines = newchars.splitlines(True) if len(lines)<=1: raise else: raise # keep undecoded bytes until the next call self.bytebuffer = data[decodedbytes:] # put new characters in the character buffer self.charbuffer += newchars # there was no data available if not newdata: break if chars < 0: # Return everything we've got result = self.charbuffer self.charbuffer = "" else: # Return the first chars characters result = self.charbuffer[:chars] self.charbuffer = self.charbuffer[chars:] return result def readline(self, size=None, keepends=True): """ Read one line from the input stream and return the decoded data. size, if given, is passed as size argument to the read() method. """ # If we have lines cached from an earlier read, return # them unconditionally if self.linebuffer: line = self.linebuffer[0] del self.linebuffer[0] if len(self.linebuffer) == 1: # revert to charbuffer mode; we might need more data # next time self.charbuffer = self.linebuffer[0] self.linebuffer = None if not keepends: line = line.splitlines(False)[0] return line readsize = size or 72 line = "" # If size is given, we call read() only once while True: data = self.read(readsize, firstline=True) if data: # If we're at a "\r" read one extra character (which might # be a "\n") to get a proper line ending. If the stream is # temporarily exhausted we return the wrong line ending. 
if data.endswith("\r"): data += self.read(size=1, chars=1) line += data lines = line.splitlines(True) if lines: if len(lines) > 1: # More than one line result; the first line is a full line # to return line = lines[0] del lines[0] if len(lines) > 1: # cache the remaining lines lines[-1] += self.charbuffer self.linebuffer = lines self.charbuffer = None else: # only one remaining line, put it back into charbuffer self.charbuffer = lines[0] + self.charbuffer if not keepends: line = line.splitlines(False)[0] break line0withend = lines[0] line0withoutend = lines[0].splitlines(False)[0] if line0withend != line0withoutend: # We really have a line end # Put the rest back together and keep it until the next call self.charbuffer = "".join(lines[1:]) + self.charbuffer if keepends: line = line0withend else: line = line0withoutend break # we didn't get anything or this was our only try if not data or size is not None: if line and not keepends: line = line.splitlines(False)[0] break if readsize<8000: readsize *= 2 return line def readlines(self, sizehint=None, keepends=True): """ Read all lines available on the input stream and return them as list of lines. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line. """ data = self.read() return data.splitlines(keepends) def reset(self): """ Resets the codec buffers used for keeping state. Note that no stream repositioning should take place. This method is primarily intended to be able to recover from decoding errors. """ self.bytebuffer = "" self.charbuffer = u"" self.linebuffer = None def seek(self, offset, whence=0): """ Set the input stream's current position. Resets the codec buffers used for keeping state. 
""" self.stream.seek(offset, whence) self.reset() def next(self): """ Return the next decoded line from the input stream.""" line = self.readline() if line: return line raise StopIteration def __iter__(self): return self def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReaderWriter: """ StreamReaderWriter instances allow wrapping streams which work in both read and write modes. The design is such that one can use the factory functions returned by the codec.lookup() function to construct the instance. """ # Optional attributes set by the file wrappers below encoding = 'unknown' def __init__(self, stream, Reader, Writer, errors='strict'): """ Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers. """ self.stream = stream self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): return self.reader.read(size) def readline(self, size=None): return self.reader.readline(size) def readlines(self, sizehint=None): return self.reader.readlines(sizehint) def next(self): """ Return the next decoded line from the input stream.""" return self.reader.next() def __iter__(self): return self def write(self, data): return self.writer.write(data) def writelines(self, list): return self.writer.writelines(list) def reset(self): self.reader.reset() self.writer.reset() def seek(self, offset, whence=0): self.stream.seek(offset, whence) self.reader.reset() if whence == 0 and offset == 0: self.writer.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. 
""" return getattr(self.stream, name) # these are needed to make "with codecs.open(...)" work properly def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamRecoder: """ StreamRecoder instances provide a frontend - backend view of encoding data. They use the complete set of APIs returned by the codecs.lookup() function to implement their task. Data written to the stream is first decoded into an intermediate format (which is dependent on the given codec combination) and then written to the stream using an instance of the provided Writer class. In the other direction, data is read from the stream using a Reader instance and then return encoded data to the caller. """ # Optional attributes set by the file wrappers below data_encoding = 'unknown' file_encoding = 'unknown' def __init__(self, stream, encode, decode, Reader, Writer, errors='strict'): """ Creates a StreamRecoder instance which implements a two-way conversion: encode and decode work on the frontend (the input to .read() and output of .write()) while Reader and Writer work on the backend (reading and writing to the stream). You can use these objects to do transparent direct recodings from e.g. latin-1 to utf-8 and back. stream must be a file-like object. encode, decode must adhere to the Codec interface, Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. encode and decode are needed for the frontend translation, Reader and Writer for the backend translation. Unicode is used as intermediate encoding. Error handling is done in the same way as defined for the StreamWriter/Readers. 
""" self.stream = stream self.encode = encode self.decode = decode self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): data = self.reader.read(size) data, bytesencoded = self.encode(data, self.errors) return data def readline(self, size=None): if size is None: data = self.reader.readline() else: data = self.reader.readline(size) data, bytesencoded = self.encode(data, self.errors) return data def readlines(self, sizehint=None): data = self.reader.read() data, bytesencoded = self.encode(data, self.errors) return data.splitlines(1) def next(self): """ Return the next decoded line from the input stream.""" data = self.reader.next() data, bytesencoded = self.encode(data, self.errors) return data def __iter__(self): return self def write(self, data): data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def writelines(self, list): data = ''.join(list) data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def reset(self): self.reader.reset() self.writer.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### Shortcuts def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): """ Open an encoded file using the given mode and return a wrapped version providing transparent encoding/decoding. Note: The wrapped version will only accept the object format defined by the codecs, i.e. Unicode objects for most builtin codecs. Output is also codec dependent and will usually be Unicode as well. Files are always opened in binary mode, even if no binary mode was specified. This is done to avoid data loss due to encodings using 8-bit values. The default file mode is 'rb' meaning to open the file in binary read mode. 
encoding specifies the encoding which is to be used for the file. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. buffering has the same meaning as for the builtin open() API. It defaults to line buffered. The returned wrapped file object provides an extra attribute .encoding which allows querying the used encoding. This attribute is only available if an encoding was specified as parameter. """ if encoding is not None: if 'U' in mode: # No automatic conversion of '\n' is done on reading and writing mode = mode.strip().replace('U', '') if mode[:1] not in set('rwa'): mode = 'r' + mode if 'b' not in mode: # Force opening of the file in binary mode mode = mode + 'b' file = __builtin__.open(filename, mode, buffering) if encoding is None: return file info = lookup(encoding) srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) # Add attributes to simplify introspection srw.encoding = encoding return srw def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): """ Return a wrapped version of file which provides transparent encoding translation. Strings written to the wrapped file are interpreted according to the given data_encoding and then written to the original file as string using file_encoding. The intermediate encoding will usually be Unicode but depends on the specified codecs. Strings are read from the file using file_encoding and then passed back to the caller as string using data_encoding. If file_encoding is not given, it defaults to data_encoding. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. The returned wrapped file object provides two extra attributes .data_encoding and .file_encoding which reflect the given parameters of the same name. The attributes can be used for introspection by Python programs. 
""" if file_encoding is None: file_encoding = data_encoding data_info = lookup(data_encoding) file_info = lookup(file_encoding) sr = StreamRecoder(file, data_info.encode, data_info.decode, file_info.streamreader, file_info.streamwriter, errors) # Add attributes to simplify introspection sr.data_encoding = data_encoding sr.file_encoding = file_encoding return sr ### Helpers for codec lookup def getencoder(encoding): """ Lookup up the codec for the given encoding and return its encoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).encode def getdecoder(encoding): """ Lookup up the codec for the given encoding and return its decoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).decode def getincrementalencoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalEncoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental encoder. """ encoder = lookup(encoding).incrementalencoder if encoder is None: raise LookupError(encoding) return encoder def getincrementaldecoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalDecoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental decoder. """ decoder = lookup(encoding).incrementaldecoder if decoder is None: raise LookupError(encoding) return decoder def getreader(encoding): """ Lookup up the codec for the given encoding and return its StreamReader class or factory function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).streamreader def getwriter(encoding): """ Lookup up the codec for the given encoding and return its StreamWriter class or factory function. Raises a LookupError in case the encoding cannot be found. 
""" return lookup(encoding).streamwriter def iterencode(iterator, encoding, errors='strict', **kwargs): """ Encoding iterator. Encodes the input strings from the iterator using a IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. """ encoder = getincrementalencoder(encoding)(errors, **kwargs) for input in iterator: output = encoder.encode(input) if output: yield output output = encoder.encode("", True) if output: yield output def iterdecode(iterator, encoding, errors='strict', **kwargs): """ Decoding iterator. Decodes the input strings from the iterator using a IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. """ decoder = getincrementaldecoder(encoding)(errors, **kwargs) for input in iterator: output = decoder.decode(input) if output: yield output output = decoder.decode("", True) if output: yield output ### Helpers for charmap-based codecs def make_identity_dict(rng): """ make_identity_dict(rng) -> dict Return a dictionary where elements of the rng sequence are mapped to themselves. """ res = {} for i in rng: res[i]=i return res def make_encoding_map(decoding_map): """ Creates an encoding map from a decoding map. If a target mapping in the decoding map occurs multiple times, then that target is mapped to None (undefined mapping), causing an exception when encountered by the charmap codec during translation. One example where this happens is cp875.py which decodes multiple character to \\u001a. 
""" m = {} for k,v in decoding_map.items(): if not v in m: m[v] = k else: m[v] = None return m ### error handlers try: strict_errors = lookup_error("strict") ignore_errors = lookup_error("ignore") replace_errors = lookup_error("replace") xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") backslashreplace_errors = lookup_error("backslashreplace") except LookupError: # In --disable-unicode builds, these error handler are missing strict_errors = None ignore_errors = None replace_errors = None xmlcharrefreplace_errors = None backslashreplace_errors = None # Tell modulefinder that using codecs probably needs the encodings # package _false = 0 if _false: import encodings ### Tests if __name__ == '__main__': # Make stdout translate Latin-1 output into UTF-8 output sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') # Have stdin translate Latin-1 input into UTF-8 input sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
unknown
codeparrot/codeparrot-clean
"""Package with general repository related functions"""
import os
from string import digits

from gitdb.exc import (
    BadObject,
    BadName
)

from git.refs import SymbolicReference
from git.objects import Object
from gitdb.util import (
    join,
    isdir,
    isfile,
    dirname,
    hex_to_bin,
    bin_to_hex
)
from git.compat import xrange

__all__ = ('rev_parse', 'is_git_dir', 'touch', 'find_git_dir', 'name_to_object',
           'short_to_long', 'deref_tag', 'to_commit')


def touch(filename):
    """Ensure *filename* exists (open for append-binary and close, like
    the unix ``touch`` utility) and return the path unchanged."""
    fp = open(filename, "ab")
    fp.close()
    return filename


def is_git_dir(d):
    """ This is taken from the git setup.c:is_git_directory
    function.

    :return: True if *d* looks like a git repository directory: it must
        contain ``objects`` and ``refs`` subdirectories and a ``HEAD``
        that is either a regular file or a symlink into ``refs``."""
    if isdir(d) and \
            isdir(join(d, 'objects')) and \
            isdir(join(d, 'refs')):
        headref = join(d, 'HEAD')
        return isfile(headref) or \
            (os.path.islink(headref) and
             os.readlink(headref).startswith('refs'))
    return False


def find_git_dir(d):
    """Resolve *d* to an actual git directory.

    If *d* is itself a git directory it is returned as-is. Otherwise *d*
    is read as a file; a ``gitdir: <path>`` indirection (as written for
    submodules/worktrees) is followed recursively, resolving relative
    paths against *d*'s parent directory.

    :return: path of the git directory, or None if none could be found"""
    if is_git_dir(d):
        return d

    try:
        with open(d) as fp:
            content = fp.read().rstrip()
    except (IOError, OSError):
        # it's probably not a file
        pass
    else:
        if content.startswith('gitdir: '):
            path = content[8:]

            if not os.path.isabs(path):
                path = join(dirname(d), path)
            return find_git_dir(path)
    # end handle exception
    return None


def short_to_long(odb, hexsha):
    """:return: long hexadecimal sha1 from the given less-than-40 byte hexsha
        or None if no candidate could be found.
    :param hexsha: hexsha with less than 40 byte"""
    try:
        return bin_to_hex(odb.partial_to_complete_sha_hex(hexsha))
    except BadObject:
        return None
    # END exception handling


def name_to_object(repo, name, return_ref=False):
    """
    :return: object specified by the given name, hexshas ( short and long )
        as well as references are supported
    :param return_ref: if name specifies a reference, we will return the reference
        instead of the object. Otherwise it will raise BadObject or BadName
    """
    hexsha = None

    # is it a hexsha ?
Try the most common ones, which is 7 to 40 if repo.re_hexsha_shortened.match(name): if len(name) != 40: # find long sha for short sha hexsha = short_to_long(repo.odb, name) else: hexsha = name # END handle short shas # END find sha if it matches # if we couldn't find an object for what seemed to be a short hexsha # try to find it as reference anyway, it could be named 'aaa' for instance if hexsha is None: for base in ('%s', 'refs/%s', 'refs/tags/%s', 'refs/heads/%s', 'refs/remotes/%s', 'refs/remotes/%s/HEAD'): try: hexsha = SymbolicReference.dereference_recursive(repo, base % name) if return_ref: return SymbolicReference(repo, base % name) # END handle symbolic ref break except ValueError: pass # END for each base # END handle hexsha # didn't find any ref, this is an error if return_ref: raise BadObject("Couldn't find reference named %r" % name) # END handle return ref # tried everything ? fail if hexsha is None: raise BadName(name) # END assert hexsha was found return Object.new_from_sha(repo, hex_to_bin(hexsha)) def deref_tag(tag): """Recursively dereference a tag and return the resulting object""" while True: try: tag = tag.object except AttributeError: break # END dereference tag return tag def to_commit(obj): """Convert the given object to a commit if possible and return it""" if obj.type == 'tag': obj = deref_tag(obj) if obj.type != "commit": raise ValueError("Cannot convert object %r to type commit" % obj) # END verify type return obj def rev_parse(repo, rev): """ :return: Object at the given revision, either Commit, Tag, Tree or Blob :param rev: git-rev-parse compatible revision specification as string, please see http://www.kernel.org/pub/software/scm/git/docs/git-rev-parse.html for details :raise BadObject: if the given revision could not be found :raise ValueError: If rev couldn't be parsed :raise IndexError: If invalid reflog index is specified""" # colon search mode ? 
if rev.startswith(':/'): # colon search mode raise NotImplementedError("commit by message search ( regex )") # END handle search obj = None ref = None output_type = "commit" start = 0 parsed_to = 0 lr = len(rev) while start < lr: if rev[start] not in "^~:@": start += 1 continue # END handle start token = rev[start] if obj is None: # token is a rev name if start == 0: ref = repo.head.ref else: if token == '@': ref = name_to_object(repo, rev[:start], return_ref=True) else: obj = name_to_object(repo, rev[:start]) # END handle token # END handle refname if ref is not None: obj = ref.commit # END handle ref # END initialize obj on first token start += 1 # try to parse {type} if start < lr and rev[start] == '{': end = rev.find('}', start) if end == -1: raise ValueError("Missing closing brace to define type in %s" % rev) output_type = rev[start + 1:end] # exclude brace # handle type if output_type == 'commit': pass # default elif output_type == 'tree': try: obj = to_commit(obj).tree except (AttributeError, ValueError): pass # error raised later # END exception handling elif output_type in ('', 'blob'): if obj.type == 'tag': obj = deref_tag(obj) else: # cannot do anything for non-tags pass # END handle tag elif token == '@': # try single int assert ref is not None, "Requre Reference to access reflog" revlog_index = None try: # transform reversed index into the format of our revlog revlog_index = -(int(output_type) + 1) except ValueError: # TODO: Try to parse the other date options, using parse_date # maybe raise NotImplementedError("Support for additional @{...} modes not implemented") # END handle revlog index try: entry = ref.log_entry(revlog_index) except IndexError: raise IndexError("Invalid revlog index: %i" % revlog_index) # END handle index out of bound obj = Object.new_from_sha(repo, hex_to_bin(entry.newhexsha)) # make it pass the following checks output_type = None else: raise ValueError("Invalid output type: %s ( in %s )" % (output_type, rev)) # END handle output 
type # empty output types don't require any specific type, its just about dereferencing tags if output_type and obj.type != output_type: raise ValueError("Could not accomodate requested object type %r, got %s" % (output_type, obj.type)) # END verify ouput type start = end + 1 # skip brace parsed_to = start continue # END parse type # try to parse a number num = 0 if token != ":": found_digit = False while start < lr: if rev[start] in digits: num = num * 10 + int(rev[start]) start += 1 found_digit = True else: break # END handle number # END number parse loop # no explicit number given, 1 is the default # It could be 0 though if not found_digit: num = 1 # END set default num # END number parsing only if non-blob mode parsed_to = start # handle hiererarchy walk try: if token == "~": obj = to_commit(obj) for item in xrange(num): obj = obj.parents[0] # END for each history item to walk elif token == "^": obj = to_commit(obj) # must be n'th parent if num: obj = obj.parents[num - 1] elif token == ":": if obj.type != "tree": obj = obj.tree # END get tree type obj = obj[rev[start:]] parsed_to = lr else: raise ValueError("Invalid token: %r" % token) # END end handle tag except (IndexError, AttributeError): raise BadName("Invalid revision spec '%s' - not enough parent commits to reach '%s%i'" % (rev, token, num)) # END exception handling # END parse loop # still no obj ? Its probably a simple name if obj is None: obj = name_to_object(repo, rev) parsed_to = lr # END handle simple name if obj is None: raise ValueError("Revision specifier could not be parsed: %s" % rev) if parsed_to != lr: raise ValueError("Didn't consume complete rev spec %s, consumed part: %s" % (rev, rev[:parsed_to])) return obj
unknown
codeparrot/codeparrot-clean
# Micro-benchmark: cost of dispatching the != operator on two plain
# Object instances (exercises the VM's not-equal call path).
prelude: |
  obj1 = Object.new
  obj2 = Object.new
benchmark:
  vm_neq: |
    obj1 != obj2
loop_count: 30000000
unknown
github
https://github.com/ruby/ruby
benchmark/vm_neq.yml
/*
Copyright 2019 Glen Joseph Fernandes
(glenjofe@gmail.com)

Distributed under the Boost Software License, Version 1.0.
(http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_CORE_USE_DEFAULT_HPP
#define BOOST_CORE_USE_DEFAULT_HPP

namespace boost {

/* Empty tag type: stateless marker passed as a template argument to
   request the template's default choice for that parameter. */
struct use_default { };

} /* boost */

#endif
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/core/use_default.hpp
from __future__ import unicode_literals import fnmatch import glob import io import os import re import sys from functools import total_ordering from itertools import dropwhile import django from django.conf import settings from django.core.files.temp import NamedTemporaryFile from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import ( find_command, handle_extensions, popen_wrapper, ) from django.utils import six from django.utils._os import upath from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str from django.utils.functional import cached_property from django.utils.jslex import prepare_js_for_gettext from django.utils.text import get_text_list plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL) STATUS_OK = 0 NO_LOCALE_DIR = object() def check_programs(*programs): for program in programs: if find_command(program) is None: raise CommandError("Can't find %s. Make sure you have GNU " "gettext tools 0.15 or newer installed." % program) def gettext_popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding="utf-8"): """ Makes sure text obtained from stdout of gettext utilities is Unicode. """ # This both decodes utf-8 and cleans line endings. Simply using # popen_wrapper(universal_newlines=True) doesn't properly handle the # encoding. This goes back to popen's flaky support for encoding: # https://bugs.python.org/issue6135. This is a solution for #23271, #21928. # No need to do anything on Python 2 because it's already a byte-string there. 
manual_io_wrapper = six.PY3 and stdout_encoding != DEFAULT_LOCALE_ENCODING stdout, stderr, status_code = popen_wrapper(args, os_err_exc_type=os_err_exc_type, universal_newlines=not manual_io_wrapper) if manual_io_wrapper: stdout = io.TextIOWrapper(io.BytesIO(stdout), encoding=stdout_encoding).read() if six.PY2: stdout = stdout.decode(stdout_encoding) return stdout, stderr, status_code @total_ordering class TranslatableFile(object): def __init__(self, dirpath, file_name, locale_dir): self.file = file_name self.dirpath = dirpath self.locale_dir = locale_dir def __repr__(self): return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file]) def __eq__(self, other): return self.path == other.path def __lt__(self, other): return self.path < other.path @property def path(self): return os.path.join(self.dirpath, self.file) class BuildFile(object): """ Represents the state of a translatable file during the build process. """ def __init__(self, command, domain, translatable): self.command = command self.domain = domain self.translatable = translatable @cached_property def is_templatized(self): if self.domain == 'djangojs': return self.command.gettext_version < (0, 18, 3) elif self.domain == 'django': file_ext = os.path.splitext(self.translatable.file)[1] return file_ext != '.py' return False @cached_property def path(self): return self.translatable.path @cached_property def work_path(self): """ Path to a file which is being fed into GNU gettext pipeline. This may be either a translatable or its preprocessed version. """ if not self.is_templatized: return self.path extension = { 'djangojs': 'c', 'django': 'py', }.get(self.domain) filename = '%s.%s' % (self.translatable.file, extension) return os.path.join(self.translatable.dirpath, filename) def preprocess(self): """ Preprocess (if necessary) a translatable file before passing it to xgettext GNU gettext utility. 
""" from django.utils.translation import templatize if not self.is_templatized: return with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp: src_data = fp.read() if self.domain == 'djangojs': content = prepare_js_for_gettext(src_data) elif self.domain == 'django': content = templatize(src_data, self.path[2:]) with io.open(self.work_path, 'w', encoding='utf-8') as fp: fp.write(content) def postprocess_messages(self, msgs): """ Postprocess messages generated by xgettext GNU gettext utility. Transform paths as if these messages were generated from original translatable files rather than from preprocessed versions. """ if not self.is_templatized: return msgs # Remove '.py' suffix if os.name == 'nt': # Preserve '.\' prefix on Windows to respect gettext behavior old_path = self.work_path new_path = self.path else: old_path = self.work_path[2:] new_path = self.path[2:] return re.sub( r'^(#: .*)(' + re.escape(old_path) + r')', lambda match: match.group().replace(old_path, new_path), msgs, flags=re.MULTILINE ) def cleanup(self): """ Remove a preprocessed copy of a translatable file (if any). """ if self.is_templatized: # This check is needed for the case of a symlinked file and its # source being processed inside a single group (locale dir); # removing either of those two removes both. if os.path.exists(self.work_path): os.unlink(self.work_path) def write_pot_file(potfile, msgs): """ Write the :param potfile: POT file with the :param msgs: contents, previously making sure its format is valid. """ if os.path.exists(potfile): # Strip the header msgs = '\n'.join(dropwhile(len, msgs.split('\n'))) else: msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8') with io.open(potfile, 'a', encoding='utf-8') as fp: fp.write(msgs) class Command(BaseCommand): help = ("Runs over the entire source tree of the current directory and " "pulls out all strings marked for translation. 
It creates (or updates) a message " "file in the conf/locale (in the django tree) or locale (for projects and " "applications) directory.\n\nYou must run this command with one of either the " "--locale, --exclude or --all options.") translatable_file_class = TranslatableFile build_file_class = BuildFile requires_system_checks = False leave_locale_alone = True msgmerge_options = ['-q', '--previous'] msguniq_options = ['--to-code=utf-8'] msgattrib_options = ['--no-obsolete'] xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators'] def add_arguments(self, parser): parser.add_argument('--locale', '-l', default=[], dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.') parser.add_argument('--exclude', '-x', default=[], dest='exclude', action='append', help='Locales to exclude. Default is none. Can be used multiple times.') parser.add_argument('--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").') parser.add_argument('--all', '-a', action='store_true', dest='all', default=False, help='Updates the message files for all existing locales.') parser.add_argument('--extension', '-e', dest='extensions', help='The file extension(s) to examine (default: "html,txt,py", or "js" ' 'if the domain is "djangojs"). Separate multiple extensions with ' 'commas, or use -e multiple times.', action='append') parser.add_argument('--symlinks', '-s', action='store_true', dest='symlinks', default=False, help='Follows symlinks to directories when examining ' 'source code and templates for translation strings.') parser.add_argument('--ignore', '-i', action='append', dest='ignore_patterns', default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. 
' 'Use multiple times to ignore more.') parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.") parser.add_argument('--no-wrap', action='store_true', dest='no_wrap', default=False, help="Don't break long message lines into several lines.") parser.add_argument('--no-location', action='store_true', dest='no_location', default=False, help="Don't write '#: filename:line' lines.") parser.add_argument('--no-obsolete', action='store_true', dest='no_obsolete', default=False, help="Remove obsolete message strings.") parser.add_argument('--keep-pot', action='store_true', dest='keep_pot', default=False, help="Keep .pot file after making messages. Useful when debugging.") def handle(self, *args, **options): locale = options.get('locale') exclude = options.get('exclude') self.domain = options.get('domain') self.verbosity = options.get('verbosity') process_all = options.get('all') extensions = options.get('extensions') self.symlinks = options.get('symlinks') # Need to ensure that the i18n framework is enabled if settings.configured: settings.USE_I18N = True else: settings.configure(USE_I18N=True) ignore_patterns = options.get('ignore_patterns') if options.get('use_default_ignore_patterns'): ignore_patterns += ['CVS', '.*', '*~', '*.pyc'] self.ignore_patterns = list(set(ignore_patterns)) # Avoid messing with mutable class variables if options.get('no_wrap'): self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap'] self.msguniq_options = self.msguniq_options[:] + ['--no-wrap'] self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap'] self.xgettext_options = self.xgettext_options[:] + ['--no-wrap'] if options.get('no_location'): self.msgmerge_options = self.msgmerge_options[:] + ['--no-location'] self.msguniq_options = self.msguniq_options[:] + ['--no-location'] self.msgattrib_options = self.msgattrib_options[:] + 
['--no-location'] self.xgettext_options = self.xgettext_options[:] + ['--no-location'] self.no_obsolete = options.get('no_obsolete') self.keep_pot = options.get('keep_pot') if self.domain not in ('django', 'djangojs'): raise CommandError("currently makemessages only supports domains " "'django' and 'djangojs'") if self.domain == 'djangojs': exts = extensions if extensions else ['js'] else: exts = extensions if extensions else ['html', 'txt', 'py'] self.extensions = handle_extensions(exts) if (locale is None and not exclude and not process_all) or self.domain is None: raise CommandError("Type '%s help %s' for usage information." % ( os.path.basename(sys.argv[0]), sys.argv[1])) if self.verbosity > 1: self.stdout.write('examining files with the extensions: %s\n' % get_text_list(list(self.extensions), 'and')) self.invoked_for_django = False self.locale_paths = [] self.default_locale_path = None if os.path.isdir(os.path.join('conf', 'locale')): self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))] self.default_locale_path = self.locale_paths[0] self.invoked_for_django = True else: self.locale_paths.extend(settings.LOCALE_PATHS) # Allow to run makemessages inside an app dir if os.path.isdir('locale'): self.locale_paths.append(os.path.abspath('locale')) if self.locale_paths: self.default_locale_path = self.locale_paths[0] if not os.path.exists(self.default_locale_path): os.makedirs(self.default_locale_path) # Build locale list locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path)) all_locales = map(os.path.basename, locale_dirs) # Account for excluded locales if process_all: locales = all_locales else: locales = locale or all_locales locales = set(locales) - set(exclude) if locales: check_programs('msguniq', 'msgmerge', 'msgattrib') check_programs('xgettext') try: potfiles = self.build_potfiles() # Build po files for each selected locale for locale in locales: if self.verbosity > 0: self.stdout.write("processing locale %s\n" % 
locale) for potfile in potfiles: self.write_po_file(potfile, locale) finally: if not self.keep_pot: self.remove_potfiles() @cached_property def gettext_version(self): # Gettext tools will output system-encoded bytestrings instead of UTF-8, # when looking up the version. It's especially a problem on Windows. out, err, status = gettext_popen_wrapper( ['xgettext', '--version'], stdout_encoding=DEFAULT_LOCALE_ENCODING, ) m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out) if m: return tuple(int(d) for d in m.groups() if d is not None) else: raise CommandError("Unable to get gettext version. Is it installed?") def build_potfiles(self): """ Build pot files and apply msguniq to them. """ file_list = self.find_files(".") self.remove_potfiles() self.process_files(file_list) potfiles = [] for path in self.locale_paths: potfile = os.path.join(path, '%s.pot' % str(self.domain)) if not os.path.exists(potfile): continue args = ['msguniq'] + self.msguniq_options + [potfile] msgs, errors, status = gettext_popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msguniq\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) with io.open(potfile, 'w', encoding='utf-8') as fp: fp.write(msgs) potfiles.append(potfile) return potfiles def remove_potfiles(self): for path in self.locale_paths: pot_path = os.path.join(path, '%s.pot' % str(self.domain)) if os.path.exists(pot_path): os.unlink(pot_path) def find_files(self, root): """ Helper method to get all files in the given root. Also check that there is a matching locale dir for each file. """ def is_ignored(path, ignore_patterns): """ Check if the given path should be ignored or not. 
""" filename = os.path.basename(path) ignore = lambda pattern: (fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern)) return any(ignore(pattern) for pattern in ignore_patterns) ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns] dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}} norm_patterns = [] for p in ignore_patterns: for dir_suffix in dir_suffixes: if p.endswith(dir_suffix): norm_patterns.append(p[:-len(dir_suffix)]) break else: norm_patterns.append(p) all_files = [] ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p] for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks): for dirname in dirnames[:]: if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots): dirnames.remove(dirname) if self.verbosity > 1: self.stdout.write('ignoring directory %s\n' % dirname) elif dirname == 'locale': dirnames.remove(dirname) self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname)) for filename in filenames: file_path = os.path.normpath(os.path.join(dirpath, filename)) file_ext = os.path.splitext(filename)[1] if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns): if self.verbosity > 1: self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath)) else: locale_dir = None for path in self.locale_paths: if os.path.abspath(dirpath).startswith(os.path.dirname(path)): locale_dir = path break if not locale_dir: locale_dir = self.default_locale_path if not locale_dir: locale_dir = NO_LOCALE_DIR all_files.append(self.translatable_file_class(dirpath, filename, locale_dir)) return sorted(all_files) def process_files(self, file_list): """ Group translatable files by locale directory and run pot file build process for each group. 
""" file_groups = {} for translatable in file_list: file_group = file_groups.setdefault(translatable.locale_dir, []) file_group.append(translatable) for locale_dir, files in file_groups.items(): self.process_locale_dir(locale_dir, files) def process_locale_dir(self, locale_dir, files): """ Extract translatable literals from the specified files, creating or updating the POT file for a given locale directory. Uses the xgettext GNU gettext utility. """ build_files = [] for translatable in files: if self.verbosity > 1: self.stdout.write('processing file %s in %s\n' % ( translatable.file, translatable.dirpath )) if self.domain not in ('djangojs', 'django'): continue build_file = self.build_file_class(self, self.domain, translatable) try: build_file.preprocess() except UnicodeDecodeError as e: self.stdout.write( 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % ( translatable.file, translatable.dirpath, e, ) ) continue build_files.append(build_file) if self.domain == 'djangojs': is_templatized = build_file.is_templatized args = [ 'xgettext', '-d', self.domain, '--language=%s' % ('C' if is_templatized else 'JavaScript',), '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--output=-', ] elif self.domain == 'django': args = [ 'xgettext', '-d', self.domain, '--language=Python', '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=ugettext_noop', '--keyword=ugettext_lazy', '--keyword=ungettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--keyword=pgettext_lazy:1c,2', '--keyword=npgettext_lazy:1c,2,3', '--output=-', ] else: return input_files = [bf.work_path for bf in build_files] with NamedTemporaryFile(mode='w+') as input_files_list: input_files_list.write(force_str('\n'.join(input_files), encoding=DEFAULT_LOCALE_ENCODING)) input_files_list.flush() args.extend(['--files-from', input_files_list.name]) 
args.extend(self.xgettext_options) msgs, errors, status = gettext_popen_wrapper(args) if errors: if status != STATUS_OK: for build_file in build_files: build_file.cleanup() raise CommandError( 'errors happened while running xgettext on %s\n%s' % ('\n'.join(input_files), errors) ) elif self.verbosity > 0: # Print warnings self.stdout.write(errors) if msgs: if locale_dir is NO_LOCALE_DIR: file_path = os.path.normpath(build_files[0].path) raise CommandError( 'Unable to find a locale path to store translations for ' 'file %s' % file_path ) for build_file in build_files: msgs = build_file.postprocess_messages(msgs) potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain)) write_pot_file(potfile, msgs) for build_file in build_files: build_file.cleanup() def write_po_file(self, potfile, locale): """ Creates or updates the PO file for self.domain and :param locale:. Uses contents of the existing :param potfile:. Uses msgmerge, and msgattrib GNU gettext utilities. """ basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES') if not os.path.isdir(basedir): os.makedirs(basedir) pofile = os.path.join(basedir, '%s.po' % str(self.domain)) if os.path.exists(pofile): args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile] msgs, errors, status = gettext_popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgmerge\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) else: with io.open(potfile, 'r', encoding='utf-8') as fp: msgs = fp.read() if not self.invoked_for_django: msgs = self.copy_plural_forms(msgs, locale) msgs = msgs.replace( "#. 
#-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "") with io.open(pofile, 'w', encoding='utf-8') as fp: fp.write(msgs) if self.no_obsolete: args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile] msgs, errors, status = gettext_popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgattrib\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) def copy_plural_forms(self, msgs, locale): """ Copies plural forms header contents from a Django catalog of locale to the msgs string, inserting it at the right place. msgs should be the contents of a newly created .po file. """ django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__)))) if self.domain == 'djangojs': domains = ('djangojs', 'django') else: domains = ('django',) for domain in domains: django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain) if os.path.exists(django_po): with io.open(django_po, 'r', encoding='utf-8') as fp: m = plural_forms_re.search(fp.read()) if m: plural_form_line = force_str(m.group('value')) if self.verbosity > 1: self.stdout.write("copying plural forms: %s\n" % plural_form_line) lines = [] found = False for line in msgs.split('\n'): if not found and (not line or plural_forms_re.search(line)): line = '%s\n' % plural_form_line found = True lines.append(line) msgs = '\n'.join(lines) break return msgs
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.requests; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.internal.MemoryRecords; import org.apache.kafka.common.record.internal.Records; import java.util.ArrayList; import java.util.Collections; import java.util.EnumMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; /** * Possible error codes. 
* - {@link Errors#GROUP_AUTHORIZATION_FAILED} * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} * - {@link Errors#NOT_LEADER_OR_FOLLOWER} * - {@link Errors#UNKNOWN_TOPIC_ID} * - {@link Errors#INVALID_RECORD_STATE} * - {@link Errors#KAFKA_STORAGE_ERROR} * - {@link Errors#CORRUPT_MESSAGE} * - {@link Errors#INVALID_REQUEST} * - {@link Errors#UNKNOWN_SERVER_ERROR} */ public class ShareFetchResponse extends AbstractResponse { private final ShareFetchResponseData data; private ShareFetchResponse(ShareFetchResponseData data) { super(ApiKeys.SHARE_FETCH); this.data = data; } public Errors error() { return Errors.forCode(data.errorCode()); } @Override public ShareFetchResponseData data() { return data; } @Override public Map<Errors, Integer> errorCounts() { Map<Errors, Integer> counts = new EnumMap<>(Errors.class); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach( topic -> topic.partitions().forEach( partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) ) ); return counts; } public LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData(Map<Uuid, String> topicNames) { final LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData = new LinkedHashMap<>(); data.responses().forEach(topicResponse -> { String name = topicNames.get(topicResponse.topicId()); if (name != null) { topicResponse.partitions().forEach(partitionData -> responseData.put(new TopicIdPartition(topicResponse.topicId(), new TopicPartition(name, partitionData.partitionIndex())), partitionData)); } }); return responseData; } @Override public int throttleTimeMs() { return data.throttleTimeMs(); } @Override public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } /** * Creates a {@link org.apache.kafka.common.requests.ShareFetchResponse} from the given byte buffer. 
* Unlike {@link org.apache.kafka.common.requests.ShareFetchResponse#of(Errors, int, LinkedHashMap, List, int)}, * this method doesn't convert null records to {@link org.apache.kafka.common.record.internal.MemoryRecords#EMPTY}. * * <p><strong>This method should only be used in client-side.</strong></p> */ public static ShareFetchResponse parse(Readable readable, short version) { return new ShareFetchResponse( new ShareFetchResponseData(readable, version) ); } /** * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. * * @param partition partition data * @return Records or empty record if the records in PartitionData is null. */ public static Records recordsOrFail(ShareFetchResponseData.PartitionData partition) { if (partition.records() == null) return MemoryRecords.EMPTY; if (partition.records() instanceof Records) return (Records) partition.records(); throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " + Records.class.getSimpleName() + ". This method is only safe to call if the `ShareFetchResponse` was deserialized from bytes."); } /** * Convenience method to find the size of a response. * * @param version The version of the request * @param partIterator The partition iterator. * @return The response size in bytes. */ public static int sizeOf(short version, Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partIterator) { // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can // use arbitrary values here without affecting the result. ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList(), 0); ObjectSerializationCache cache = new ObjectSerializationCache(); return 4 + data.size(cache, version); } /** * @return The size in bytes of the records. 0 is returned if records of input partition is null. 
*/ public static int recordsSize(ShareFetchResponseData.PartitionData partition) { return partition.records() == null ? 0 : partition.records().sizeInBytes(); } /** * Creates a {@link org.apache.kafka.common.requests.ShareFetchResponse} from the given data. * This method converts null records to {@link org.apache.kafka.common.record.internal.MemoryRecords#EMPTY} * to ensure consistent record representation in the response. * * <p><strong>This method should only be used in server-side.</strong></p> */ public static ShareFetchResponse of(Errors error, int throttleTimeMs, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData, List<Node> nodeEndpoints, int acquisitionLockTimeout) { return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints, acquisitionLockTimeout)); } private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partIterator, List<Node> nodeEndpoints, int acquisitionLockTimeout) { ShareFetchResponseData.ShareFetchableTopicResponseCollection topicResponses = new ShareFetchResponseData.ShareFetchableTopicResponseCollection(); while (partIterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> entry = partIterator.next(); ShareFetchResponseData.PartitionData partitionData = entry.getValue(); // Since PartitionData alone doesn't know the partition ID, we set it here partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); // To protect the clients from failing due to null records, // we always convert null records to MemoryRecords.EMPTY // We will propose a KIP to change the schema definitions in the future if (partitionData.records() == null) partitionData.setRecords(MemoryRecords.EMPTY); // Checking if the topic is already present in the map ShareFetchResponseData.ShareFetchableTopicResponse topicResponse = 
topicResponses.find(entry.getKey().topicId()); if (topicResponse == null) { topicResponse = new ShareFetchResponseData.ShareFetchableTopicResponse() .setTopicId(entry.getKey().topicId()) .setPartitions(new ArrayList<>()); topicResponses.add(topicResponse); } topicResponse.partitions().add(partitionData); } ShareFetchResponseData data = new ShareFetchResponseData(); // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( new ShareFetchResponseData.NodeEndpoint() .setNodeId(endpoint.id()) .setHost(endpoint.host()) .setPort(endpoint.port()) .setRack(endpoint.rack()))); return data.setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) .setAcquisitionLockTimeoutMs(acquisitionLockTimeout) .setResponses(topicResponses); } public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { return partitionResponse(topicIdPartition.topicPartition().partition(), error); } private static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { return new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition) .setErrorCode(error.code()) .setRecords(MemoryRecords.EMPTY); } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example lists all creative groups. Tags: creativeGroups.list """ __author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)') import argparse import sys from apiclient import sample_tools from oauth2client import client # Declare command-line flags. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument( 'profile_id', type=int, help='The ID of the profile to get creative groups for') def main(argv): # Authenticate and construct service. service, flags = sample_tools.init( argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser], scope=['https://www.googleapis.com/auth/dfareporting', 'https://www.googleapis.com/auth/dfatrafficking']) profile_id = flags.profile_id try: # Construct the request. request = service.creativeGroups().list(profileId=profile_id) while True: # Execute request and print response. response = request.execute() for group in response['creativeGroups']: print ('Found creative group with ID %s and name "%s".' % (group['id'], group['name'])) if response['creativeGroups'] and response['nextPageToken']: request = service.advertisers().list_next(request, response) else: break except client.AccessTokenRefreshError: print ('The credentials have been revoked or expired, please re-run the ' 'application to re-authorize') if __name__ == '__main__': main(sys.argv)
unknown
codeparrot/codeparrot-clean
from django.test import TestCase

import datetime
# Narrowed from "from decimal import *": Decimal is the only name this
# module uses from the decimal package.
from decimal import Decimal

from transactions.models import Portfolio, Transaction
from securities.models import Security, Price, SecuritySplit
from django.utils import timezone

# Create your tests here.


class TransactionTests(TestCase):
    """Tests for Transaction totals, validation, splits, and listing."""

    def setUp(self):
        # No shared fixtures; every test builds its own objects.
        pass

    def tearDown(self):
        pass

    def test_transaction_total_with_odd_values(self):
        """Adding Transaction should lead to correct total"""
        time = timezone.now().date()
        sec = Security.objects.create(name='TestSec', aliases='Test Alias',
                                      isin_id='DETest', yahoo_id='ABC',
                                      type='Stock')
        pf = Portfolio.objects.create(name='Test')
        t = Transaction()
        transaction = t.add('b', pf, sec, time, 100, 100, 100)
        # Buy of 100 units at price 100 plus cost 100 -> outflow of -10100.
        self.assertEqual(transaction.total, Decimal(-10100))

    def test_adding_transaction_with_future_date(self):
        """Adding Transaction with a future date should raise NameError"""
        # Docstring fixed: the assertion below checks for a raised
        # NameError, not a None return as the old docstring claimed.
        time = timezone.now().date() + datetime.timedelta(days=30)
        sec = Security.objects.create(name='TestSec', aliases='Test Alias',
                                      isin_id='DETest', yahoo_id='ABC',
                                      type='Stock')
        pf = Portfolio.objects.create(name='Test')
        t = Transaction()
        self.assertRaises(NameError, t.add, 'b', pf, sec, time, 100, 100, 100)

    # NOTE: name does not start with "test", so the runner does not collect
    # this method -- apparently disabled intentionally; kept for reference.
    def a_test_portfolio_overview(self):
        sec = Security.objects.create(name='TestSec', aliases='Test Alias',
                                      isin_id='DETest', yahoo_id='ABC',
                                      type='Stock')
        pf = Portfolio.objects.create(name='Test')
        t = Transaction()
        time = timezone.now().date()
        p = Price()
        p.add(sec, time, Decimal(111))
        t.add('b', pf, sec, time, 100, 100, 100)
        p = t.list_pf(pf.name, time)
        self.assertEqual(p, [{
            'nominal': Decimal('100'),
            'invest': Decimal('-10100'),
            'profit': Decimal('1000'),
            'name': 'TestSec',
            'value': Decimal('11100'),
            'price': Decimal('111'),
            'cost': Decimal('100')}, {
            'name': 'Total',
            'value': Decimal('11100')}])

    def test_stock_splits_quantity(self):
        """A 7-for-1 split at time_split must scale nominal holdings."""
        pf = Portfolio.objects.create(name='Test')
        sec = Security()
        price = Price()
        mysec = sec.add('Test', ['SomeAliasString'], '', 'APC.DE', 'Stock')
        split = SecuritySplit()
        time_price_1 = timezone.now() - datetime.timedelta(days=20)
        time_split = timezone.now() - datetime.timedelta(days=10)
        time_price_2 = timezone.now() - datetime.timedelta(days=5)
        price.add(mysec, time_price_1, 100)
        price.add(mysec, time_price_2, 15)
        split.add(mysec, time_split, 7)
        t = Transaction()
        t.add('b', pf, mysec, time_price_1, 100, 100, 100)
        t.add('b', pf, mysec, time_split, 90, 100, 100)
        t.add('b', pf, mysec, time_price_2, 80, 100, 100)
        # 100 bought before the split become 700 after the 7x split.
        result = t.get_total_for_portfolio(pf.name, time_price_1)
        # print(result[mysec]['nominal'])
        self.assertEqual(result[mysec]['nominal'], Decimal('700'))
        # import pdb; pdb.set_trace()
        # 700 split-adjusted + 90 bought at split time.
        result = t.get_total_for_portfolio(pf.name, time_split)
        # print(result[mysec]['nominal'])
        self.assertEqual(result[mysec]['nominal'], Decimal('790'))
        # ... + 80 bought after the split.
        result = t.get_total_for_portfolio(pf.name, time_price_2)
        # print(result[mysec]['nominal'])
        self.assertEqual(result[mysec]['nominal'], Decimal('870'))

    def test_list_pf(self):
        """list_pf must report per-security and aggregate profit."""
        pf = Portfolio.objects.create(name='Test')
        sec = Security()
        price = Price()
        mysec1 = sec.add('Test', ['SomeAliasString'], '', 'APC.DE', 'Stock')
        mysec2 = sec.add('Test2', ['SomeAliasString'], '', 'APC.DE', 'Stock')
        time_price_1 = timezone.now() - datetime.timedelta(days=20)
        time_price_2 = timezone.now() - datetime.timedelta(days=5)
        time_price_3 = timezone.now() - datetime.timedelta(days=2)
        time_price_4 = timezone.now() - datetime.timedelta(days=1)
        price.add(mysec1, time_price_1, 100)
        price.add(mysec1, time_price_2, 15)
        price.add(mysec1, time_price_3, 15)
        price.add(mysec1, time_price_4, 10)
        price.add(mysec2, time_price_1, 40)
        price.add(mysec2, time_price_2, 50)
        price.add(mysec2, time_price_3, 60)
        price.add(mysec2, time_price_4, 70)
        t = Transaction()
        # def add(self, transaction_type, portfolio, stock_id, date, nominal, price, cost):
        t.add('b', pf, mysec1, time_price_1, 10, 100, 0)
        t.add('b', pf, mysec1, time_price_2, 10, 50, 0)
        t.add('s', pf, mysec1, time_price_3, 10, 15, 0)
        t.add('b', pf, mysec2, time_price_1, 10, 40, 0)
        result = t.list_pf(pf.name, time_price_1, time_price_4)
        # print()
        # for item in result:
        #     print(item['name'], item['profit'])
        # print()
        self.assertEqual(result[0]['profit'], Decimal('-1250'))
        self.assertEqual(result[1]['profit'], Decimal('300'))
        self.assertEqual(result[2]['profit'], Decimal('-950'))
unknown
codeparrot/codeparrot-clean
"""Simple registration request and response parsing and object representation

This module contains objects representing simple registration requests
and responses that can be used with both OpenID relying parties and
OpenID providers.

  1. The relying party creates a request object and adds it to the
     C{L{AuthRequest<openid.consumer.consumer.AuthRequest>}} object
     before making the C{checkid_} request to the OpenID provider::

      auth_request.addExtension(SRegRequest(required=['email']))

  2. The OpenID provider extracts the simple registration request from
     the OpenID request using C{L{SRegRequest.fromOpenIDRequest}}, gets
     the user's approval and data, creates a C{L{SRegResponse}} object
     and adds it to the C{id_res} response::

      sreg_req = SRegRequest.fromOpenIDRequest(checkid_request)
      # [ get the user's approval and data, informing the user that
      #   the fields in sreg_response were requested ]
      sreg_resp = SRegResponse.extractResponse(sreg_req, user_data)
      sreg_resp.toMessage(openid_response.fields)

  3. The relying party uses C{L{SRegResponse.fromSuccessResponse}} to
     extract the data from the OpenID response::

      sreg_resp = SRegResponse.fromSuccessResponse(success_response)

@since: 2.0

@var sreg_data_fields: The names of the data fields that are listed in
    the sreg spec, and a description of them in English

@var sreg_uri: The preferred URI to use for the simple registration
    namespace and XRD Type value
"""

from openid.message import registerNamespaceAlias, \
     NamespaceAliasRegistrationError
from openid.extension import Extension
from openid import oidutil

# Python 2 compatibility shim: ensure `basestring` exists for the
# isinstance check in SRegRequest.requestFields.
try:
    basestring #pylint:disable-msg=W0104
except NameError:
    # For Python 2.2
    basestring = (str, unicode) #pylint:disable-msg=W0622

__all__ = [
    'SRegRequest',
    'SRegResponse',
    'data_fields',
    'ns_uri',
    'ns_uri_1_0',
    'ns_uri_1_1',
    'supportsSReg',
    ]

# The data fields that are listed in the sreg spec
data_fields = {
    'fullname':'Full Name',
    'nickname':'Nickname',
    'dob':'Date of Birth',
    'email':'E-mail Address',
    'gender':'Gender',
    'postcode':'Postal Code',
    'country':'Country',
    'language':'Language',
    'timezone':'Time Zone',
    }

def checkFieldName(field_name):
    """Check to see that the given value is a valid simple
    registration data field name.

    @raise ValueError: if the field name is not a valid simple
        registration data field name
    """
    if field_name not in data_fields:
        raise ValueError('%r is not a defined simple registration field' %
                         (field_name,))

# URI used in the wild for Yadis documents advertising simple
# registration support
ns_uri_1_0 = 'http://openid.net/sreg/1.0'

# URI in the draft specification for simple registration 1.1
# <http://openid.net/specs/openid-simple-registration-extension-1_1-01.html>
ns_uri_1_1 = 'http://openid.net/extensions/sreg/1.1'

# This attribute will always hold the preferred URI to use when adding
# sreg support to an XRDS file or in an OpenID namespace declaration.
ns_uri = ns_uri_1_1

# Register the 'sreg' alias globally at import time; failure is logged
# but not fatal, since per-message aliasing (getSRegNS) still works.
try:
    registerNamespaceAlias(ns_uri_1_1, 'sreg')
except NamespaceAliasRegistrationError, e:
    oidutil.log('registerNamespaceAlias(%r, %r) failed: %s' % (ns_uri_1_1,
                                                               'sreg',
                                                               str(e),))

def supportsSReg(endpoint):
    """Does the given endpoint advertise support for simple
    registration?

    @param endpoint: The endpoint object as returned by OpenID discovery
    @type endpoint: openid.consumer.discover.OpenIDEndpoint

    @returns: Whether an sreg type was advertised by the endpoint
    @rtype: bool
    """
    return (endpoint.usesExtension(ns_uri_1_1) or
            endpoint.usesExtension(ns_uri_1_0))

class SRegNamespaceError(ValueError):
    """The simple registration namespace was not found and could not
    be created using the expected name (there's another extension
    using the name 'sreg')

    This is not I{illegal}, for OpenID 2, although it probably
    indicates a problem, since it's not expected that other extensions
    will re-use the alias that is in use for OpenID 1.

    If this is an OpenID 1 request, then there is no recourse. This
    should not happen unless some code has modified the namespaces for
    the message that is being processed.
    """

def getSRegNS(message):
    """Extract the simple registration namespace URI from the given
    OpenID message. Handles OpenID 1 and 2, as well as both sreg
    namespace URIs found in the wild, as well as missing namespace
    definitions (for OpenID 1)

    @param message: The OpenID message from which to parse simple
        registration fields. This may be a request or response message.
    @type message: C{L{openid.message.Message}}

    @returns: the sreg namespace URI for the supplied message. The
        message may be modified to define a simple registration
        namespace.
    @rtype: C{str}

    @raise ValueError: when using OpenID 1 if the message defines
        the 'sreg' alias to be something other than a simple
        registration type.
    """
    # See if there exists an alias for one of the two defined simple
    # registration types.
    for sreg_ns_uri in [ns_uri_1_1, ns_uri_1_0]:
        alias = message.namespaces.getAlias(sreg_ns_uri)
        if alias is not None:
            break
    else:
        # There is no alias for either of the types, so try to add
        # one. We default to using the modern value (1.1)
        sreg_ns_uri = ns_uri_1_1
        try:
            message.namespaces.addAlias(ns_uri_1_1, 'sreg')
        except KeyError, why:
            # An alias for the string 'sreg' already exists, but it's
            # defined for something other than simple registration
            raise SRegNamespaceError(why[0])

    # we know that sreg_ns_uri defined, because it's defined in the
    # else clause of the loop as well, so disable the warning
    return sreg_ns_uri #pylint:disable-msg=W0631

class SRegRequest(Extension):
    """An object to hold the state of a simple registration request.

    @ivar required: A list of the required fields in this simple
        registration request
    @type required: [str]

    @ivar optional: A list of the optional fields in this simple
        registration request
    @type optional: [str]

    @ivar policy_url: The policy URL that was provided with the request
    @type policy_url: str or NoneType

    @group Consumer: requestField, requestFields, getExtensionArgs, addToOpenIDRequest
    @group Server: fromOpenIDRequest, parseExtensionArgs
    """

    ns_alias = 'sreg'

    def __init__(self, required=None, optional=None, policy_url=None,
                 sreg_ns_uri=ns_uri):
        """Initialize an empty simple registration request"""
        Extension.__init__(self)
        self.required = []
        self.optional = []
        self.policy_url = policy_url
        self.ns_uri = sreg_ns_uri

        # strict=True so duplicate field names in the constructor
        # arguments raise instead of being silently collapsed.
        if required:
            self.requestFields(required, required=True, strict=True)

        if optional:
            self.requestFields(optional, required=False, strict=True)

    # Assign getSRegNS to a static method so that it can be
    # overridden for testing.
    _getSRegNS = staticmethod(getSRegNS)

    def fromOpenIDRequest(cls, request):
        """Create a simple registration request that contains the
        fields that were requested in the OpenID request with the
        given arguments

        @param request: The OpenID request
        @type request: openid.server.CheckIDRequest

        @returns: The newly created simple registration request
        @rtype: C{L{SRegRequest}}
        """
        self = cls()

        # Since we're going to mess with namespace URI mapping, don't
        # mutate the object that was passed in.
        message = request.message.copy()

        self.ns_uri = self._getSRegNS(message)
        args = message.getArgs(self.ns_uri)
        self.parseExtensionArgs(args)

        return self

    fromOpenIDRequest = classmethod(fromOpenIDRequest)

    def parseExtensionArgs(self, args, strict=False):
        """Parse the unqualified simple registration request
        parameters and add them to this object.

        This method is essentially the inverse of
        C{L{getExtensionArgs}}. This method restores the serialized
        simple registration request fields.

        If you are extracting arguments from a standard OpenID
        checkid_* request, you probably want to use
        C{L{fromOpenIDRequest}}, which will extract the sreg namespace
        and arguments from the OpenID request. This method is intended
        for cases where the OpenID server needs more control over how
        the arguments are parsed than that method provides.

        >>> args = message.getArgs(ns_uri)
        >>> request.parseExtensionArgs(args)

        @param args: The unqualified simple registration arguments
        @type args: {str:str}

        @param strict: Whether requests with fields that are not
            defined in the simple registration specification should be
            tolerated (and ignored)
        @type strict: bool

        @returns: None; updates this object
        """
        for list_name in ['required', 'optional']:
            required = (list_name == 'required')
            items = args.get(list_name)
            if items:
                # Field lists arrive as comma-separated strings.
                for field_name in items.split(','):
                    try:
                        self.requestField(field_name, required, strict)
                    except ValueError:
                        if strict:
                            raise

        self.policy_url = args.get('policy_url')

    def allRequestedFields(self):
        """A list of all of the simple registration fields that were
        requested, whether they were required or optional.

        @rtype: [str]
        """
        return self.required + self.optional

    def wereFieldsRequested(self):
        """Have any simple registration fields been requested?

        @rtype: bool
        """
        return bool(self.allRequestedFields())

    def __contains__(self, field_name):
        """Was this field in the request?"""
        return (field_name in self.required or
                field_name in self.optional)

    def requestField(self, field_name, required=False, strict=False):
        """Request the specified field from the OpenID user

        @param field_name: the unqualified simple registration field name
        @type field_name: str

        @param required: whether the given field should be presented
            to the user as being a required to successfully complete
            the request

        @param strict: whether to raise an exception when a field is
            added to a request more than once

        @raise ValueError: when the field requested is not a simple
            registration field or strict is set and the field was
            requested more than once
        """
        checkFieldName(field_name)

        if strict:
            if field_name in self.required or field_name in self.optional:
                raise ValueError('That field has already been requested')
        else:
            if field_name in self.required:
                return

            if field_name in self.optional:
                if required:
                    # Upgrade the field from optional to required.
                    self.optional.remove(field_name)
                else:
                    return

        if required:
            self.required.append(field_name)
        else:
            self.optional.append(field_name)

    def requestFields(self, field_names, required=False, strict=False):
        """Add the given list of fields to the request

        @param field_names: The simple registration data fields to request
        @type field_names: [str]

        @param required: Whether these values should be presented to
            the user as required

        @param strict: whether to raise an exception when a field is
            added to a request more than once

        @raise ValueError: when a field requested is not a simple
            registration field or strict is set and a field was
            requested more than once
        """
        if isinstance(field_names, basestring):
            raise TypeError('Fields should be passed as a list of '
                            'strings (not %r)' % (type(field_names),))

        for field_name in field_names:
            self.requestField(field_name, required, strict=strict)

    def getExtensionArgs(self):
        """Get a dictionary of unqualified simple registration
        arguments representing this request.

        This method is essentially the inverse of
        C{L{parseExtensionArgs}}. This method serializes the simple
        registration request fields.

        @rtype: {str:str}
        """
        args = {}

        if self.required:
            args['required'] = ','.join(self.required)

        if self.optional:
            args['optional'] = ','.join(self.optional)

        if self.policy_url:
            args['policy_url'] = self.policy_url

        return args

class SRegResponse(Extension):
    """Represents the data returned in a simple registration response
    inside of an OpenID C{id_res} response. This object will be
    created by the OpenID server, added to the C{id_res} response
    object, and then extracted from the C{id_res} message by the
    Consumer.

    @ivar data: The simple registration data, keyed by the unqualified
        simple registration name of the field (i.e. nickname is keyed
        by C{'nickname'})

    @ivar ns_uri: The URI under which the simple registration data was
        stored in the response message.

    @group Server: extractResponse
    @group Consumer: fromSuccessResponse
    @group Read-only dictionary interface: keys, iterkeys, items, iteritems,
        __iter__, get, __getitem__, keys, has_key
    """

    ns_alias = 'sreg'

    def __init__(self, data=None, sreg_ns_uri=ns_uri):
        Extension.__init__(self)
        if data is None:
            self.data = {}
        else:
            self.data = data

        self.ns_uri = sreg_ns_uri

    def extractResponse(cls, request, data):
        """Take a C{L{SRegRequest}} and a dictionary of simple
        registration values and create a C{L{SRegResponse}} object
        containing that data.

        @param request: The simple registration request object
        @type request: SRegRequest

        @param data: The simple registration data for this response,
            as a dictionary from unqualified simple registration field
            name to string (unicode) value. For instance, the nickname
            should be stored under the key 'nickname'.
        @type data: {str:str}

        @returns: a simple registration response object
        @rtype: SRegResponse
        """
        self = cls()
        self.ns_uri = request.ns_uri
        # Only copy fields that were actually requested; anything else
        # in `data` is ignored.
        for field in request.allRequestedFields():
            value = data.get(field)
            if value is not None:
                self.data[field] = value
        return self

    extractResponse = classmethod(extractResponse)

    # Assign getSRegArgs to a static method so that it can be
    # overridden for testing
    _getSRegNS = staticmethod(getSRegNS)

    def fromSuccessResponse(cls, success_response, signed_only=True):
        """Create a C{L{SRegResponse}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message

        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}

        @param signed_only: Whether to process only data that was
            signed in the id_res message from the server.
        @type signed_only: bool

        @rtype: SRegResponse
        @returns: A simple registration response containing the data
            that was supplied with the C{id_res} response.
        """
        self = cls()
        self.ns_uri = self._getSRegNS(success_response.message)
        if signed_only:
            args = success_response.getSignedNS(self.ns_uri)
        else:
            args = success_response.message.getArgs(self.ns_uri)

        if not args:
            return None

        # Keep only the fields defined by the sreg spec.
        for field_name in data_fields:
            if field_name in args:
                self.data[field_name] = args[field_name]

        return self

    fromSuccessResponse = classmethod(fromSuccessResponse)

    def getExtensionArgs(self):
        """Get the fields to put in the simple registration namespace
        when adding them to an id_res message.

        @see: openid.extension
        """
        return self.data

    # Read-only dictionary interface
    def get(self, field_name, default=None):
        """Like dict.get, except that it checks that the field name is
        defined by the simple registration specification"""
        checkFieldName(field_name)
        return self.data.get(field_name, default)

    def items(self):
        """All of the data values in this simple registration response
        """
        return self.data.items()

    def iteritems(self):
        return self.data.iteritems()

    def keys(self):
        return self.data.keys()

    def iterkeys(self):
        return self.data.iterkeys()

    def has_key(self, key):
        # NOTE: delegates to __contains__, which calls checkFieldName --
        # so a non-sreg key raises ValueError rather than returning False.
        return key in self

    def __contains__(self, field_name):
        checkFieldName(field_name)
        return field_name in self.data

    def __iter__(self):
        return iter(self.data)

    def __getitem__(self, field_name):
        checkFieldName(field_name)
        return self.data[field_name]

    def __nonzero__(self):
        return bool(self.data)
unknown
codeparrot/codeparrot-clean
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrade

import (
	"crypto/sha256"
	"crypto/x509"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	certutil "k8s.io/client-go/util/cert"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal"
	controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
	etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
	kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
	certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
	configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
	etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
	pkiutiltesting "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil/testing"
	testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)

// Keys for the fakeWaiter errsToReturn map: each one selects which
// waiter operation should fail in a given test case.
const (
	waitForHashes        = "wait-for-hashes"
	waitForHashChange    = "wait-for-hash-change"
	waitForPodsWithLabel = "wait-for-pods-with-label"
)

// testConfiguration is an InitConfiguration/ClusterConfiguration YAML
// template; %%s placeholders are filled in later by getConfig with the
// certificates dir, etcd data dir and kubernetes version.
var testConfiguration = fmt.Sprintf(`
apiVersion: %s
kind: InitConfiguration
nodeRegistration:
  name: foo
localAPIEndpoint:
  advertiseAddress: 192.168.2.2
  bindPort: 6443
bootstrapTokens:
- token: ce3aa5.5ec8455bb76b379f
  ttl: 24h
---
apiVersion: %[1]s
kind: ClusterConfiguration

apiServer:
  certSANs: null
  extraArgs: null
certificatesDir: %%s
etcd:
  local:
    dataDir: %%s
    image: ""
imageRepository: registry.k8s.io
kubernetesVersion: %%s
networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
`, kubeadmapiv1.SchemeGroupVersion.String())

// fakeWaiter is a fake apiclient.Waiter that returns errors it was initialized with
type fakeWaiter struct {
	errsToReturn map[string]error
}

// NewFakeStaticPodWaiter returns a fakeWaiter pre-loaded with the given
// per-operation errors (keyed by the waitFor* constants above).
func NewFakeStaticPodWaiter(errsToReturn map[string]error) apiclient.Waiter {
	return &fakeWaiter{
		errsToReturn: errsToReturn,
	}
}

// WaitForControlPlaneComponents just returns a dummy nil, to indicate that the program should just proceed
func (w *fakeWaiter) WaitForControlPlaneComponents(podsMap map[string]*v1.Pod, apiServerAddress string) error {
	return nil
}

// WaitForPodsWithLabel just returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForPodsWithLabel(kvLabel string) error {
	return w.errsToReturn[waitForPodsWithLabel]
}

// SetTimeout is a no-op; we don't use it in this implementation
func (w *fakeWaiter) SetTimeout(_ time.Duration) {}

// WaitForStaticPodControlPlaneHashes returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodControlPlaneHashes(_ string) (map[string]string, error) {
	return map[string]string{}, w.errsToReturn[waitForHashes]
}

// WaitForStaticPodSingleHash returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodSingleHash(_ string, _ string) (string, error) {
	return "", w.errsToReturn[waitForHashes]
}

// WaitForStaticPodHashChange returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodHashChange(_, _, _ string) error {
	return w.errsToReturn[waitForHashChange]
}

// WaitForKubelet returns a dummy nil just to implement the interface
func (w *fakeWaiter) WaitForKubelet(_ string, _ int32) error {
	return nil
}

// fakeStaticPodPathManager implements StaticPodPathManager on top of a
// throwaway temp-dir layout; MoveFileFunc is injectable so tests can
// force failures on specific file moves.
type fakeStaticPodPathManager struct {
	kubernetesDir     string
	patchesDir        string
	realManifestDir   string
	tempManifestDir   string
	backupManifestDir string
	backupEtcdDir     string
	MoveFileFunc      func(string, string) error
}

// NewFakeStaticPodPathManager creates the real/upgraded/backup manifest
// directories plus an etcd backup dir under one temp kubernetes dir.
func NewFakeStaticPodPathManager(moveFileFunc func(string, string) error) (StaticPodPathManager, error) {
	kubernetesDir, err := os.MkdirTemp("", "kubeadm-pathmanager-")
	if err != nil {
		return nil, errors.Wrapf(err, "couldn't create a temporary directory for the upgrade")
	}

	realManifestDir := filepath.Join(kubernetesDir, constants.ManifestsSubDirName)
	if err := os.Mkdir(realManifestDir, 0700); err != nil {
		return nil, errors.Wrapf(err, "couldn't create a realManifestDir for the upgrade")
	}

	upgradedManifestDir := filepath.Join(kubernetesDir, "upgraded-manifests")
	if err := os.Mkdir(upgradedManifestDir, 0700); err != nil {
		return nil, errors.Wrapf(err, "couldn't create a upgradedManifestDir for the upgrade")
	}

	backupManifestDir := filepath.Join(kubernetesDir, "backup-manifests")
	if err := os.Mkdir(backupManifestDir, 0700); err != nil {
		return nil, errors.Wrap(err, "couldn't create a backupManifestDir for the upgrade")
	}

	backupEtcdDir := filepath.Join(kubernetesDir, "kubeadm-backup-etcd")
	if err := os.Mkdir(backupEtcdDir, 0700); err != nil {
		return nil, err
	}

	return &fakeStaticPodPathManager{
		kubernetesDir:     kubernetesDir,
		realManifestDir:   realManifestDir,
		tempManifestDir:   upgradedManifestDir,
		backupManifestDir: backupManifestDir,
		backupEtcdDir:     backupEtcdDir,
		MoveFileFunc:      moveFileFunc,
	}, nil
}

// MoveFile delegates to the injected MoveFileFunc.
func (spm *fakeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
	return spm.MoveFileFunc(oldPath, newPath)
}

func (spm *fakeStaticPodPathManager) KubernetesDir() string {
	return spm.kubernetesDir
}

func (spm *fakeStaticPodPathManager) PatchesDir() string {
	return spm.patchesDir
}

func (spm *fakeStaticPodPathManager) RealManifestPath(component string) string {
	return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}

func (spm *fakeStaticPodPathManager) RealManifestDir() string {
	return spm.realManifestDir
}

func (spm *fakeStaticPodPathManager) TempManifestPath(component string) string {
	return constants.GetStaticPodFilepath(component, spm.tempManifestDir)
}

func (spm *fakeStaticPodPathManager) TempManifestDir() string {
	return spm.tempManifestDir
}

func (spm *fakeStaticPodPathManager) BackupManifestPath(component string) string {
	return constants.GetStaticPodFilepath(component, spm.backupManifestDir)
}

func (spm *fakeStaticPodPathManager) BackupManifestDir() string {
	return spm.backupManifestDir
}

func (spm *fakeStaticPodPathManager) BackupEtcdDir() string {
	return spm.backupEtcdDir
}

// CleanupDirs removes the temp, backup-manifest and backup-etcd dirs.
func (spm *fakeStaticPodPathManager) CleanupDirs() error {
	if err := os.RemoveAll(spm.TempManifestDir()); err != nil {
		return err
	}
	if err := os.RemoveAll(spm.BackupManifestDir()); err != nil {
		return err
	}
	return os.RemoveAll(spm.BackupEtcdDir())
}

// fakeTLSEtcdClient is an etcd client stub whose operations all succeed.
type fakeTLSEtcdClient struct{ TLS bool }

func (c fakeTLSEtcdClient) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) {
	return true, nil
}

func (c fakeTLSEtcdClient) CheckClusterHealth() error { return nil }

func (c fakeTLSEtcdClient) Sync() error { return nil }

func (c fakeTLSEtcdClient) ListMembers() ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakeTLSEtcdClient) AddMemberAsLearner(name string, peerAddrs string) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakeTLSEtcdClient) AddMember(name string, peerAddrs string) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakeTLSEtcdClient) MemberPromote(learnerID uint64) error { return nil }

func (c fakeTLSEtcdClient) GetMemberID(peerURL string) (uint64, error) {
	return 0, nil
}

func (c fakeTLSEtcdClient) RemoveMember(id uint64) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

// fakePodManifestEtcdClient additionally verifies, in CheckClusterHealth,
// that the etcd certificates written during the upgrade are readable.
type fakePodManifestEtcdClient struct{ ManifestDir, CertificatesDir string }

func (c fakePodManifestEtcdClient) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) {
	return true, nil
}

func (c fakePodManifestEtcdClient) CheckClusterHealth() error {
	// Make sure the certificates generated from the upgrade are readable from disk
	// NOTE(review): the field assignments below look swapped (CA cert as
	// CertFile, client cert as KeyFile, client key as TrustedCAFile) --
	// confirm against the constants' intended roles.
	tlsInfo := transport.TLSInfo{
		CertFile:      filepath.Join(c.CertificatesDir, constants.EtcdCACertName),
		KeyFile:       filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientCertName),
		TrustedCAFile: filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientKeyName),
	}
	_, err := tlsInfo.ClientConfig()
	return err
}

func (c fakePodManifestEtcdClient) Sync() error { return nil }

func (c fakePodManifestEtcdClient) ListMembers() ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakePodManifestEtcdClient) AddMemberAsLearner(name string, peerAddrs string) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakePodManifestEtcdClient) AddMember(name string, peerAddrs string) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

func (c fakePodManifestEtcdClient) MemberPromote(learnerID uint64) error { return nil }

func (c fakePodManifestEtcdClient) GetMemberID(peerURL string) (uint64, error) {
	return 0, nil
}

func (c fakePodManifestEtcdClient) RemoveMember(id uint64) ([]etcdutil.Member, error) {
	return []etcdutil.Member{}, nil
}

// TestStaticPodControlPlane drives StaticPodControlPlane through a table of
// failure-injection scenarios and asserts (a) whether an error is expected
// and (b) whether the kube-apiserver manifest on disk was rewritten.
func TestStaticPodControlPlane(t *testing.T) {
	tests := []struct {
		description          string
		waitErrsToReturn     map[string]error
		moveFileFunc         func(string, string) error
		skipKubeConfig       string
		expectedErr          bool
		manifestShouldChange bool
	}{
		{
			description: "error-free case should succeed",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			expectedErr:          false,
			manifestShouldChange: true,
		},
		{
			description: "any wait error should result in a rollback and an abort 1",
			waitErrsToReturn: map[string]error{
				waitForHashes:        errors.New("boo! failed"),
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any wait error should result in a rollback and an abort 2",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    errors.New("boo! failed"),
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any wait error should result in a rollback and an abort 3",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: errors.New("boo! failed"),
			},
			moveFileFunc:         os.Rename,
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any path-moving error should result in a rollback and an abort 1",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc: func(oldPath, newPath string) error {
				// fail for kube-apiserver move
				if strings.Contains(newPath, "kube-apiserver") {
					return errors.New("moving the kube-apiserver file failed")
				}
				return os.Rename(oldPath, newPath)
			},
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any path-moving error should result in a rollback and an abort 2",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc: func(oldPath, newPath string) error {
				// fail for kube-controller-manager move
				if strings.Contains(newPath, "kube-controller-manager") {
					return errors.New("moving the kube-apiserver file failed")
				}
				return os.Rename(oldPath, newPath)
			},
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any path-moving error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc: func(oldPath, newPath string) error {
				// fail for kube-scheduler move
				if strings.Contains(newPath, "kube-scheduler") {
					return errors.New("moving the kube-apiserver file failed")
				}
				return os.Rename(oldPath, newPath)
			},
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any cert renew error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			skipKubeConfig:       constants.SchedulerKubeConfigFileName,
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "any cert renew error should result in a rollback and an abort; even though this is admin.conf (kube-apiserver and kube-controller-manager and kube-scheduler healthy)",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			skipKubeConfig:       constants.AdminKubeConfigFileName,
			expectedErr:          true,
			manifestShouldChange: false,
		},
		{
			description: "super-admin.conf is renewed if it exists",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			expectedErr:          false,
			manifestShouldChange: true,
		},
		{
			description: "no error is thrown if super-admin.conf does not exist",
			waitErrsToReturn: map[string]error{
				waitForHashes:        nil,
				waitForHashChange:    nil,
				waitForPodsWithLabel: nil,
			},
			moveFileFunc:         os.Rename,
			skipKubeConfig:       constants.SuperAdminKubeConfigFileName,
			expectedErr:          false,
			manifestShouldChange: true,
		},
	}

	for i := range tests {
		rt := tests[i]
		t.Run(rt.description, func(t *testing.T) {
			pkiutiltesting.Reset()

			waiter := NewFakeStaticPodWaiter(rt.waitErrsToReturn)
			pathMgr, err := NewFakeStaticPodPathManager(rt.moveFileFunc)
			if err != nil {
				t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err)
			}
			defer os.RemoveAll(pathMgr.(*fakeStaticPodPathManager).KubernetesDir())
			tmpKubernetesDir := pathMgr.(*fakeStaticPodPathManager).KubernetesDir()

			tempCertsDir, err := os.MkdirTemp("", "kubeadm-certs")
			if err != nil {
				t.Fatalf("couldn't create temporary certificates directory: %v", err)
			}
			defer os.RemoveAll(tempCertsDir)

			tmpEtcdDataDir, err := os.MkdirTemp("", "kubeadm-etcd-data")
			if err != nil {
				t.Fatalf("couldn't create temporary etcd data directory: %v", err)
			}
			defer os.RemoveAll(tmpEtcdDataDir)

			oldcfg, err := getConfig("v1.3.0", tempCertsDir, tmpEtcdDataDir)
			if err != nil {
				t.Fatalf("couldn't create config: %v", err)
			}

			tree, err := certsphase.GetCertsWithoutEtcd().AsMap().CertTree()
			if err != nil {
				t.Fatalf("couldn't get cert tree: %v", err)
			}

			if err := tree.CreateTree(oldcfg); err != nil {
				t.Fatalf("couldn't get create cert tree: %v", err)
			}

			// Write every kubeconfig except the one this case skips --
			// skipping is how cert-renew failures are injected.
			for _, kubeConfig := range []string{
				constants.AdminKubeConfigFileName,
				constants.SuperAdminKubeConfigFileName,
				constants.SchedulerKubeConfigFileName,
				constants.ControllerManagerKubeConfigFileName,
			} {
				if rt.skipKubeConfig == kubeConfig {
					continue
				}
				if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpKubernetesDir, oldcfg); err != nil {
					t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err)
				}
			}

			// Initialize the directory with v1.7 manifests; should then be upgraded to v1.8 using the method
			err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), pathMgr.PatchesDir(), oldcfg, false /* isDryRun */)
			if err != nil {
				t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
			}
			err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), pathMgr.PatchesDir(), oldcfg.NodeRegistration.Name, &oldcfg.ClusterConfiguration, &oldcfg.LocalAPIEndpoint, false /* isDryRun */)
			if err != nil {
				t.Fatalf("couldn't run CreateLocalEtcdStaticPodManifestFile: %v", err)
			}
			// Get a hash of the v1.7 API server manifest to compare later (was the file re-written)
			oldHash, err := getAPIServerHash(pathMgr.RealManifestDir())
			if err != nil {
				t.Fatalf("couldn't read temp file: %v", err)
			}

			newcfg, err := getConfig(constants.CurrentKubernetesVersion.String(), tempCertsDir, tmpEtcdDataDir)
			if err != nil {
				t.Fatalf("couldn't create config: %v", err)
			}

			// create the kubeadm etcd certs
			caCert, caKey, err := certsphase.KubeadmCertEtcdCA().CreateAsCA(newcfg)
			if err != nil {
				t.Fatalf("couldn't create new CA certificate: %v", err)
			}
			for _, cert := range []*certsphase.KubeadmCert{
				certsphase.KubeadmCertEtcdServer(),
				certsphase.KubeadmCertEtcdPeer(),
				certsphase.KubeadmCertEtcdHealthcheck(),
				certsphase.KubeadmCertEtcdAPIClient(),
			} {
				if err := cert.CreateFromCA(newcfg, caCert, caKey); err != nil {
					t.Fatalf("couldn't create certificate %s: %v", cert.Name, err)
				}
			}

			actualErr := StaticPodControlPlane(
				nil,
				waiter,
				pathMgr,
				newcfg,
				true,
				true,
				fakeTLSEtcdClient{
					TLS: false,
				},
				fakePodManifestEtcdClient{
					ManifestDir:     pathMgr.RealManifestDir(),
					CertificatesDir: newcfg.CertificatesDir,
				},
			)
			if (actualErr != nil) != rt.expectedErr {
				t.Errorf(
					"failed UpgradeStaticPodControlPlane\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
					rt.description,
					rt.expectedErr,
					(actualErr != nil),
					actualErr,
				)
			}

			newHash, err := getAPIServerHash(pathMgr.RealManifestDir())
			if err != nil {
				t.Fatalf("couldn't read temp file: %v", err)
			}

			// A rollback must leave the original manifest (hash) in place.
			if (oldHash != newHash) != rt.manifestShouldChange {
				t.Errorf(
					"failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t\n\tnewHash: %v",
					rt.description,
					rt.manifestShouldChange,
					(oldHash != newHash),
					newHash,
				)
			}
		})
	}
}

// getAPIServerHash returns the sha256 hex digest of the kube-apiserver
// static-pod manifest found in dir.
func getAPIServerHash(dir string) (string, error) {
	manifestPath := constants.GetStaticPodFilepath(constants.KubeAPIServer, dir)

	fileBytes, err := os.ReadFile(manifestPath)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil
}

func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.InitConfiguration, error) {
	configBytes := []byte(fmt.Sprintf(testConfiguration,
certsDir, etcdDataDir, version)) // Unmarshal the config return configutil.BytesToInitConfiguration(configBytes, true /* skipCRIDetect */) } func getTempDir(t *testing.T, name string) (string, func()) { dir, err := os.MkdirTemp(os.TempDir(), name) if err != nil { t.Fatalf("couldn't make temporary directory: %v", err) } return dir, func() { os.RemoveAll(dir) } } func TestCleanupDirs(t *testing.T) { tests := []struct { name string keepManifest, keepEtcd bool }{ { name: "save manifest backup", keepManifest: true, }, { name: "save both etcd and manifest", keepManifest: true, keepEtcd: true, }, { name: "save nothing", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { realKubernetesDir, cleanup := getTempDir(t, "realKubernetesDir") defer cleanup() tempManifestDir, cleanup := getTempDir(t, "tempManifestDir") defer cleanup() backupManifestDir, cleanup := getTempDir(t, "backupManifestDir") defer cleanup() backupEtcdDir, cleanup := getTempDir(t, "backupEtcdDir") defer cleanup() mgr := NewKubeStaticPodPathManager(realKubernetesDir, "", tempManifestDir, backupManifestDir, backupEtcdDir, test.keepManifest, test.keepEtcd) err := mgr.CleanupDirs() if err != nil { t.Errorf("unexpected error cleaning up: %v", err) } if _, err := os.Stat(tempManifestDir); !os.IsNotExist(err) { t.Errorf("%q should not have existed", tempManifestDir) } _, err = os.Stat(backupManifestDir) if test.keepManifest { if err != nil { t.Errorf("unexpected error getting backup manifest dir") } } else { if !os.IsNotExist(err) { t.Error("expected backup manifest to not exist") } } _, err = os.Stat(backupEtcdDir) if test.keepEtcd { if err != nil { t.Errorf("unexpected error getting backup etcd dir") } } else { if !os.IsNotExist(err) { t.Error("expected backup etcd dir to not exist") } } }) } } func TestRenewCertsByComponent(t *testing.T) { caCert, caKey := certstestutil.SetupCertificateAuthority(t) tests := []struct { name string component string externalCA bool externalFrontProxyCA bool 
skipCreateEtcdCA bool shouldErrorOnRenew bool certsShouldExist []*certsphase.KubeadmCert certsShouldBeRenewed []*certsphase.KubeadmCert // NB. If empty, it will assume certsShouldBeRenewed == certsShouldExist kubeConfigShouldExist []string }{ { name: "all CA exist, all certs should be rotated for etcd", component: constants.Etcd, certsShouldExist: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdServer(), certsphase.KubeadmCertEtcdPeer(), certsphase.KubeadmCertEtcdHealthcheck(), }, }, { name: "all CA exist, all certs should be rotated for apiserver", component: constants.KubeAPIServer, certsShouldExist: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdAPIClient(), certsphase.KubeadmCertAPIServer(), certsphase.KubeadmCertKubeletClient(), certsphase.KubeadmCertFrontProxyClient(), }, }, { name: "external CA, renew only certificates not signed by CA for apiserver", component: constants.KubeAPIServer, certsShouldExist: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdAPIClient(), certsphase.KubeadmCertFrontProxyClient(), certsphase.KubeadmCertAPIServer(), certsphase.KubeadmCertKubeletClient(), }, certsShouldBeRenewed: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdAPIClient(), certsphase.KubeadmCertFrontProxyClient(), }, externalCA: true, }, { name: "external front-proxy-CA, renew only certificates not signed by front-proxy-CA for apiserver", component: constants.KubeAPIServer, certsShouldExist: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdAPIClient(), certsphase.KubeadmCertFrontProxyClient(), certsphase.KubeadmCertAPIServer(), certsphase.KubeadmCertKubeletClient(), }, certsShouldBeRenewed: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdAPIClient(), certsphase.KubeadmCertAPIServer(), certsphase.KubeadmCertKubeletClient(), }, externalFrontProxyCA: true, }, { name: "all CA exist, should be rotated for scheduler", component: constants.KubeScheduler, kubeConfigShouldExist: []string{ constants.SchedulerKubeConfigFileName, }, }, { name: "all CA 
exist, should be rotated for controller manager", component: constants.KubeControllerManager, kubeConfigShouldExist: []string{ constants.ControllerManagerKubeConfigFileName, }, }, { name: "missing a cert to renew", component: constants.Etcd, shouldErrorOnRenew: true, certsShouldExist: []*certsphase.KubeadmCert{ certsphase.KubeadmCertEtcdServer(), certsphase.KubeadmCertEtcdPeer(), }, }, { name: "no CA, cannot continue", component: constants.Etcd, skipCreateEtcdCA: true, shouldErrorOnRenew: true, }, } for i := range tests { test := tests[i] t.Run(test.name, func(t *testing.T) { pkiutiltesting.Reset() // Setup up basic requisites tmpDir := t.TempDir() cfg := testutil.GetDefaultInternalConfig(t) cfg.CertificatesDir = tmpDir if err := pkiutil.WriteCertAndKey(tmpDir, constants.CACertAndKeyBaseName, caCert, caKey); err != nil { t.Fatalf("couldn't write out CA: %v", err) } if test.externalCA { os.Remove(filepath.Join(tmpDir, constants.CAKeyName)) } if err := pkiutil.WriteCertAndKey(tmpDir, constants.FrontProxyCACertAndKeyBaseName, caCert, caKey); err != nil { t.Fatalf("couldn't write out front-proxy-CA: %v", err) } if test.externalFrontProxyCA { os.Remove(filepath.Join(tmpDir, constants.FrontProxyCAKeyName)) } if !test.skipCreateEtcdCA { if err := pkiutil.WriteCertAndKey(tmpDir, constants.EtcdCACertAndKeyBaseName, caCert, caKey); err != nil { t.Fatalf("couldn't write out etcd-CA: %v", err) } } certMaps := make(map[string]big.Int) // Create expected certs and load to recorde the serial numbers for _, kubeCert := range test.certsShouldExist { if err := kubeCert.CreateFromCA(cfg, caCert, caKey); err != nil { t.Fatalf("couldn't create certificate %q: %v", kubeCert.Name, err) } cert, err := pkiutil.TryLoadCertFromDisk(tmpDir, kubeCert.BaseName) if err != nil { t.Fatalf("couldn't load certificate %q: %v", kubeCert.Name, err) } certMaps[kubeCert.Name] = *cert.SerialNumber } // Create expected kubeconfigs for _, kubeConfig := range test.kubeConfigShouldExist { if err := 
kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpDir, cfg); err != nil { t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err) } newCerts, err := getEmbeddedCerts(tmpDir, kubeConfig) if err != nil { t.Fatalf("error reading embedded certs from %s: %v", kubeConfig, err) } certMaps[kubeConfig] = *newCerts[0].SerialNumber } // Renew everything rm, err := renewal.NewManager(&cfg.ClusterConfiguration, tmpDir) if err != nil { t.Fatalf("Failed to create the certificate renewal manager: %v", err) } err = renewCertsByComponent(cfg, test.component, rm) if test.shouldErrorOnRenew { if err == nil { t.Fatal("expected renewal error, got nothing") } // expected error, got error return } if err != nil { t.Fatalf("couldn't renew certificates: %v", err) } // See if the certificate serial numbers change for _, kubeCert := range test.certsShouldExist { newCert, err := pkiutil.TryLoadCertFromDisk(tmpDir, kubeCert.BaseName) if err != nil { t.Errorf("couldn't load new certificate %q: %v", kubeCert.Name, err) continue } oldSerial := certMaps[kubeCert.Name] shouldBeRenewed := true if test.certsShouldBeRenewed != nil { shouldBeRenewed = false for _, x := range test.certsShouldBeRenewed { if x.Name == kubeCert.Name { shouldBeRenewed = true } } } if shouldBeRenewed && oldSerial.Cmp(newCert.SerialNumber) == 0 { t.Errorf("certifitate %v was not reissued when expected", kubeCert.Name) } if !shouldBeRenewed && oldSerial.Cmp(newCert.SerialNumber) != 0 { t.Errorf("certifitate %v was reissued when not expected", kubeCert.Name) } } // See if the embedded certificate serial numbers change for _, kubeConfig := range test.kubeConfigShouldExist { newCerts, err := getEmbeddedCerts(tmpDir, kubeConfig) if err != nil { t.Fatalf("error reading embedded certs from %s: %v", kubeConfig, err) } oldSerial := certMaps[kubeConfig] if oldSerial.Cmp(newCerts[0].SerialNumber) == 0 { t.Errorf("certifitate %v was not reissued", kubeConfig) } } }) } } func getEmbeddedCerts(tmpDir, kubeConfig string) 
([]*x509.Certificate, error) { kubeconfigPath := filepath.Join(tmpDir, kubeConfig) newConfig, err := clientcmd.LoadFromFile(kubeconfigPath) if err != nil { return nil, errors.Wrapf(err, "failed to load kubeconfig file %s", kubeconfigPath) } authInfoName := newConfig.Contexts[newConfig.CurrentContext].AuthInfo authInfo := newConfig.AuthInfos[authInfoName] return certutil.ParseCertsPEM(authInfo.ClientCertificateData) } func TestGetPathManagerForUpgrade(t *testing.T) { externalEtcd := &kubeadmapi.InitConfiguration{ ClusterConfiguration: kubeadmapi.ClusterConfiguration{ Etcd: kubeadmapi.Etcd{ External: &kubeadmapi.ExternalEtcd{ Endpoints: []string{"10.100.0.1:2379", "10.100.0.2:2379", "10.100.0.3:2379"}, }, }, }, } stackedEtcd := &kubeadmapi.InitConfiguration{} tests := []struct { name string cfg *kubeadmapi.InitConfiguration etcdUpgrade bool shouldDeleteEtcd bool }{ { name: "external etcd but no etcd upgrade", cfg: externalEtcd, etcdUpgrade: false, shouldDeleteEtcd: true, }, { name: "external etcd with etcd upgrade", cfg: externalEtcd, etcdUpgrade: true, shouldDeleteEtcd: true, }, { name: "stacked etcd but no etcd upgrade", cfg: stackedEtcd, etcdUpgrade: false, shouldDeleteEtcd: true, }, { name: "stacked etcd with etcd upgrade", cfg: stackedEtcd, etcdUpgrade: true, shouldDeleteEtcd: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Use a temporary directory tmpdir, err := os.MkdirTemp("", "TestGetPathManagerForUpgrade") if err != nil { t.Fatalf("unexpected error making temporary directory: %v", err) } defer func() { os.RemoveAll(tmpdir) }() pathmgr, err := GetPathManagerForUpgrade(tmpdir, "", test.cfg, test.etcdUpgrade) if err != nil { t.Fatalf("unexpected error creating path manager: %v", err) } if _, err := os.Stat(pathmgr.BackupManifestDir()); os.IsNotExist(err) { t.Errorf("expected manifest dir %s to exist, but it did not (%v)", pathmgr.BackupManifestDir(), err) } if _, err := os.Stat(pathmgr.BackupEtcdDir()); os.IsNotExist(err) { 
t.Errorf("expected etcd dir %s to exist, but it did not (%v)", pathmgr.BackupEtcdDir(), err) } if err := pathmgr.CleanupDirs(); err != nil { t.Fatalf("unexpected error cleaning up directories: %v", err) } if _, err := os.Stat(pathmgr.BackupManifestDir()); os.IsNotExist(err) { t.Errorf("expected manifest dir %s to exist, but it did not (%v)", pathmgr.BackupManifestDir(), err) } if test.shouldDeleteEtcd { if _, err := os.Stat(pathmgr.BackupEtcdDir()); !os.IsNotExist(err) { t.Errorf("expected etcd dir %s not to exist, but it did (%v)", pathmgr.BackupEtcdDir(), err) } } else { if _, err := os.Stat(pathmgr.BackupEtcdDir()); os.IsNotExist(err) { t.Errorf("expected etcd dir %s to exist, but it did not", pathmgr.BackupEtcdDir()) } } }) } } func TestGetEtcdImageTagFromStaticPod(t *testing.T) { const expectedEtcdVersion = "3.1.12" const etcdStaticPod = `apiVersion: v1 kind: Pod metadata: labels: component: etcd tier: control-plane name: etcd namespace: kube-system spec: containers: - name: etcd image: registry.k8s.io/etcd:` + expectedEtcdVersion manifestsDir, err := os.MkdirTemp("", "GetEtcdImageTagFromStaticPod-test-manifests") if err != nil { t.Fatalf("Unable to create temporary directory: %v", err) } defer os.RemoveAll(manifestsDir) if err = os.WriteFile(constants.GetStaticPodFilepath(constants.Etcd, manifestsDir), []byte(etcdStaticPod), 0644); err != nil { t.Fatalf("Unable to create test static pod manifest: %v", err) } got, err := GetEtcdImageTagFromStaticPod(manifestsDir) if err != nil { t.Errorf("unexpected error: %v", err) } else if got != expectedEtcdVersion { t.Errorf("unexpected result:\n\tgot: %q\n\texpected: %q", got, expectedEtcdVersion) } }
go
github
https://github.com/kubernetes/kubernetes
cmd/kubeadm/app/phases/upgrade/staticpods_test.go
# Added Fortran compiler support to config. Currently useful only for # try_compile call. try_run works but is untested for most of Fortran # compilers (they must define linker_exe first). # Pearu Peterson import os import signal import subprocess import sys import textwrap import warnings from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file from distutils.ccompiler import CompileError, LinkError import distutils from numpy.distutils.exec_command import filepath_from_subprocess_output from numpy.distutils.mingw32ccompiler import generate_manifest from numpy.distutils.command.autodist import (check_gcc_function_attribute, check_gcc_function_attribute_with_intrinsics, check_gcc_variable_attribute, check_inline, check_restrict, check_compiler_gcc4) LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' class config(old_config): old_config.user_options += [ ('fcompiler=', None, "specify the Fortran compiler type"), ] def initialize_options(self): self.fcompiler = None old_config.initialize_options(self) def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler if sys.platform == 'win32' and (self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw')): # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: # initialize call query_vcvarsall, which throws an IOError, and # causes an error along the way without much information. We try to # catch it here, hoping it is early enough, and print an helpful # message instead of Error: None. if not self.compiler.initialized: try: self.compiler.initialize() except IOError as e: msg = textwrap.dedent("""\ Could not initialize compiler instance: do you have Visual Studio installed? If you are trying to build with MinGW, please use "python setup.py build -c mingw32" instead. 
If you have Visual Studio installed, check it is correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, VS 2010 for >= 3.3). Original exception was: %s, and the Compiler class was %s ============================================================================""") \ % (e, self.compiler.__class__.__name__) print(textwrap.dedent("""\ ============================================================================""")) raise distutils.errors.DistutilsPlatformError(msg) # After MSVC is initialized, add an explicit /MANIFEST to linker # flags. See issues gh-4245 and gh-4101 for details. Also # relevant are issues 4431 and 16296 on the Python bug tracker. from distutils import msvc9compiler if msvc9compiler.get_build_version() >= 10: for ldflags in [self.compiler.ldflags_shared, self.compiler.ldflags_shared_debug]: if '/MANIFEST' not in ldflags: ldflags.append('/MANIFEST') if not isinstance(self.fcompiler, FCompiler): self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, c_compiler=self.compiler) if self.fcompiler is not None: self.fcompiler.customize(self.distribution) if self.fcompiler.get_version(): self.fcompiler.customize_cmd(self) self.fcompiler.show_customization() def _wrap_method(self, mth, lang, args): from distutils.ccompiler import CompileError from distutils.errors import DistutilsExecError save_compiler = self.compiler if lang in ['f77', 'f90']: self.compiler = self.fcompiler try: ret = mth(*((self,)+args)) except (DistutilsExecError, CompileError) as e: str(e) self.compiler = save_compiler raise CompileError self.compiler = save_compiler return ret def _compile (self, body, headers, include_dirs, lang): src, obj = self._wrap_method(old_config._compile, lang, (body, headers, include_dirs, lang)) # _compile in unixcompiler.py sometimes creates .d dependency files. # Clean them up. 
self.temp_files.append(obj + '.d') return src, obj def _link (self, body, headers, include_dirs, libraries, library_dirs, lang): if self.compiler.compiler_type=='msvc': libraries = (libraries or [])[:] library_dirs = (library_dirs or [])[:] if lang in ['f77', 'f90']: lang = 'c' # always use system linker when using MSVC compiler if self.fcompiler: for d in self.fcompiler.library_dirs or []: # correct path when compiling in Cygwin but with # normal Win Python if d.startswith('/usr/lib'): try: d = subprocess.check_output(['cygpath', '-w', d]) except (OSError, subprocess.CalledProcessError): pass else: d = filepath_from_subprocess_output(d) library_dirs.append(d) for libname in self.fcompiler.libraries or []: if libname not in libraries: libraries.append(libname) for libname in libraries: if libname.startswith('msvc'): continue fileexists = False for libdir in library_dirs or []: libfile = os.path.join(libdir, '%s.lib' % (libname)) if os.path.isfile(libfile): fileexists = True break if fileexists: continue # make g77-compiled static libs available to MSVC fileexists = False for libdir in library_dirs: libfile = os.path.join(libdir, 'lib%s.a' % (libname)) if os.path.isfile(libfile): # copy libname.a file to name.lib so that MSVC linker # can find it libfile2 = os.path.join(libdir, '%s.lib' % (libname)) copy_file(libfile, libfile2) self.temp_files.append(libfile2) fileexists = True break if fileexists: continue log.warn('could not find library %r in directories %s' \ % (libname, library_dirs)) elif self.compiler.compiler_type == 'mingw32': generate_manifest(self) return self._wrap_method(old_config._link, lang, (body, headers, include_dirs, libraries, library_dirs, lang)) def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): self._check_compiler() return self.try_compile( "/* we need a dummy line to make distutils happy */", [header], include_dirs) def check_decl(self, symbol, headers=None, include_dirs=None): self._check_compiler() body = 
textwrap.dedent(""" int main(void) { #ifndef %s (void) %s; #endif ; return 0; }""") % (symbol, symbol) return self.try_compile(body, headers, include_dirs) def check_macro_true(self, symbol, headers=None, include_dirs=None): self._check_compiler() body = textwrap.dedent(""" int main(void) { #if %s #else #error false or undefined macro #endif ; return 0; }""") % (symbol,) return self.try_compile(body, headers, include_dirs) def check_type(self, type_name, headers=None, include_dirs=None, library_dirs=None): """Check type availability. Return True if the type can be compiled, False otherwise""" self._check_compiler() # First check the type can be compiled body = textwrap.dedent(r""" int main(void) { if ((%(name)s *) 0) return 0; if (sizeof (%(name)s)) return 0; } """) % {'name': type_name} st = False try: try: self._compile(body % {'type': type_name}, headers, include_dirs, 'c') st = True except distutils.errors.CompileError: st = False finally: self._clean() return st def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): """Check size of a given type.""" self._check_compiler() # First check the type can be compiled body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; test_array [0] = 0 ; return 0; } """) self._compile(body % {'type': type_name}, headers, include_dirs, 'c') self._clean() if expected: body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; test_array [0] = 0 ; return 0; } """) for size in expected: try: self._compile(body % {'type': type_name, 'size': size}, headers, include_dirs, 'c') self._clean() return size except CompileError: pass # this fails to *compile* if size > sizeof(type) body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { 
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; test_array [0] = 0 ; return 0; } """) # The principle is simple: we first find low and high bounds of size # for the type, where low/high are looked up on a log scale. Then, we # do a binary search to find the exact size between low and high low = 0 mid = 0 while True: try: self._compile(body % {'type': type_name, 'size': mid}, headers, include_dirs, 'c') self._clean() break except CompileError: #log.info("failure to test for bound %d" % mid) low = mid + 1 mid = 2 * mid + 1 high = mid # Binary search: while low != high: mid = (high - low) // 2 + low try: self._compile(body % {'type': type_name, 'size': mid}, headers, include_dirs, 'c') self._clean() high = mid except CompileError: low = mid + 1 return low def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None): # clean up distutils's config a bit: add void to main(), and # return a value. self._check_compiler() body = [] if decl: if type(decl) == str: body.append(decl) else: body.append("int %s (void);" % func) # Handle MSVC intrinsics: force MS compiler to make a function call. # Useful to test for some functions when built with optimization on, to # avoid build error because the intrinsic and our 'fake' test # declaration do not match. body.append("#ifdef _MSC_VER") body.append("#pragma function(%s)" % func) body.append("#endif") body.append("int main (void) {") if call: if call_args is None: call_args = '' body.append(" %s(%s);" % (func, call_args)) else: body.append(" %s;" % func) body.append(" return 0;") body.append("}") body = '\n'.join(body) + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) def check_funcs_once(self, funcs, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None): """Check a list of functions at once. 
This is useful to speed up things, since all the functions in the funcs list will be put in one compilation unit. Arguments --------- funcs : seq list of functions to test include_dirs : seq list of header paths libraries : seq list of libraries to link the code snippet to library_dirs : seq list of library paths decl : dict for every (key, value), the declaration in the value will be used for function in key. If a function is not in the dictionary, no declaration will be used. call : dict for every item (f, value), if the value is True, a call will be done to the function f. """ self._check_compiler() body = [] if decl: for f, v in decl.items(): if v: body.append("int %s (void);" % f) # Handle MS intrinsics. See check_func for more info. body.append("#ifdef _MSC_VER") for func in funcs: body.append("#pragma function(%s)" % func) body.append("#endif") body.append("int main (void) {") if call: for f in funcs: if f in call and call[f]: if not (call_args and f in call_args and call_args[f]): args = '' else: args = call_args[f] body.append(" %s(%s);" % (f, args)) else: body.append(" %s;" % f) else: for f in funcs: body.append(" %s;" % f) body.append(" return 0;") body.append("}") body = '\n'.join(body) + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) def check_inline(self): """Return the inline keyword recognized by the compiler, empty string otherwise.""" return check_inline(self) def check_restrict(self): """Return the restrict keyword recognized by the compiler, empty string otherwise.""" return check_restrict(self) def check_compiler_gcc4(self): """Return True if the C compiler is gcc >= 4.""" return check_compiler_gcc4(self) def check_gcc_function_attribute(self, attribute, name): return check_gcc_function_attribute(self, attribute, name) def check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include): return check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include) def 
check_gcc_variable_attribute(self, attribute): return check_gcc_variable_attribute(self, attribute) def get_output(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c", use_tee=None): """Try to compile, link to an executable, and run a program built from 'body' and 'headers'. Returns the exit status code of the program and its output. """ # 2008-11-16, RemoveMe warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" "Usage of get_output is deprecated: please do not \n" "use it anymore, and avoid configuration checks \n" "involving running executable on the target machine.\n" "+++++++++++++++++++++++++++++++++++++++++++++++++\n", DeprecationWarning, stacklevel=2) self._check_compiler() exitcode, output = 255, '' try: grabber = GrabStdout() try: src, obj, exe = self._link(body, headers, include_dirs, libraries, library_dirs, lang) grabber.restore() except Exception: output = grabber.data grabber.restore() raise exe = os.path.join('.', exe) try: # specify cwd arg for consistency with # historic usage pattern of exec_command() # also, note that exe appears to be a string, # which exec_command() handled, but we now # use a list for check_output() -- this assumes # that exe is always a single command output = subprocess.check_output([exe], cwd='.') except subprocess.CalledProcessError as exc: exitstatus = exc.returncode output = '' except OSError: # preserve the EnvironmentError exit status # used historically in exec_command() exitstatus = 127 output = '' else: output = filepath_from_subprocess_output(output) if hasattr(os, 'WEXITSTATUS'): exitcode = os.WEXITSTATUS(exitstatus) if os.WIFSIGNALED(exitstatus): sig = os.WTERMSIG(exitstatus) log.error('subprocess exited with signal %d' % (sig,)) if sig == signal.SIGINT: # control-C raise KeyboardInterrupt else: exitcode = exitstatus log.info("success!") except (CompileError, LinkError): log.info("failure.") self._clean() return exitcode, output class GrabStdout: def 
__init__(self): self.sys_stdout = sys.stdout self.data = '' sys.stdout = self def write (self, data): self.sys_stdout.write(data) self.data += data def flush (self): self.sys_stdout.flush() def restore(self): sys.stdout = self.sys_stdout
unknown
codeparrot/codeparrot-clean
// run // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This test makes sure that ambiguously live arguments work correctly. package main import ( "runtime" ) type HeapObj [8]int64 type StkObj struct { h *HeapObj } var n int var c int = -1 func gc() { // encourage heap object to be collected, and have its finalizer run. runtime.GC() runtime.GC() runtime.GC() n++ } var null StkObj var sink *HeapObj //go:noinline func use(p *StkObj) { } //go:noinline func f(s StkObj, b bool) { var p *StkObj if b { p = &s } else { p = &null } // use is required here to prevent the conditional // code above from being executed after the first gc() call. use(p) // If b==false, h should be collected here. gc() // 0 sink = p.h gc() // 1 sink = nil // If b==true, h should be collected here. gc() // 2 } func fTrue() { var s StkObj s.h = new(HeapObj) c = -1 n = 0 runtime.SetFinalizer(s.h, func(h *HeapObj) { // Remember at what phase the heap object was collected. c = n }) f(s, true) if c != 2 { panic("bad liveness") } } func fFalse() { var s StkObj s.h = new(HeapObj) c = -1 n = 0 runtime.SetFinalizer(s.h, func(h *HeapObj) { // Remember at what phase the heap object was collected. c = n }) f(s, false) if c != 0 { panic("bad liveness") } } func main() { fTrue() fFalse() }
go
github
https://github.com/golang/go
test/stackobj3.go
// ignore-tidy-filelength use std::iter; use std::ops::ControlFlow; use either::Either; use hir::{ClosureKind, Path}; use rustc_data_structures::fx::FxIndexSet; use rustc_errors::codes::*; use rustc_errors::{Applicability, Diag, MultiSpan, struct_span_code_err}; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::intravisit::{Visitor, walk_block, walk_expr}; use rustc_hir::{CoroutineDesugaring, CoroutineKind, CoroutineSource, LangItem, PatField}; use rustc_middle::bug; use rustc_middle::hir::nested_filter::OnlyBodies; use rustc_middle::mir::{ self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, ConstraintCategory, FakeBorrowKind, FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, MutBorrowKind, Operand, Place, PlaceRef, PlaceTy, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm, VarDebugInfoContents, }; use rustc_middle::ty::print::PrintTraitRefExt as _; use rustc_middle::ty::{ self, PredicateKind, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor, Upcast, suggest_constraining_type_params, }; use rustc_mir_dataflow::move_paths::{InitKind, MoveOutIndex, MovePathIndex}; use rustc_span::def_id::{DefId, LocalDefId}; use rustc_span::hygiene::DesugaringKind; use rustc_span::{BytePos, Ident, Span, Symbol, kw, sym}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; use rustc_trait_selection::error_reporting::traits::FindExprBySpan; use rustc_trait_selection::error_reporting::traits::call_kind::CallKind; use rustc_trait_selection::infer::InferCtxtExt; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; use rustc_trait_selection::traits::{ Obligation, ObligationCause, ObligationCtxt, supertrait_def_ids, }; use tracing::{debug, instrument}; use super::explain_borrow::{BorrowExplanation, LaterUseKind}; use super::{DescribePlaceOpt, RegionName, RegionNameSource, UseSpans}; use crate::borrow_set::{BorrowData, TwoPhaseActivation}; use 
crate::diagnostics::conflict_errors::StorageDeadOrDrop::LocalStorageDead; use crate::diagnostics::{CapturedMessageOpt, call_kind, find_all_local_uses}; use crate::prefixes::IsPrefixOf; use crate::{InitializationRequiringAction, MirBorrowckCtxt, WriteKind, borrowck_errors}; #[derive(Debug)] struct MoveSite { /// Index of the "move out" that we found. The `MoveData` can /// then tell us where the move occurred. moi: MoveOutIndex, /// `true` if we traversed a back edge while walking from the point /// of error to the move site. traversed_back_edge: bool, } /// Which case a StorageDeadOrDrop is for. #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum StorageDeadOrDrop<'tcx> { LocalStorageDead, BoxedStorageDead, Destructor(Ty<'tcx>), } impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { pub(crate) fn report_use_of_moved_or_uninitialized( &mut self, location: Location, desired_action: InitializationRequiringAction, (moved_place, used_place, span): (PlaceRef<'tcx>, PlaceRef<'tcx>, Span), mpi: MovePathIndex, ) { debug!( "report_use_of_moved_or_uninitialized: location={:?} desired_action={:?} \ moved_place={:?} used_place={:?} span={:?} mpi={:?}", location, desired_action, moved_place, used_place, span, mpi ); let use_spans = self.move_spans(moved_place, location).or_else(|| self.borrow_spans(span, location)); let span = use_spans.args_or_use(); let (move_site_vec, maybe_reinitialized_locations) = self.get_moved_indexes(location, mpi); debug!( "report_use_of_moved_or_uninitialized: move_site_vec={:?} use_spans={:?}", move_site_vec, use_spans ); let move_out_indices: Vec<_> = move_site_vec.iter().map(|move_site| move_site.moi).collect(); if move_out_indices.is_empty() { let root_local = used_place.local; if !self.uninitialized_error_reported.insert(root_local) { debug!( "report_use_of_moved_or_uninitialized place: error about {:?} suppressed", root_local ); return; } let err = self.report_use_of_uninitialized( mpi, used_place, moved_place, desired_action, span, use_spans, 
); self.buffer_error(err); } else { if let Some((reported_place, _)) = self.has_move_error(&move_out_indices) { if used_place.is_prefix_of(*reported_place) { debug!( "report_use_of_moved_or_uninitialized place: error suppressed mois={:?}", move_out_indices ); return; } } let is_partial_move = move_site_vec.iter().any(|move_site| { let move_out = self.move_data.moves[(*move_site).moi]; let moved_place = &self.move_data.move_paths[move_out.path].place; // `*(_1)` where `_1` is a `Box` is actually a move out. let is_box_move = moved_place.as_ref().projection == [ProjectionElem::Deref] && self.body.local_decls[moved_place.local].ty.is_box(); !is_box_move && used_place != moved_place.as_ref() && used_place.is_prefix_of(moved_place.as_ref()) }); let partial_str = if is_partial_move { "partial " } else { "" }; let partially_str = if is_partial_move { "partially " } else { "" }; let mut err = self.cannot_act_on_moved_value( span, desired_action.as_noun(), partially_str, self.describe_place_with_options( moved_place, DescribePlaceOpt { including_downcast: true, including_tuple_field: true }, ), ); let reinit_spans = maybe_reinitialized_locations .iter() .take(3) .map(|loc| { self.move_spans(self.move_data.move_paths[mpi].place.as_ref(), *loc) .args_or_use() }) .collect::<Vec<Span>>(); let reinits = maybe_reinitialized_locations.len(); if reinits == 1 { err.span_label(reinit_spans[0], "this reinitialization might get skipped"); } else if reinits > 1 { err.span_note( MultiSpan::from_spans(reinit_spans), if reinits <= 3 { format!("these {reinits} reinitializations might get skipped") } else { format!( "these 3 reinitializations and {} other{} might get skipped", reinits - 3, if reinits == 4 { "" } else { "s" } ) }, ); } let closure = self.add_moved_or_invoked_closure_note(location, used_place, &mut err); let mut is_loop_move = false; let mut seen_spans = FxIndexSet::default(); for move_site in &move_site_vec { let move_out = self.move_data.moves[(*move_site).moi]; let 
moved_place = &self.move_data.move_paths[move_out.path].place; let move_spans = self.move_spans(moved_place.as_ref(), move_out.source); let move_span = move_spans.args_or_use(); let is_move_msg = move_spans.for_closure(); let is_loop_message = location == move_out.source || move_site.traversed_back_edge; if location == move_out.source { is_loop_move = true; } let mut has_suggest_reborrow = false; if !seen_spans.contains(&move_span) { self.suggest_ref_or_clone( mpi, &mut err, move_spans, moved_place.as_ref(), &mut has_suggest_reborrow, closure, ); let msg_opt = CapturedMessageOpt { is_partial_move, is_loop_message, is_move_msg, is_loop_move, has_suggest_reborrow, maybe_reinitialized_locations_is_empty: maybe_reinitialized_locations .is_empty(), }; self.explain_captures( &mut err, span, move_span, move_spans, *moved_place, msg_opt, ); } seen_spans.insert(move_span); } use_spans.var_path_only_subdiag(&mut err, desired_action); if !is_loop_move { err.span_label( span, format!( "value {} here after {partial_str}move", desired_action.as_verb_in_past_tense(), ), ); } let ty = used_place.ty(self.body, self.infcx.tcx).ty; let needs_note = match ty.kind() { ty::Closure(id, _) => { self.infcx.tcx.closure_kind_origin(id.expect_local()).is_none() } _ => true, }; let mpi = self.move_data.moves[move_out_indices[0]].path; let place = &self.move_data.move_paths[mpi].place; let ty = place.ty(self.body, self.infcx.tcx).ty; if self.infcx.param_env.caller_bounds().iter().any(|c| { c.as_trait_clause().is_some_and(|pred| { pred.skip_binder().self_ty() == ty && self.infcx.tcx.is_fn_trait(pred.def_id()) }) }) { // Suppress the next suggestion since we don't want to put more bounds onto // something that already has `Fn`-like bounds (or is a closure), so we can't // restrict anyways. 
} else { let copy_did = self.infcx.tcx.require_lang_item(LangItem::Copy, span); self.suggest_adding_bounds(&mut err, ty, copy_did, span); } let opt_name = self.describe_place_with_options( place.as_ref(), DescribePlaceOpt { including_downcast: true, including_tuple_field: true }, ); let note_msg = match opt_name { Some(name) => format!("`{name}`"), None => "value".to_owned(), }; if needs_note { if let Some(local) = place.as_local() { let span = self.body.local_decls[local].source_info.span; err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label { is_partial_move, ty, place: &note_msg, span, }); } else { err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Note { is_partial_move, ty, place: &note_msg, }); }; } if let UseSpans::FnSelfUse { kind: CallKind::DerefCoercion { deref_target_span, deref_target_ty, .. }, .. } = use_spans { err.note(format!( "{} occurs due to deref coercion to `{deref_target_ty}`", desired_action.as_noun(), )); // Check first whether the source is accessible (issue #87060) if let Some(deref_target_span) = deref_target_span && self.infcx.tcx.sess.source_map().is_span_accessible(deref_target_span) { err.span_note(deref_target_span, "deref defined here"); } } self.buffer_move_error(move_out_indices, (used_place, err)); } } fn suggest_ref_or_clone( &self, mpi: MovePathIndex, err: &mut Diag<'infcx>, move_spans: UseSpans<'tcx>, moved_place: PlaceRef<'tcx>, has_suggest_reborrow: &mut bool, moved_or_invoked_closure: bool, ) { let move_span = match move_spans { UseSpans::ClosureUse { capture_kind_span, .. 
} => capture_kind_span, _ => move_spans.args_or_use(), }; struct ExpressionFinder<'hir> { expr_span: Span, expr: Option<&'hir hir::Expr<'hir>>, pat: Option<&'hir hir::Pat<'hir>>, parent_pat: Option<&'hir hir::Pat<'hir>>, tcx: TyCtxt<'hir>, } impl<'hir> Visitor<'hir> for ExpressionFinder<'hir> { type NestedFilter = OnlyBodies; fn maybe_tcx(&mut self) -> Self::MaybeTyCtxt { self.tcx } fn visit_expr(&mut self, e: &'hir hir::Expr<'hir>) { if e.span == self.expr_span { self.expr = Some(e); } hir::intravisit::walk_expr(self, e); } fn visit_pat(&mut self, p: &'hir hir::Pat<'hir>) { if p.span == self.expr_span { self.pat = Some(p); } if let hir::PatKind::Binding(hir::BindingMode::NONE, _, i, sub) = p.kind { if i.span == self.expr_span || p.span == self.expr_span { self.pat = Some(p); } // Check if we are in a situation of `ident @ ident` where we want to suggest // `ref ident @ ref ident` or `ref ident @ Struct { ref ident }`. if let Some(subpat) = sub && self.pat.is_none() { self.visit_pat(subpat); if self.pat.is_some() { self.parent_pat = Some(p); } return; } } hir::intravisit::walk_pat(self, p); } } let tcx = self.infcx.tcx; if let Some(body) = tcx.hir_maybe_body_owned_by(self.mir_def_id()) { let expr = body.value; let place = &self.move_data.move_paths[mpi].place; let span = place.as_local().map(|local| self.body.local_decls[local].source_info.span); let mut finder = ExpressionFinder { expr_span: move_span, expr: None, pat: None, parent_pat: None, tcx, }; finder.visit_expr(expr); if let Some(span) = span && let Some(expr) = finder.expr { for (_, expr) in tcx.hir_parent_iter(expr.hir_id) { if let hir::Node::Expr(expr) = expr { if expr.span.contains(span) { // If the let binding occurs within the same loop, then that // loop isn't relevant, like in the following, the outermost `loop` // doesn't play into `x` being moved. 
// ``` // loop { // let x = String::new(); // loop { // foo(x); // } // } // ``` break; } if let hir::ExprKind::Loop(.., loop_span) = expr.kind { err.span_label(loop_span, "inside of this loop"); } } } let typeck = self.infcx.tcx.typeck(self.mir_def_id()); let parent = self.infcx.tcx.parent_hir_node(expr.hir_id); let (def_id, args, offset) = if let hir::Node::Expr(parent_expr) = parent && let hir::ExprKind::MethodCall(_, _, args, _) = parent_expr.kind { let def_id = typeck.type_dependent_def_id(parent_expr.hir_id); (def_id, args, 1) } else if let hir::Node::Expr(parent_expr) = parent && let hir::ExprKind::Call(call, args) = parent_expr.kind && let ty::FnDef(def_id, _) = typeck.node_type(call.hir_id).kind() { (Some(*def_id), args, 0) } else { (None, &[][..], 0) }; let ty = place.ty(self.body, self.infcx.tcx).ty; let mut can_suggest_clone = true; if let Some(def_id) = def_id && let Some(pos) = args.iter().position(|arg| arg.hir_id == expr.hir_id) { // The move occurred as one of the arguments to a function call. Is that // argument generic? `def_id` can't be a closure here, so using `fn_sig` is fine let arg_param = if self.infcx.tcx.def_kind(def_id).is_fn_like() && let sig = self.infcx.tcx.fn_sig(def_id).instantiate_identity().skip_binder() && let Some(arg_ty) = sig.inputs().get(pos + offset) && let ty::Param(arg_param) = arg_ty.kind() { Some(arg_param) } else { None }; // If the moved value is a mut reference, it is used in a // generic function and it's type is a generic param, it can be // reborrowed to avoid moving. // for example: // struct Y(u32); // x's type is '& mut Y' and it is used in `fn generic<T>(x: T) {}`. if let ty::Ref(_, _, hir::Mutability::Mut) = ty.kind() && arg_param.is_some() { *has_suggest_reborrow = true; self.suggest_reborrow(err, expr.span, moved_place); return; } // If the moved place is used generically by the callee and a reference to it // would still satisfy any bounds on its type, suggest borrowing. 
if let Some(&param) = arg_param && let hir::Node::Expr(call_expr) = parent && let Some(ref_mutability) = self.suggest_borrow_generic_arg( err, typeck, call_expr, def_id, param, moved_place, pos + offset, ty, expr.span, ) { can_suggest_clone = ref_mutability.is_mut(); } else if let Some(local_def_id) = def_id.as_local() && let node = self.infcx.tcx.hir_node_by_def_id(local_def_id) && let Some(fn_decl) = node.fn_decl() && let Some(ident) = node.ident() && let Some(arg) = fn_decl.inputs.get(pos + offset) { // If we can't suggest borrowing in the call, but the function definition // is local, instead offer changing the function to borrow that argument. let mut span: MultiSpan = arg.span.into(); span.push_span_label( arg.span, "this parameter takes ownership of the value".to_string(), ); let descr = match node.fn_kind() { Some(hir::intravisit::FnKind::ItemFn(..)) | None => "function", Some(hir::intravisit::FnKind::Method(..)) => "method", Some(hir::intravisit::FnKind::Closure) => "closure", }; span.push_span_label(ident.span, format!("in this {descr}")); err.span_note( span, format!( "consider changing this parameter type in {descr} `{ident}` to \ borrow instead if owning the value isn't necessary", ), ); } } if let hir::Node::Expr(parent_expr) = parent && let hir::ExprKind::Call(call_expr, _) = parent_expr.kind && let hir::ExprKind::Path(qpath) = call_expr.kind && tcx.qpath_is_lang_item(qpath, LangItem::IntoIterIntoIter) { // Do not suggest `.clone()` in a `for` loop, we already suggest borrowing. } else if let UseSpans::FnSelfUse { kind: CallKind::Normal { .. }, .. } = move_spans { // We already suggest cloning for these cases in `explain_captures`. } else if moved_or_invoked_closure { // Do not suggest `closure.clone()()`. } else if let UseSpans::ClosureUse { closure_kind: ClosureKind::Coroutine(CoroutineKind::Desugared(_, CoroutineSource::Block)), .. 
} = move_spans && can_suggest_clone { self.suggest_cloning(err, place.as_ref(), ty, expr, Some(move_spans)); } else if self.suggest_hoisting_call_outside_loop(err, expr) && can_suggest_clone { // The place where the type moves would be misleading to suggest clone. // #121466 self.suggest_cloning(err, place.as_ref(), ty, expr, Some(move_spans)); } } self.suggest_ref_for_dbg_args(expr, place, move_span, err); // it's useless to suggest inserting `ref` when the span don't comes from local code if let Some(pat) = finder.pat && !move_span.is_dummy() && !self.infcx.tcx.sess.source_map().is_imported(move_span) { let mut sugg = vec![(pat.span.shrink_to_lo(), "ref ".to_string())]; if let Some(pat) = finder.parent_pat { sugg.insert(0, (pat.span.shrink_to_lo(), "ref ".to_string())); } err.multipart_suggestion_verbose( "borrow this binding in the pattern to avoid moving the value", sugg, Applicability::MachineApplicable, ); } } } // for dbg!(x) which may take ownership, suggest dbg!(&x) instead // but here we actually do not check whether the macro name is `dbg!` // so that we may extend the scope a bit larger to cover more cases fn suggest_ref_for_dbg_args( &self, body: &hir::Expr<'_>, place: &Place<'tcx>, move_span: Span, err: &mut Diag<'infcx>, ) { let var_info = self.body.var_debug_info.iter().find(|info| match info.value { VarDebugInfoContents::Place(ref p) => p == place, _ => false, }); let Some(var_info) = var_info else { return }; let arg_name = var_info.name; struct MatchArgFinder { expr_span: Span, match_arg_span: Option<Span>, arg_name: Symbol, } impl Visitor<'_> for MatchArgFinder { fn visit_expr(&mut self, e: &hir::Expr<'_>) { // dbg! is expanded into a match pattern, we need to find the right argument span if let hir::ExprKind::Match(expr, ..) = &e.kind && let hir::ExprKind::Path(hir::QPath::Resolved( _, path @ Path { segments: [seg], .. 
}, )) = &expr.kind && seg.ident.name == self.arg_name && self.expr_span.source_callsite().contains(expr.span) { self.match_arg_span = Some(path.span); } hir::intravisit::walk_expr(self, e); } } let mut finder = MatchArgFinder { expr_span: move_span, match_arg_span: None, arg_name }; finder.visit_expr(body); if let Some(macro_arg_span) = finder.match_arg_span { err.span_suggestion_verbose( macro_arg_span.shrink_to_lo(), "consider borrowing instead of transferring ownership", "&", Applicability::MachineApplicable, ); } } pub(crate) fn suggest_reborrow( &self, err: &mut Diag<'infcx>, span: Span, moved_place: PlaceRef<'tcx>, ) { err.span_suggestion_verbose( span.shrink_to_lo(), format!( "consider creating a fresh reborrow of {} here", self.describe_place(moved_place) .map(|n| format!("`{n}`")) .unwrap_or_else(|| "the mutable reference".to_string()), ), "&mut *", Applicability::MachineApplicable, ); } /// If a place is used after being moved as an argument to a function, the function is generic /// in that argument, and a reference to the argument's type would still satisfy the function's /// bounds, suggest borrowing. This covers, e.g., borrowing an `impl Fn()` argument being passed /// in an `impl FnOnce()` position. /// Returns `Some(mutability)` when suggesting to borrow with mutability `mutability`, or `None` /// if no suggestion is made. fn suggest_borrow_generic_arg( &self, err: &mut Diag<'_>, typeck: &ty::TypeckResults<'tcx>, call_expr: &hir::Expr<'tcx>, callee_did: DefId, param: ty::ParamTy, moved_place: PlaceRef<'tcx>, moved_arg_pos: usize, moved_arg_ty: Ty<'tcx>, place_span: Span, ) -> Option<ty::Mutability> { let tcx = self.infcx.tcx; let sig = tcx.fn_sig(callee_did).instantiate_identity().skip_binder(); let clauses = tcx.predicates_of(callee_did); let generic_args = match call_expr.kind { // For method calls, generic arguments are attached to the call node. hir::ExprKind::MethodCall(..) 
=> typeck.node_args_opt(call_expr.hir_id)?, // For normal calls, generic arguments are in the callee's type. // This diagnostic is only run for `FnDef` callees. hir::ExprKind::Call(callee, _) if let &ty::FnDef(_, args) = typeck.node_type(callee.hir_id).kind() => { args } _ => return None, }; // First, is there at least one method on one of `param`'s trait bounds? // This keeps us from suggesting borrowing the argument to `mem::drop`, e.g. if !clauses.instantiate_identity(tcx).predicates.iter().any(|clause| { clause.as_trait_clause().is_some_and(|tc| { tc.self_ty().skip_binder().is_param(param.index) && tc.polarity() == ty::PredicatePolarity::Positive && supertrait_def_ids(tcx, tc.def_id()) .flat_map(|trait_did| tcx.associated_items(trait_did).in_definition_order()) .any(|item| item.is_method()) }) }) { return None; } // Try borrowing a shared reference first, then mutably. if let Some(mutbl) = [ty::Mutability::Not, ty::Mutability::Mut].into_iter().find(|&mutbl| { let re = self.infcx.tcx.lifetimes.re_erased; let ref_ty = Ty::new_ref(self.infcx.tcx, re, moved_arg_ty, mutbl); // Ensure that substituting `ref_ty` in the callee's signature doesn't break // other inputs or the return type. let new_args = tcx.mk_args_from_iter(generic_args.iter().enumerate().map( |(i, arg)| { if i == param.index as usize { ref_ty.into() } else { arg } }, )); let can_subst = |ty: Ty<'tcx>| { // Normalize before comparing to see through type aliases and projections. 
let old_ty = ty::EarlyBinder::bind(ty).instantiate(tcx, generic_args); let new_ty = ty::EarlyBinder::bind(ty).instantiate(tcx, new_args); if let Ok(old_ty) = tcx.try_normalize_erasing_regions( self.infcx.typing_env(self.infcx.param_env), old_ty, ) && let Ok(new_ty) = tcx.try_normalize_erasing_regions( self.infcx.typing_env(self.infcx.param_env), new_ty, ) { old_ty == new_ty } else { false } }; if !can_subst(sig.output()) || sig .inputs() .iter() .enumerate() .any(|(i, &input_ty)| i != moved_arg_pos && !can_subst(input_ty)) { return false; } // Test the callee's predicates, substituting in `ref_ty` for the moved argument type. clauses.instantiate(tcx, new_args).predicates.iter().all(|&(mut clause)| { // Normalize before testing to see through type aliases and projections. if let Ok(normalized) = tcx.try_normalize_erasing_regions( self.infcx.typing_env(self.infcx.param_env), clause, ) { clause = normalized; } self.infcx.predicate_must_hold_modulo_regions(&Obligation::new( tcx, ObligationCause::dummy(), self.infcx.param_env, clause, )) }) }) { let place_desc = if let Some(desc) = self.describe_place(moved_place) { format!("`{desc}`") } else { "here".to_owned() }; err.span_suggestion_verbose( place_span.shrink_to_lo(), format!("consider {}borrowing {place_desc}", mutbl.mutably_str()), mutbl.ref_prefix_str(), Applicability::MaybeIncorrect, ); Some(mutbl) } else { None } } fn report_use_of_uninitialized( &self, mpi: MovePathIndex, used_place: PlaceRef<'tcx>, moved_place: PlaceRef<'tcx>, desired_action: InitializationRequiringAction, span: Span, use_spans: UseSpans<'tcx>, ) -> Diag<'infcx> { // We need all statements in the body where the binding was assigned to later find all // the branching code paths where the binding *wasn't* assigned to. 
let inits = &self.move_data.init_path_map[mpi]; let move_path = &self.move_data.move_paths[mpi]; let decl_span = self.body.local_decls[move_path.place.local].source_info.span; let mut spans_set = FxIndexSet::default(); for init_idx in inits { let init = &self.move_data.inits[*init_idx]; let span = init.span(self.body); if !span.is_dummy() { spans_set.insert(span); } } let spans: Vec<_> = spans_set.into_iter().collect(); let (name, desc) = match self.describe_place_with_options( moved_place, DescribePlaceOpt { including_downcast: true, including_tuple_field: true }, ) { Some(name) => (format!("`{name}`"), format!("`{name}` ")), None => ("the variable".to_string(), String::new()), }; let path = match self.describe_place_with_options( used_place, DescribePlaceOpt { including_downcast: true, including_tuple_field: true }, ) { Some(name) => format!("`{name}`"), None => "value".to_string(), }; // We use the statements were the binding was initialized, and inspect the HIR to look // for the branching codepaths that aren't covered, to point at them. let tcx = self.infcx.tcx; let body = tcx.hir_body_owned_by(self.mir_def_id()); let mut visitor = ConditionVisitor { tcx, spans, name, errors: vec![] }; visitor.visit_body(&body); let spans = visitor.spans; let mut show_assign_sugg = false; let isnt_initialized = if let InitializationRequiringAction::PartialAssignment | InitializationRequiringAction::Assignment = desired_action { // The same error is emitted for bindings that are *sometimes* initialized and the ones // that are *partially* initialized by assigning to a field of an uninitialized // binding. We differentiate between them for more accurate wording here. 
"isn't fully initialized" } else if !spans.iter().any(|i| { // We filter these to avoid misleading wording in cases like the following, // where `x` has an `init`, but it is in the same place we're looking at: // ``` // let x; // x += 1; // ``` !i.contains(span) // We filter these to avoid incorrect main message on `match-cfg-fake-edges.rs` && !visitor .errors .iter() .map(|(sp, _)| *sp) .any(|sp| span < sp && !sp.contains(span)) }) { show_assign_sugg = true; "isn't initialized" } else { "is possibly-uninitialized" }; let used = desired_action.as_general_verb_in_past_tense(); let mut err = struct_span_code_err!( self.dcx(), span, E0381, "{used} binding {desc}{isnt_initialized}" ); use_spans.var_path_only_subdiag(&mut err, desired_action); if let InitializationRequiringAction::PartialAssignment | InitializationRequiringAction::Assignment = desired_action { err.help( "partial initialization isn't supported, fully initialize the binding with a \ default value and mutate it, or use `std::mem::MaybeUninit`", ); } err.span_label(span, format!("{path} {used} here but it {isnt_initialized}")); let mut shown = false; for (sp, label) in visitor.errors { if sp < span && !sp.overlaps(span) { // When we have a case like `match-cfg-fake-edges.rs`, we don't want to mention // match arms coming after the primary span because they aren't relevant: // ``` // let x; // match y { // _ if { x = 2; true } => {} // _ if { // x; //~ ERROR // false // } => {} // _ => {} // We don't want to point to this. 
// }; // ``` err.span_label(sp, label); shown = true; } } if !shown { for sp in &spans { if *sp < span && !sp.overlaps(span) { err.span_label(*sp, "binding initialized here in some conditions"); } } } err.span_label(decl_span, "binding declared here but left uninitialized"); if show_assign_sugg { struct LetVisitor { decl_span: Span, sugg_span: Option<Span>, } impl<'v> Visitor<'v> for LetVisitor { fn visit_stmt(&mut self, ex: &'v hir::Stmt<'v>) { if self.sugg_span.is_some() { return; } // FIXME: We make sure that this is a normal top-level binding, // but we could suggest `todo!()` for all uninitialized bindings in the pattern if let hir::StmtKind::Let(hir::LetStmt { span, ty, init: None, pat, .. }) = &ex.kind && let hir::PatKind::Binding(..) = pat.kind && span.contains(self.decl_span) { self.sugg_span = ty.map_or(Some(self.decl_span), |ty| Some(ty.span)); } hir::intravisit::walk_stmt(self, ex); } } let mut visitor = LetVisitor { decl_span, sugg_span: None }; visitor.visit_body(&body); if let Some(span) = visitor.sugg_span { self.suggest_assign_value(&mut err, moved_place, span); } } err } fn suggest_assign_value( &self, err: &mut Diag<'_>, moved_place: PlaceRef<'tcx>, sugg_span: Span, ) { let ty = moved_place.ty(self.body, self.infcx.tcx).ty; debug!("ty: {:?}, kind: {:?}", ty, ty.kind()); let Some(assign_value) = self.infcx.err_ctxt().ty_kind_suggestion(self.infcx.param_env, ty) else { return; }; err.span_suggestion_verbose( sugg_span.shrink_to_hi(), "consider assigning a value", format!(" = {assign_value}"), Applicability::MaybeIncorrect, ); } /// In a move error that occurs on a call within a loop, we try to identify cases where cloning /// the value would lead to a logic error. We infer these cases by seeing if the moved value is /// part of the logic to break the loop, either through an explicit `break` or if the expression /// is part of a `while let`. 
fn suggest_hoisting_call_outside_loop(&self, err: &mut Diag<'_>, expr: &hir::Expr<'_>) -> bool { let tcx = self.infcx.tcx; let mut can_suggest_clone = true; // If the moved value is a locally declared binding, we'll look upwards on the expression // tree until the scope where it is defined, and no further, as suggesting to move the // expression beyond that point would be illogical. let local_hir_id = if let hir::ExprKind::Path(hir::QPath::Resolved( _, hir::Path { res: hir::def::Res::Local(local_hir_id), .. }, )) = expr.kind { Some(local_hir_id) } else { // This case would be if the moved value comes from an argument binding, we'll just // look within the entire item, that's fine. None }; /// This will allow us to look for a specific `HirId`, in our case `local_hir_id` where the /// binding was declared, within any other expression. We'll use it to search for the /// binding declaration within every scope we inspect. struct Finder { hir_id: hir::HirId, } impl<'hir> Visitor<'hir> for Finder { type Result = ControlFlow<()>; fn visit_pat(&mut self, pat: &'hir hir::Pat<'hir>) -> Self::Result { if pat.hir_id == self.hir_id { return ControlFlow::Break(()); } hir::intravisit::walk_pat(self, pat) } fn visit_expr(&mut self, ex: &'hir hir::Expr<'hir>) -> Self::Result { if ex.hir_id == self.hir_id { return ControlFlow::Break(()); } hir::intravisit::walk_expr(self, ex) } } // The immediate HIR parent of the moved expression. We'll look for it to be a call. let mut parent = None; // The top-most loop where the moved expression could be moved to a new binding. let mut outer_most_loop: Option<&hir::Expr<'_>> = None; for (_, node) in tcx.hir_parent_iter(expr.hir_id) { let e = match node { hir::Node::Expr(e) => e, hir::Node::LetStmt(hir::LetStmt { els: Some(els), .. 
}) => { let mut finder = BreakFinder { found_breaks: vec![], found_continues: vec![] }; finder.visit_block(els); if !finder.found_breaks.is_empty() { // Don't suggest clone as it could be will likely end in an infinite // loop. // let Some(_) = foo(non_copy.clone()) else { break; } // --- ^^^^^^^^ ----- can_suggest_clone = false; } continue; } _ => continue, }; if let Some(&hir_id) = local_hir_id { if (Finder { hir_id }).visit_expr(e).is_break() { // The current scope includes the declaration of the binding we're accessing, we // can't look up any further for loops. break; } } if parent.is_none() { parent = Some(e); } match e.kind { hir::ExprKind::Let(_) => { match tcx.parent_hir_node(e.hir_id) { hir::Node::Expr(hir::Expr { kind: hir::ExprKind::If(cond, ..), .. }) => { if (Finder { hir_id: expr.hir_id }).visit_expr(cond).is_break() { // The expression where the move error happened is in a `while let` // condition Don't suggest clone as it will likely end in an // infinite loop. // while let Some(_) = foo(non_copy.clone()) { } // --------- ^^^^^^^^ can_suggest_clone = false; } } _ => {} } } hir::ExprKind::Loop(..) => { outer_most_loop = Some(e); } _ => {} } } let loop_count: usize = tcx .hir_parent_iter(expr.hir_id) .map(|(_, node)| match node { hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Loop(..), .. }) => 1, _ => 0, }) .sum(); let sm = tcx.sess.source_map(); if let Some(in_loop) = outer_most_loop { let mut finder = BreakFinder { found_breaks: vec![], found_continues: vec![] }; finder.visit_expr(in_loop); // All of the spans for `break` and `continue` expressions. let spans = finder .found_breaks .iter() .chain(finder.found_continues.iter()) .map(|(_, span)| *span) .filter(|span| { !matches!( span.desugaring_kind(), Some(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) ) }) .collect::<Vec<Span>>(); // All of the spans for the loops above the expression with the move error. 
let loop_spans: Vec<_> = tcx .hir_parent_iter(expr.hir_id) .filter_map(|(_, node)| match node { hir::Node::Expr(hir::Expr { span, kind: hir::ExprKind::Loop(..), .. }) => { Some(*span) } _ => None, }) .collect(); // It is possible that a user written `break` or `continue` is in the wrong place. We // point them out at the user for them to make a determination. (#92531) if !spans.is_empty() && loop_count > 1 { // Getting fancy: if the spans of the loops *do not* overlap, we only use the line // number when referring to them. If there *are* overlaps (multiple loops on the // same line) then we use the more verbose span output (`file.rs:col:ll`). let mut lines: Vec<_> = loop_spans.iter().map(|sp| sm.lookup_char_pos(sp.lo()).line).collect(); lines.sort(); lines.dedup(); let fmt_span = |span: Span| { if lines.len() == loop_spans.len() { format!("line {}", sm.lookup_char_pos(span.lo()).line) } else { sm.span_to_diagnostic_string(span) } }; let mut spans: MultiSpan = spans.into(); // Point at all the `continue`s and explicit `break`s in the relevant loops. for (desc, elements) in [ ("`break` exits", &finder.found_breaks), ("`continue` advances", &finder.found_continues), ] { for (destination, sp) in elements { if let Ok(hir_id) = destination.target_id && let hir::Node::Expr(expr) = tcx.hir_node(hir_id) && !matches!( sp.desugaring_kind(), Some(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) ) { spans.push_span_label( *sp, format!("this {desc} the loop at {}", fmt_span(expr.span)), ); } } } // Point at all the loops that are between this move and the parent item. for span in loop_spans { spans.push_span_label(sm.guess_head_span(span), ""); } // note: verify that your loop breaking logic is correct // --> $DIR/nested-loop-moved-value-wrong-continue.rs:41:17 // | // 28 | for foo in foos { // | --------------- // ... // 33 | for bar in &bars { // | ---------------- // ... 
// 41 | continue; // | ^^^^^^^^ this `continue` advances the loop at line 33 err.span_note(spans, "verify that your loop breaking logic is correct"); } if let Some(parent) = parent && let hir::ExprKind::MethodCall(..) | hir::ExprKind::Call(..) = parent.kind { // FIXME: We could check that the call's *parent* takes `&mut val` to make the // suggestion more targeted to the `mk_iter(val).next()` case. Maybe do that only to // check for whether to suggest `let value` or `let mut value`. let span = in_loop.span; if !finder.found_breaks.is_empty() && let Ok(value) = sm.span_to_snippet(parent.span) { // We know with high certainty that this move would affect the early return of a // loop, so we suggest moving the expression with the move out of the loop. let indent = if let Some(indent) = sm.indentation_before(span) { format!("\n{indent}") } else { " ".to_string() }; err.multipart_suggestion( "consider moving the expression out of the loop so it is only moved once", vec![ (span.shrink_to_lo(), format!("let mut value = {value};{indent}")), (parent.span, "value".to_string()), ], Applicability::MaybeIncorrect, ); } } } can_suggest_clone } /// We have `S { foo: val, ..base }`, and we suggest instead writing /// `S { foo: val, bar: base.bar.clone(), .. }` when valid. 
fn suggest_cloning_on_functional_record_update( &self, err: &mut Diag<'_>, ty: Ty<'tcx>, expr: &hir::Expr<'_>, ) { let typeck_results = self.infcx.tcx.typeck(self.mir_def_id()); let hir::ExprKind::Struct(struct_qpath, fields, hir::StructTailExpr::Base(base)) = expr.kind else { return; }; let hir::QPath::Resolved(_, path) = struct_qpath else { return }; let hir::def::Res::Def(_, def_id) = path.res else { return }; let Some(expr_ty) = typeck_results.node_type_opt(expr.hir_id) else { return }; let ty::Adt(def, args) = expr_ty.kind() else { return }; let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = base.kind else { return }; let (hir::def::Res::Local(_) | hir::def::Res::Def( DefKind::Const | DefKind::ConstParam | DefKind::Static { .. } | DefKind::AssocConst, _, )) = path.res else { return; }; let Ok(base_str) = self.infcx.tcx.sess.source_map().span_to_snippet(base.span) else { return; }; // 1. look for the fields of type `ty`. // 2. check if they are clone and add them to suggestion // 3. check if there are any values left to `..` and remove it if not // 4. emit suggestion to clone the field directly as `bar: base.bar.clone()` let mut final_field_count = fields.len(); let Some(variant) = def.variants().iter().find(|variant| variant.def_id == def_id) else { // When we have an enum, look for the variant that corresponds to the variant the user // wrote. return; }; let mut sugg = vec![]; for field in &variant.fields { // In practice unless there are more than one field with the same type, we'll be // suggesting a single field at a type, because we don't aggregate multiple borrow // checker errors involving the functional record update syntax into a single one. let field_ty = field.ty(self.infcx.tcx, args); let ident = field.ident(self.infcx.tcx); if field_ty == ty && fields.iter().all(|field| field.ident.name != ident.name) { // Suggest adding field and cloning it. 
sugg.push(format!("{ident}: {base_str}.{ident}.clone()")); final_field_count += 1; } } let (span, sugg) = match fields { [.., last] => ( if final_field_count == variant.fields.len() { // We'll remove the `..base` as there aren't any fields left. last.span.shrink_to_hi().with_hi(base.span.hi()) } else { last.span.shrink_to_hi() }, format!(", {}", sugg.join(", ")), ), // Account for no fields in suggestion span. [] => ( expr.span.with_lo(struct_qpath.span().hi()), if final_field_count == variant.fields.len() { // We'll remove the `..base` as there aren't any fields left. format!(" {{ {} }}", sugg.join(", ")) } else { format!(" {{ {}, ..{base_str} }}", sugg.join(", ")) }, ), }; let prefix = if !self.implements_clone(ty) { let msg = format!("`{ty}` doesn't implement `Copy` or `Clone`"); if let ty::Adt(def, _) = ty.kind() { err.span_note(self.infcx.tcx.def_span(def.did()), msg); } else { err.note(msg); } format!("if `{ty}` implemented `Clone`, you could ") } else { String::new() }; let msg = format!( "{prefix}clone the value from the field instead of using the functional record update \ syntax", ); err.span_suggestion_verbose(span, msg, sugg, Applicability::MachineApplicable); } pub(crate) fn suggest_cloning( &self, err: &mut Diag<'_>, place: PlaceRef<'tcx>, ty: Ty<'tcx>, expr: &'tcx hir::Expr<'tcx>, use_spans: Option<UseSpans<'tcx>>, ) { if let hir::ExprKind::Struct(_, _, hir::StructTailExpr::Base(_)) = expr.kind { // We have `S { foo: val, ..base }`. In `check_aggregate_rvalue` we have a single // `Location` that covers both the `S { ... }` literal, all of its fields and the // `base`. If the move happens because of `S { foo: val, bar: base.bar }` the `expr` // will already be correct. Instead, we see if we can suggest writing. 
self.suggest_cloning_on_functional_record_update(err, ty, expr); return; } if self.implements_clone(ty) { if self.in_move_closure(expr) { if let Some(name) = self.describe_place(place) { self.suggest_clone_of_captured_var_in_move_closure(err, &name, use_spans); } } else { self.suggest_cloning_inner(err, ty, expr); } } else if let ty::Adt(def, args) = ty.kind() && let Some(local_did) = def.did().as_local() && def.variants().iter().all(|variant| { variant .fields .iter() .all(|field| self.implements_clone(field.ty(self.infcx.tcx, args))) }) { let ty_span = self.infcx.tcx.def_span(def.did()); let mut span: MultiSpan = ty_span.into(); let mut derive_clone = false; self.infcx.tcx.for_each_relevant_impl( self.infcx.tcx.lang_items().clone_trait().unwrap(), ty, |def_id| { if self.infcx.tcx.is_automatically_derived(def_id) { derive_clone = true; span.push_span_label( self.infcx.tcx.def_span(def_id), "derived `Clone` adds implicit bounds on type parameters", ); if let Some(generics) = self.infcx.tcx.hir_get_generics(local_did) { for param in generics.params { if let hir::GenericParamKind::Type { .. 
} = param.kind { span.push_span_label( param.span, format!( "introduces an implicit `{}: Clone` bound", param.name.ident() ), ); } } } } }, ); let msg = if !derive_clone { span.push_span_label( ty_span, format!( "consider {}implementing `Clone` for this type", if derive_clone { "manually " } else { "" } ), ); format!("if `{ty}` implemented `Clone`, you could clone the value") } else { format!("if all bounds were met, you could clone the value") }; span.push_span_label(expr.span, "you could clone this value"); err.span_note(span, msg); if derive_clone { err.help("consider manually implementing `Clone` to avoid undesired bounds"); } } else if let ty::Param(param) = ty.kind() && let Some(_clone_trait_def) = self.infcx.tcx.lang_items().clone_trait() && let generics = self.infcx.tcx.generics_of(self.mir_def_id()) && let generic_param = generics.type_param(*param, self.infcx.tcx) && let param_span = self.infcx.tcx.def_span(generic_param.def_id) && if let Some(UseSpans::FnSelfUse { kind, .. }) = use_spans && let CallKind::FnCall { fn_trait_id, self_ty } = kind && let ty::Param(_) = self_ty.kind() && ty == self_ty && self.infcx.tcx.fn_trait_kind_from_def_id(fn_trait_id).is_some() { // Do not suggest `F: FnOnce() + Clone`. false } else { true } { let mut span: MultiSpan = param_span.into(); span.push_span_label( param_span, "consider constraining this type parameter with `Clone`", ); span.push_span_label(expr.span, "you could clone this value"); err.span_help( span, format!("if `{ty}` implemented `Clone`, you could clone the value"), ); } else if let ty::Adt(_, _) = ty.kind() && let Some(clone_trait) = self.infcx.tcx.lang_items().clone_trait() { // For cases like `Option<NonClone>`, where `Option<T>: Clone` if `T: Clone`, we point // at the types that should be `Clone`. 
let ocx = ObligationCtxt::new_with_diagnostics(self.infcx); let cause = ObligationCause::misc(expr.span, self.mir_def_id()); ocx.register_bound(cause, self.infcx.param_env, ty, clone_trait); let errors = ocx.evaluate_obligations_error_on_ambiguity(); if errors.iter().all(|error| { match error.obligation.predicate.as_clause().and_then(|c| c.as_trait_clause()) { Some(clause) => match clause.self_ty().skip_binder().kind() { ty::Adt(def, _) => def.did().is_local() && clause.def_id() == clone_trait, _ => false, }, None => false, } }) { let mut type_spans = vec![]; let mut types = FxIndexSet::default(); for clause in errors .iter() .filter_map(|e| e.obligation.predicate.as_clause()) .filter_map(|c| c.as_trait_clause()) { let ty::Adt(def, _) = clause.self_ty().skip_binder().kind() else { continue }; type_spans.push(self.infcx.tcx.def_span(def.did())); types.insert( self.infcx .tcx .short_string(clause.self_ty().skip_binder(), &mut err.long_ty_path()), ); } let mut span: MultiSpan = type_spans.clone().into(); for sp in type_spans { span.push_span_label(sp, "consider implementing `Clone` for this type"); } span.push_span_label(expr.span, "you could clone this value"); let types: Vec<_> = types.into_iter().collect(); let msg = match &types[..] { [only] => format!("`{only}`"), [head @ .., last] => format!( "{} and `{last}`", head.iter().map(|t| format!("`{t}`")).collect::<Vec<_>>().join(", ") ), [] => unreachable!(), }; err.span_note( span, format!("if {msg} implemented `Clone`, you could clone the value"), ); } } } pub(crate) fn implements_clone(&self, ty: Ty<'tcx>) -> bool { let Some(clone_trait_def) = self.infcx.tcx.lang_items().clone_trait() else { return false }; self.infcx .type_implements_trait(clone_trait_def, [ty], self.infcx.param_env) .must_apply_modulo_regions() } /// Given an expression, check if it is a method call `foo.clone()`, where `foo` and /// `foo.clone()` both have the same type, returning the span for `.clone()` if so. 
pub(crate) fn clone_on_reference(&self, expr: &hir::Expr<'_>) -> Option<Span> {
    let typeck_results = self.infcx.tcx.typeck(self.mir_def_id());
    // `rcvr_ty == expr_ty` means the `.clone()` returned the receiver's own type — i.e. the
    // clone happened through a reference and didn't produce an owned copy of the pointee.
    if let hir::ExprKind::MethodCall(segment, rcvr, args, span) = expr.kind
        && let Some(expr_ty) = typeck_results.node_type_opt(expr.hir_id)
        && let Some(rcvr_ty) = typeck_results.node_type_opt(rcvr.hir_id)
        && rcvr_ty == expr_ty
        && segment.ident.name == sym::clone
        && args.is_empty()
    {
        Some(span)
    } else {
        None
    }
}

/// Returns `true` if `expr` is nested anywhere inside a `move` closure (walking the HIR
/// parents), in which case cloning at the use site cannot help: the value was captured by
/// value already.
fn in_move_closure(&self, expr: &hir::Expr<'_>) -> bool {
    for (_, node) in self.infcx.tcx.hir_parent_iter(expr.hir_id) {
        if let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(closure), .. }) = node
            && let hir::CaptureBy::Value { .. } = closure.capture_clause
        {
            // `move || x.clone()` will not work. FIXME: suggest `let y = x.clone(); move || y`
            return true;
        }
    }
    false
}

/// Emits the actual `.clone()` multipart suggestion for a moved expression whose type is
/// `Clone`, peeling `&`/`*` wrappers from the span so the suggestion applies to the right
/// place. Returns `true` if a suggestion was emitted.
fn suggest_cloning_inner(
    &self,
    err: &mut Diag<'_>,
    ty: Ty<'tcx>,
    expr: &hir::Expr<'_>,
) -> bool {
    let tcx = self.infcx.tcx;
    if let Some(_) = self.clone_on_reference(expr) {
        // Avoid redundant clone suggestion already suggested in `explain_captures`.
        // See `tests/ui/moves/needs-clone-through-deref.rs`
        return false;
    }
    // We don't want to suggest `.clone()` in a move closure, since the value has already been
    // captured.
    if self.in_move_closure(expr) {
        return false;
    }
    // We also don't want to suggest cloning a closure itself, since the value has already been
    // captured.
    if let hir::ExprKind::Closure(_) = expr.kind {
        return false;
    }
    // Try to find predicates on *generic params* that would allow copying `ty`
    // In a struct-pattern shorthand (`S { field }`) the suggestion must re-introduce the
    // field name: `field: field.clone()`.
    let mut suggestion =
        if let Some(symbol) = tcx.hir_maybe_get_struct_pattern_shorthand_field(expr) {
            format!(": {symbol}.clone()")
        } else {
            ".clone()".to_owned()
        };
    let mut sugg = Vec::with_capacity(2);
    let mut inner_expr = expr;
    let mut is_raw_ptr = false;
    let typeck_result = self.infcx.tcx.typeck(self.mir_def_id());
    // Remove uses of `&` and `*` when suggesting `.clone()`.
    while let hir::ExprKind::AddrOf(.., inner) | hir::ExprKind::Unary(hir::UnOp::Deref, inner) =
        &inner_expr.kind
    {
        if let hir::ExprKind::AddrOf(_, hir::Mutability::Mut, _) = inner_expr.kind {
            // We assume that `&mut` refs are desired for their side-effects, so cloning the
            // value wouldn't do what the user wanted.
            return false;
        }
        inner_expr = inner;
        if let Some(inner_type) = typeck_result.node_type_opt(inner.hir_id) {
            if matches!(inner_type.kind(), ty::RawPtr(..)) {
                is_raw_ptr = true;
                break;
            }
        }
    }
    // Cloning the raw pointer doesn't make sense in some cases and would cause a type mismatch
    // error. (see #126863)
    if inner_expr.span.lo() != expr.span.lo() && !is_raw_ptr {
        // Remove "(*" or "(&"
        sugg.push((expr.span.with_hi(inner_expr.span.lo()), String::new()));
    }
    // Check whether `expr` is surrounded by parentheses or not.
    let span = if inner_expr.span.hi() != expr.span.hi() {
        // Account for `(*x)` to suggest `x.clone()`.
        if is_raw_ptr {
            expr.span.shrink_to_hi()
        } else {
            // Remove the close parenthesis ")"
            expr.span.with_lo(inner_expr.span.hi())
        }
    } else {
        if is_raw_ptr {
            sugg.push((expr.span.shrink_to_lo(), "(".to_string()));
            suggestion = ").clone()".to_string();
        }
        expr.span.shrink_to_hi()
    };
    sugg.push((span, suggestion));
    // `Arc`/`Rc` clones are cheap refcount bumps, so phrase the message accordingly.
    let msg = if let ty::Adt(def, _) = ty.kind()
        && [tcx.get_diagnostic_item(sym::Arc), tcx.get_diagnostic_item(sym::Rc)]
            .contains(&Some(def.did()))
    {
        "clone the value to increment its reference count"
    } else {
        "consider cloning the value if the performance cost is acceptable"
    };
    err.multipart_suggestion_verbose(msg, sugg, Applicability::MachineApplicable);
    true
}

/// Suggests constraining the current item's generic parameters so that `ty: <trait>` holds
/// (used e.g. to suggest `T: Clone`/`T: Copy` bounds). Only fires when every failing
/// obligation is a trait bound on a generic parameter of this item.
fn suggest_adding_bounds(&self, err: &mut Diag<'_>, ty: Ty<'tcx>, def_id: DefId, span: Span) {
    let tcx = self.infcx.tcx;
    let generics = tcx.generics_of(self.mir_def_id());

    let Some(hir_generics) = tcx
        .typeck_root_def_id(self.mir_def_id().to_def_id())
        .as_local()
        .and_then(|def_id| tcx.hir_get_generics(def_id))
    else {
        return;
    };
    // Try to find predicates on *generic params* that would allow copying `ty`
    let ocx = ObligationCtxt::new_with_diagnostics(self.infcx);
    let cause = ObligationCause::misc(span, self.mir_def_id());
    ocx.register_bound(cause, self.infcx.param_env, ty, def_id);
    let errors = ocx.evaluate_obligations_error_on_ambiguity();

    // Only emit suggestion if all required predicates are on generic
    let predicates: Result<Vec<_>, _> = errors
        .into_iter()
        .map(|err| match err.obligation.predicate.kind().skip_binder() {
            PredicateKind::Clause(ty::ClauseKind::Trait(predicate)) => {
                match *predicate.self_ty().kind() {
                    ty::Param(param_ty) => Ok((
                        generics.type_param(param_ty, tcx),
                        predicate.trait_ref.print_trait_sugared().to_string(),
                        Some(predicate.trait_ref.def_id),
                    )),
                    _ => Err(()),
                }
            }
            _ => Err(()),
        })
        .collect();

    if let Ok(predicates) = predicates {
        suggest_constraining_type_params(
            tcx,
            hir_generics,
            err,
            predicates.iter().map(|(param, constraint, def_id)| {
                (param.name.as_str(), &**constraint, *def_id)
            }),
            None,
        );
    }
}

/// Reports a value being moved out while it is still borrowed, attaching closure-capture
/// labels, the borrow explanation, and (where possible) a `.clone()` suggestion on the
/// borrowed expression.
pub(crate) fn report_move_out_while_borrowed(
    &mut self,
    location: Location,
    (place, span): (Place<'tcx>, Span),
    borrow: &BorrowData<'tcx>,
) {
    debug!(
        "report_move_out_while_borrowed: location={:?} place={:?} span={:?} borrow={:?}",
        location, place, span, borrow
    );
    let value_msg = self.describe_any_place(place.as_ref());
    let borrow_msg = self.describe_any_place(borrow.borrowed_place.as_ref());

    let borrow_spans = self.retrieve_borrow_spans(borrow);
    let borrow_span = borrow_spans.args_or_use();

    let move_spans = self.move_spans(place.as_ref(), location);
    let span = move_spans.args_or_use();

    let mut err = self.cannot_move_when_borrowed(
        span,
        borrow_span,
        &self.describe_any_place(place.as_ref()),
        &borrow_msg,
        &value_msg,
    );
    self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);

    borrow_spans.var_path_only_subdiag(&mut err, crate::InitializationRequiringAction::Borrow);

    move_spans.var_subdiag(&mut err, None, |kind, var_span| {
        use crate::session_diagnostics::CaptureVarCause::*;
        match kind {
            hir::ClosureKind::Coroutine(_) => MoveUseInCoroutine { var_span },
            hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                MoveUseInClosure { var_span }
            }
        }
    });

    self.explain_why_borrow_contains_point(location, borrow, None)
        .add_explanation_to_diagnostic(&self, &mut err, "", Some(borrow_span), None);
    self.suggest_copy_for_type_in_cloned_ref(&mut err, place);
    let typeck_results = self.infcx.tcx.typeck(self.mir_def_id());
    if let Some(expr) = self.find_expr(borrow_span) {
        // This is a borrow span, so we want to suggest cloning the referent.
        if let hir::ExprKind::AddrOf(_, _, borrowed_expr) = expr.kind
            && let Some(ty) = typeck_results.expr_ty_opt(borrowed_expr)
        {
            self.suggest_cloning(&mut err, place.as_ref(), ty, borrowed_expr, Some(move_spans));
        } else if typeck_results.expr_adjustments(expr).first().is_some_and(|adj| {
            // Also handle implicit (adjustment-inserted) borrows, not just explicit `&`.
            matches!(
                adj.kind,
                ty::adjustment::Adjust::Borrow(ty::adjustment::AutoBorrow::Ref(
                    ty::adjustment::AutoBorrowMutability::Not
                        | ty::adjustment::AutoBorrowMutability::Mut {
                            allow_two_phase_borrow: ty::adjustment::AllowTwoPhase::No
                        }
                ))
            )
        }) && let Some(ty) = typeck_results.expr_ty_opt(expr)
        {
            self.suggest_cloning(&mut err, place.as_ref(), ty, expr, Some(move_spans));
        }
    }
    self.buffer_error(err);
}

/// Reports a use of a value while it is mutably borrowed, returning the diagnostic for the
/// caller to augment/emit.
pub(crate) fn report_use_while_mutably_borrowed(
    &self,
    location: Location,
    (place, _span): (Place<'tcx>, Span),
    borrow: &BorrowData<'tcx>,
) -> Diag<'infcx> {
    let borrow_spans = self.retrieve_borrow_spans(borrow);
    let borrow_span = borrow_spans.args_or_use();

    // Conflicting borrows are reported separately, so only check for move
    // captures.
    let use_spans = self.move_spans(place.as_ref(), location);
    let span = use_spans.var_or_use();

    // If the attempted use is in a closure then we do not care about the path span of the
    // place we are currently trying to use we call `var_span_label` on `borrow_spans` to
    // annotate if the existing borrow was in a closure.
    let mut err = self.cannot_use_when_mutably_borrowed(
        span,
        &self.describe_any_place(place.as_ref()),
        borrow_span,
        &self.describe_any_place(borrow.borrowed_place.as_ref()),
    );
    self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);

    borrow_spans.var_subdiag(&mut err, Some(borrow.kind), |kind, var_span| {
        use crate::session_diagnostics::CaptureVarCause::*;
        let place = &borrow.borrowed_place;
        let desc_place = self.describe_any_place(place.as_ref());
        match kind {
            hir::ClosureKind::Coroutine(_) => {
                BorrowUsePlaceCoroutine { place: desc_place, var_span, is_single_var: true }
            }
            hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                BorrowUsePlaceClosure { place: desc_place, var_span, is_single_var: true }
            }
        }
    });

    self.explain_why_borrow_contains_point(location, borrow, None)
        .add_explanation_to_diagnostic(&self, &mut err, "", None, None);
    err
}

/// Reports two conflicting borrows of (possibly overlapping) places. The big `match` on
/// `(gen_borrow_kind, issued_borrow.kind)` selects the specific error constructor; the
/// remaining pairs are unreachable because they don't conflict.
pub(crate) fn report_conflicting_borrow(
    &self,
    location: Location,
    (place, span): (Place<'tcx>, Span),
    gen_borrow_kind: BorrowKind,
    issued_borrow: &BorrowData<'tcx>,
) -> Diag<'infcx> {
    let issued_spans = self.retrieve_borrow_spans(issued_borrow);
    let issued_span = issued_spans.args_or_use();

    let borrow_spans = self.borrow_spans(span, location);
    let span = borrow_spans.args_or_use();

    let container_name = if issued_spans.for_coroutine() || borrow_spans.for_coroutine() {
        "coroutine"
    } else {
        "closure"
    };

    let (desc_place, msg_place, msg_borrow, union_type_name) =
        self.describe_place_for_conflicting_borrow(place, issued_borrow.borrowed_place);

    let explanation = self.explain_why_borrow_contains_point(location, issued_borrow, None);
    let second_borrow_desc = if explanation.is_explained() { "second " } else { "" };

    // FIXME: supply non-"" `opt_via` when appropriate
    let first_borrow_desc;
    let mut err = match (gen_borrow_kind, issued_borrow.kind) {
        (
            BorrowKind::Shared | BorrowKind::Fake(FakeBorrowKind::Deep),
            BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::TwoPhaseBorrow },
        ) => {
            first_borrow_desc = "mutable ";
            let mut err = self.cannot_reborrow_already_borrowed(
                span,
                &desc_place,
                &msg_place,
                "immutable",
                issued_span,
                "it",
                "mutable",
                &msg_borrow,
                None,
            );
            self.suggest_slice_method_if_applicable(
                &mut err,
                place,
                issued_borrow.borrowed_place,
                span,
                issued_span,
            );
            err
        }
        (
            BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::TwoPhaseBorrow },
            BorrowKind::Shared | BorrowKind::Fake(FakeBorrowKind::Deep),
        ) => {
            first_borrow_desc = "immutable ";
            let mut err = self.cannot_reborrow_already_borrowed(
                span,
                &desc_place,
                &msg_place,
                "mutable",
                issued_span,
                "it",
                "immutable",
                &msg_borrow,
                None,
            );
            self.suggest_slice_method_if_applicable(
                &mut err,
                place,
                issued_borrow.borrowed_place,
                span,
                issued_span,
            );
            self.suggest_binding_for_closure_capture_self(&mut err, &issued_spans);
            self.suggest_using_closure_argument_instead_of_capture(
                &mut err,
                issued_borrow.borrowed_place,
                &issued_spans,
            );
            err
        }

        (
            BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::TwoPhaseBorrow },
            BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::TwoPhaseBorrow },
        ) => {
            first_borrow_desc = "first ";
            let mut err = self.cannot_mutably_borrow_multiply(
                span,
                &desc_place,
                &msg_place,
                issued_span,
                &msg_borrow,
                None,
            );
            self.suggest_slice_method_if_applicable(
                &mut err,
                place,
                issued_borrow.borrowed_place,
                span,
                issued_span,
            );
            self.suggest_using_closure_argument_instead_of_capture(
                &mut err,
                issued_borrow.borrowed_place,
                &issued_spans,
            );
            self.explain_iterator_advancement_in_for_loop_if_applicable(
                &mut err,
                span,
                &issued_spans,
            );
            err
        }

        (
            BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture },
            BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture },
        ) => {
            first_borrow_desc = "first ";
            self.cannot_uniquely_borrow_by_two_closures(span, &desc_place, issued_span, None)
        }

        (BorrowKind::Mut { .. }, BorrowKind::Fake(FakeBorrowKind::Shallow)) => {
            if let Some(immutable_section_description) =
                self.classify_immutable_section(issued_borrow.assigned_place)
            {
                let mut err = self.cannot_mutate_in_immutable_section(
                    span,
                    issued_span,
                    &desc_place,
                    immutable_section_description,
                    "mutably borrow",
                );
                borrow_spans.var_subdiag(
                    &mut err,
                    Some(BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture }),
                    |kind, var_span| {
                        use crate::session_diagnostics::CaptureVarCause::*;
                        match kind {
                            hir::ClosureKind::Coroutine(_) => BorrowUsePlaceCoroutine {
                                place: desc_place,
                                var_span,
                                is_single_var: true,
                            },
                            hir::ClosureKind::Closure
                            | hir::ClosureKind::CoroutineClosure(_) => BorrowUsePlaceClosure {
                                place: desc_place,
                                var_span,
                                is_single_var: true,
                            },
                        }
                    },
                );
                // Early return: this arm emits its own capture labels above.
                return err;
            } else {
                first_borrow_desc = "immutable ";
                self.cannot_reborrow_already_borrowed(
                    span,
                    &desc_place,
                    &msg_place,
                    "mutable",
                    issued_span,
                    "it",
                    "immutable",
                    &msg_borrow,
                    None,
                )
            }
        }

        (BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture }, _) => {
            first_borrow_desc = "first ";
            self.cannot_uniquely_borrow_by_one_closure(
                span,
                container_name,
                &desc_place,
                "",
                issued_span,
                "it",
                "",
                None,
            )
        }

        (
            BorrowKind::Shared | BorrowKind::Fake(FakeBorrowKind::Deep),
            BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture },
        ) => {
            first_borrow_desc = "first ";
            self.cannot_reborrow_already_uniquely_borrowed(
                span,
                container_name,
                &desc_place,
                "",
                "immutable",
                issued_span,
                "",
                None,
                second_borrow_desc,
            )
        }

        (BorrowKind::Mut { .. }, BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture }) => {
            first_borrow_desc = "first ";
            self.cannot_reborrow_already_uniquely_borrowed(
                span,
                container_name,
                &desc_place,
                "",
                "mutable",
                issued_span,
                "",
                None,
                second_borrow_desc,
            )
        }

        (
            BorrowKind::Shared | BorrowKind::Fake(FakeBorrowKind::Deep),
            BorrowKind::Shared | BorrowKind::Fake(_),
        )
        | (
            BorrowKind::Fake(FakeBorrowKind::Shallow),
            BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Fake(_),
        ) => {
            // These combinations never conflict, so no diagnostic is needed.
            unreachable!()
        }
    };
    self.note_due_to_edition_2024_opaque_capture_rules(issued_borrow, &mut err);

    if issued_spans == borrow_spans {
        // Both borrows come from the same capture: a single label is enough.
        borrow_spans.var_subdiag(&mut err, Some(gen_borrow_kind), |kind, var_span| {
            use crate::session_diagnostics::CaptureVarCause::*;
            match kind {
                hir::ClosureKind::Coroutine(_) => BorrowUsePlaceCoroutine {
                    place: desc_place,
                    var_span,
                    is_single_var: false,
                },
                hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                    BorrowUsePlaceClosure { place: desc_place, var_span, is_single_var: false }
                }
            }
        });
    } else {
        issued_spans.var_subdiag(&mut err, Some(issued_borrow.kind), |kind, var_span| {
            use crate::session_diagnostics::CaptureVarCause::*;
            let borrow_place = &issued_borrow.borrowed_place;
            let borrow_place_desc = self.describe_any_place(borrow_place.as_ref());
            match kind {
                hir::ClosureKind::Coroutine(_) => {
                    FirstBorrowUsePlaceCoroutine { place: borrow_place_desc, var_span }
                }
                hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                    FirstBorrowUsePlaceClosure { place: borrow_place_desc, var_span }
                }
            }
        });

        borrow_spans.var_subdiag(&mut err, Some(gen_borrow_kind), |kind, var_span| {
            use crate::session_diagnostics::CaptureVarCause::*;
            match kind {
                hir::ClosureKind::Coroutine(_) => {
                    SecondBorrowUsePlaceCoroutine { place: desc_place, var_span }
                }
                hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                    SecondBorrowUsePlaceClosure { place: desc_place, var_span }
                }
            }
        });
    }

    if union_type_name != "" {
        err.note(format!(
            "{msg_place} is a field of the union `{union_type_name}`, so it overlaps the field {msg_borrow}",
        ));
    }

    explanation.add_explanation_to_diagnostic(
        &self,
        &mut err,
        first_borrow_desc,
        None,
        Some((issued_span, span)),
    );

    self.suggest_using_local_if_applicable(&mut err, location, issued_borrow, explanation);
    self.suggest_copy_for_type_in_cloned_ref(&mut err, place);

    err
}

/// Scans the current body for `.clone()` calls that are no-ops (cloning a `&T` where `T`
/// isn't `Clone`, so the result is still a reference) involving the type of `place`, labels
/// them, and suggests adding `Clone` bounds/derives for the offending types.
fn suggest_copy_for_type_in_cloned_ref(&self, err: &mut Diag<'infcx>, place: Place<'tcx>) {
    let tcx = self.infcx.tcx;
    let Some(body_id) = tcx.hir_node(self.mir_hir_id()).body_id() else { return };

    // Collects every method call in the body that resolves to `Clone::clone`.
    struct FindUselessClone<'tcx> {
        tcx: TyCtxt<'tcx>,
        typeck_results: &'tcx ty::TypeckResults<'tcx>,
        clones: Vec<&'tcx hir::Expr<'tcx>>,
    }
    impl<'tcx> FindUselessClone<'tcx> {
        fn new(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
            Self { tcx, typeck_results: tcx.typeck(def_id), clones: vec![] }
        }
    }
    impl<'tcx> Visitor<'tcx> for FindUselessClone<'tcx> {
        fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) {
            if let hir::ExprKind::MethodCall(..) = ex.kind
                && let Some(method_def_id) =
                    self.typeck_results.type_dependent_def_id(ex.hir_id)
                && self.tcx.is_lang_item(self.tcx.parent(method_def_id), LangItem::Clone)
            {
                self.clones.push(ex);
            }
            hir::intravisit::walk_expr(self, ex);
        }
    }

    let mut expr_finder = FindUselessClone::new(tcx, self.mir_def_id());

    let body = tcx.hir_body(body_id).value;
    expr_finder.visit_expr(body);

    // Type visitor that breaks if `ty` occurs anywhere inside the visited type.
    struct Holds<'tcx> {
        ty: Ty<'tcx>,
    }

    impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for Holds<'tcx> {
        type Result = std::ops::ControlFlow<()>;

        fn visit_ty(&mut self, t: Ty<'tcx>) -> Self::Result {
            if t == self.ty {
                return ControlFlow::Break(());
            }
            t.super_visit_with(self)
        }
    }

    let mut types_to_constrain = FxIndexSet::default();

    let local_ty = self.body.local_decls[place.local].ty;
    let typeck_results = tcx.typeck(self.mir_def_id());
    let clone = tcx.require_lang_item(LangItem::Clone, body.span);
    for expr in expr_finder.clones {
        // A `.clone()` whose result type equals the (reference) receiver type cloned the
        // reference, not the pointee — only a problem when the pointee isn't `Clone`.
        if let hir::ExprKind::MethodCall(_, rcvr, _, span) = expr.kind
            && let Some(rcvr_ty) = typeck_results.node_type_opt(rcvr.hir_id)
            && let Some(ty) = typeck_results.node_type_opt(expr.hir_id)
            && rcvr_ty == ty
            && let ty::Ref(_, inner, _) = rcvr_ty.kind()
            && let inner = inner.peel_refs()
            && (Holds { ty: inner }).visit_ty(local_ty).is_break()
            && let None =
                self.infcx.type_implements_trait_shallow(clone, inner, self.infcx.param_env)
        {
            err.span_label(
                span,
                format!(
                    "this call doesn't do anything, the result is still `{rcvr_ty}` \
                     because `{inner}` doesn't implement `Clone`",
                ),
            );
            types_to_constrain.insert(inner);
        }
    }
    for ty in types_to_constrain {
        self.suggest_adding_bounds_or_derive(err, ty, clone, body.span);
    }
}

/// Suggests both ways of making `ty: <trait>` hold: constraining generic params via
/// `suggest_adding_bounds`, and (for ADTs) deriving the trait on the type itself.
pub(crate) fn suggest_adding_bounds_or_derive(
    &self,
    err: &mut Diag<'_>,
    ty: Ty<'tcx>,
    trait_def_id: DefId,
    span: Span,
) {
    self.suggest_adding_bounds(err, ty, trait_def_id, span);
    if let ty::Adt(..) = ty.kind() {
        // The type doesn't implement the trait.
        let trait_ref =
            ty::Binder::dummy(ty::TraitRef::new(self.infcx.tcx, trait_def_id, [ty]));
        let obligation = Obligation::new(
            self.infcx.tcx,
            ObligationCause::dummy(),
            self.infcx.param_env,
            trait_ref,
        );
        self.infcx.err_ctxt().suggest_derive(
            &obligation,
            err,
            trait_ref.upcast(self.infcx.tcx),
        );
    }
}

/// For a borrow conflict caused by nested calls like `f(g(&mut v), h(&mut v))`, suggests
/// hoisting the inner call's result into a local so the two borrows don't overlap. Only
/// fires when the issued borrow is later used in a call and the outer call span strictly
/// contains the inner one.
#[instrument(level = "debug", skip(self, err))]
fn suggest_using_local_if_applicable(
    &self,
    err: &mut Diag<'_>,
    location: Location,
    issued_borrow: &BorrowData<'tcx>,
    explanation: BorrowExplanation<'tcx>,
) {
    let used_in_call = matches!(
        explanation,
        BorrowExplanation::UsedLater(
            _,
            LaterUseKind::Call | LaterUseKind::Other,
            _call_span,
            _
        )
    );
    if !used_in_call {
        debug!("not later used in call");
        return;
    }
    if matches!(
        self.body.local_decls[issued_borrow.borrowed_place.local].local_info(),
        LocalInfo::IfThenRescopeTemp { .. }
    ) {
        // A better suggestion will be issued by the `if_let_rescope` lint
        return;
    }

    let use_span =
        if let BorrowExplanation::UsedLater(_, LaterUseKind::Other, use_span, _) = explanation {
            Some(use_span)
        } else {
            None
        };

    // For a two-phase borrow the interesting point is the activation, not the reservation.
    let outer_call_loc =
        if let TwoPhaseActivation::ActivatedAt(loc) = issued_borrow.activation_location {
            loc
        } else {
            issued_borrow.reserve_location
        };
    let outer_call_stmt = self.body.stmt_at(outer_call_loc);

    let inner_param_location = location;
    let Some(inner_param_stmt) = self.body.stmt_at(inner_param_location).left() else {
        debug!("`inner_param_location` {:?} is not for a statement", inner_param_location);
        return;
    };
    let Some(&inner_param) = inner_param_stmt.kind.as_assign().map(|(p, _)| p) else {
        debug!(
            "`inner_param_location` {:?} is not for an assignment: {:?}",
            inner_param_location, inner_param_stmt
        );
        return;
    };
    let inner_param_uses = find_all_local_uses::find(self.body, inner_param.local);
    // Find the call terminator that consumes `inner_param` by move.
    let Some((inner_call_loc, inner_call_term)) =
        inner_param_uses.into_iter().find_map(|loc| {
            let Either::Right(term) = self.body.stmt_at(loc) else {
                debug!("{:?} is a statement, so it can't be a call", loc);
                return None;
            };
            let TerminatorKind::Call { args, .. } = &term.kind else {
                debug!("not a call: {:?}", term);
                return None;
            };
            debug!("checking call args for uses of inner_param: {:?}", args);
            args.iter()
                .map(|a| &a.node)
                .any(|a| a == &Operand::Move(inner_param))
                .then_some((loc, term))
        })
    else {
        debug!("no uses of inner_param found as a by-move call arg");
        return;
    };
    debug!("===> outer_call_loc = {:?}, inner_call_loc = {:?}", outer_call_loc, inner_call_loc);

    let inner_call_span = inner_call_term.source_info.span;
    let outer_call_span = match use_span {
        Some(span) => span,
        None => outer_call_stmt.either(|s| s.source_info, |t| t.source_info).span,
    };
    if outer_call_span == inner_call_span || !outer_call_span.contains(inner_call_span) {
        // FIXME: This stops the suggestion in some cases where it should be emitted.
        //        Fix the spans for those cases so it's emitted correctly.
        debug!(
            "outer span {:?} does not strictly contain inner span {:?}",
            outer_call_span, inner_call_span
        );
        return;
    }
    err.span_help(
        inner_call_span,
        format!(
            "try adding a local storing this{}...",
            if use_span.is_some() { "" } else { " argument" }
        ),
    );
    err.span_help(
        outer_call_span,
        format!(
            "...and then using that local {}",
            if use_span.is_some() { "here" } else { "as the argument to this call" }
        ),
    );
}

/// Finds the HIR expression with exactly this `span` in the current body, if any.
pub(crate) fn find_expr(&self, span: Span) -> Option<&'tcx hir::Expr<'tcx>> {
    let tcx = self.infcx.tcx;
    let body_id = tcx.hir_node(self.mir_hir_id()).body_id()?;
    let mut expr_finder = FindExprBySpan::new(span, tcx);
    expr_finder.visit_expr(tcx.hir_body(body_id).value);
    expr_finder.result
}

/// When two conflicting borrows index into the same sliceable container (array, slice,
/// `Vec`, `str`), suggests `split_at_mut` — or, when the conflict is a `mem::swap` of two
/// elements, a machine-applicable rewrite to `container.swap(i, j)`.
fn suggest_slice_method_if_applicable(
    &self,
    err: &mut Diag<'_>,
    place: Place<'tcx>,
    borrowed_place: Place<'tcx>,
    span: Span,
    issued_span: Span,
) {
    let tcx = self.infcx.tcx;
    let has_split_at_mut = |ty: Ty<'tcx>| {
        let ty = ty.peel_refs();
        match ty.kind() {
            ty::Array(..) | ty::Slice(..) => true,
            ty::Adt(def, _) if tcx.get_diagnostic_item(sym::Vec) == Some(def.did()) => true,
            _ if ty == tcx.types.str_ => true,
            _ => false,
        }
    };
    // Both places must be a (possibly deref'd) index projection for the swap suggestion.
    if let ([ProjectionElem::Index(index1)], [ProjectionElem::Index(index2)])
    | (
        [ProjectionElem::Deref, ProjectionElem::Index(index1)],
        [ProjectionElem::Deref, ProjectionElem::Index(index2)],
    ) = (&place.projection[..], &borrowed_place.projection[..])
    {
        let decl1 = &self.body.local_decls[*index1];
        let decl2 = &self.body.local_decls[*index2];

        // Fallback when we can't reconstruct a precise `swap` rewrite.
        let mut note_default_suggestion = || {
            err.help(
                "consider using `.split_at_mut(position)` or similar method to obtain two \
                 mutable non-overlapping sub-slices",
            )
            .help(
                "consider using `.swap(index_1, index_2)` to swap elements at the specified \
                 indices",
            );
        };

        let Some(index1) = self.find_expr(decl1.source_info.span) else {
            note_default_suggestion();
            return;
        };

        let Some(index2) = self.find_expr(decl2.source_info.span) else {
            note_default_suggestion();
            return;
        };

        let sm = tcx.sess.source_map();

        let Ok(index1_str) = sm.span_to_snippet(index1.span) else {
            note_default_suggestion();
            return;
        };

        let Ok(index2_str) = sm.span_to_snippet(index2.span) else {
            note_default_suggestion();
            return;
        };

        let Some(object) = tcx.hir_parent_id_iter(index1.hir_id).find_map(|id| {
            if let hir::Node::Expr(expr) = tcx.hir_node(id)
                && let hir::ExprKind::Index(obj, ..) = expr.kind
            {
                Some(obj)
            } else {
                None
            }
        }) else {
            note_default_suggestion();
            return;
        };

        let Ok(obj_str) = sm.span_to_snippet(object.span) else {
            note_default_suggestion();
            return;
        };

        // Look for an enclosing `mem::swap(...)` call to rewrite.
        let Some(swap_call) = tcx.hir_parent_id_iter(object.hir_id).find_map(|id| {
            if let hir::Node::Expr(call) = tcx.hir_node(id)
                && let hir::ExprKind::Call(callee, ..) = call.kind
                && let hir::ExprKind::Path(qpath) = callee.kind
                && let hir::QPath::Resolved(None, res) = qpath
                && let hir::def::Res::Def(_, did) = res.res
                && tcx.is_diagnostic_item(sym::mem_swap, did)
            {
                Some(call)
            } else {
                None
            }
        }) else {
            // No `mem::swap`: only mention `split_at_mut` when the two indices differ.
            let hir::Node::Expr(parent) = tcx.parent_hir_node(index1.hir_id) else { return };
            let hir::ExprKind::Index(_, idx1, _) = parent.kind else { return };
            let hir::Node::Expr(parent) = tcx.parent_hir_node(index2.hir_id) else { return };
            let hir::ExprKind::Index(_, idx2, _) = parent.kind else { return };
            if !idx1.equivalent_for_indexing(idx2) {
                err.help("use `.split_at_mut(position)` to obtain two mutable non-overlapping sub-slices");
            }
            return;
        };

        err.span_suggestion(
            swap_call.span,
            "use `.swap()` to swap elements at the specified indices instead",
            format!("{obj_str}.swap({index1_str}, {index2_str})"),
            Applicability::MachineApplicable,
        );
        return;
    }
    let place_ty = PlaceRef::ty(&place.as_ref(), self.body, tcx).ty;
    let borrowed_place_ty = PlaceRef::ty(&borrowed_place.as_ref(), self.body, tcx).ty;
    if !has_split_at_mut(place_ty) && !has_split_at_mut(borrowed_place_ty) {
        // Only mention `split_at_mut` on `Vec`, array and slices.
        return;
    }
    let Some(index1) = self.find_expr(span) else { return };
    let hir::Node::Expr(parent) = tcx.parent_hir_node(index1.hir_id) else { return };
    let hir::ExprKind::Index(_, idx1, _) = parent.kind else { return };

    let Some(index2) = self.find_expr(issued_span) else { return };
    let hir::Node::Expr(parent) = tcx.parent_hir_node(index2.hir_id) else { return };
    let hir::ExprKind::Index(_, idx2, _) = parent.kind else { return };

    if idx1.equivalent_for_indexing(idx2) {
        // `let a = &mut foo[0]` and `let b = &mut foo[0]`? Don't mention `split_at_mut`
        return;
    }

    err.help("use `.split_at_mut(position)` to obtain two mutable non-overlapping sub-slices");
}

/// Suggest using `while let` for calls to `next` on an iterator in a for loop.
///
/// For example:
/// ```ignore (illustrative)
///
/// for x in iter {
///     ...
/// iter.next()
    /// }
    /// ```
    pub(crate) fn explain_iterator_advancement_in_for_loop_if_applicable(
        &self,
        err: &mut Diag<'_>,
        span: Span,
        issued_spans: &UseSpans<'tcx>,
    ) {
        let issue_span = issued_spans.args_or_use();
        let tcx = self.infcx.tcx;
        let Some(body_id) = tcx.hir_node(self.mir_hir_id()).body_id() else { return };
        let typeck_results = tcx.typeck(self.mir_def_id());

        // Visitor that locates the pieces of the desugared `for` loop in the enclosing
        // body: the `IntoIterator::into_iter(<head>)` call, the desugared `match` on
        // `Iterator::next`, and the user's explicit `.next()` call inside the loop body.
        // The `= None` initializers are default field values (filled by `..` in the
        // struct literal below).
        struct ExprFinder<'hir> {
            tcx: TyCtxt<'hir>,
            issue_span: Span,
            expr_span: Span,
            body_expr: Option<&'hir hir::Expr<'hir>> = None,
            loop_bind: Option<&'hir Ident> = None,
            loop_span: Option<Span> = None,
            head_span: Option<Span> = None,
            pat_span: Option<Span> = None,
            head: Option<&'hir hir::Expr<'hir>> = None,
        }
        impl<'hir> Visitor<'hir> for ExprFinder<'hir> {
            fn visit_expr(&mut self, ex: &'hir hir::Expr<'hir>) {
                // Try to find
                //    let result = match IntoIterator::into_iter(<head>) {
                //        mut iter => {
                //            [opt_ident]: loop {
                //                match Iterator::next(&mut iter) {
                //                    None => break,
                //                    Some(<pat>) => <body>,
                //                };
                //            }
                //        }
                //    };
                // corresponding to the desugaring of a for loop `for <pat> in <head> { <body> }`.
                if let hir::ExprKind::Call(path, [arg]) = ex.kind
                    && let hir::ExprKind::Path(qpath) = path.kind
                    && self.tcx.qpath_is_lang_item(qpath, LangItem::IntoIterIntoIter)
                    && arg.span.contains(self.issue_span)
                    && ex.span.desugaring_kind() == Some(DesugaringKind::ForLoop)
                {
                    // Find `IntoIterator::into_iter(<head>)`
                    self.head = Some(arg);
                }
                if let hir::ExprKind::Loop(
                    hir::Block { stmts: [stmt, ..], .. },
                    _,
                    hir::LoopSource::ForLoop,
                    _,
                ) = ex.kind
                    && let hir::StmtKind::Expr(hir::Expr {
                        kind: hir::ExprKind::Match(call, [_, bind, ..], _),
                        span: head_span,
                        ..
                    }) = stmt.kind
                    && let hir::ExprKind::Call(path, _args) = call.kind
                    && let hir::ExprKind::Path(qpath) = path.kind
                    && self.tcx.qpath_is_lang_item(qpath, LangItem::IteratorNext)
                    && let hir::PatKind::Struct(qpath, [field, ..], _) = bind.pat.kind
                    && self.tcx.qpath_is_lang_item(qpath, LangItem::OptionSome)
                    && call.span.contains(self.issue_span)
                {
                    // Find `<pat>` and the span for the whole `for` loop.
                    if let PatField {
                        pat: hir::Pat { kind: hir::PatKind::Binding(_, _, ident, ..), .. },
                        ..
                    } = field
                    {
                        self.loop_bind = Some(ident);
                    }
                    self.head_span = Some(*head_span);
                    self.pat_span = Some(bind.pat.span);
                    self.loop_span = Some(stmt.span);
                }

                // The user's own `<recv>.next()` call inside the loop body.
                if let hir::ExprKind::MethodCall(body_call, recv, ..) = ex.kind
                    && body_call.ident.name == sym::next
                    && recv.span.source_equal(self.expr_span)
                {
                    self.body_expr = Some(ex);
                }

                hir::intravisit::walk_expr(self, ex);
            }
        }
        let mut finder = ExprFinder { tcx, expr_span: span, issue_span, .. };
        finder.visit_expr(tcx.hir_body(body_id).value);

        // Only fire when the `.next()` call really resolves to `Iterator::next`.
        if let Some(body_expr) = finder.body_expr
            && let Some(loop_span) = finder.loop_span
            && let Some(def_id) = typeck_results.type_dependent_def_id(body_expr.hir_id)
            && let Some(trait_did) = tcx.trait_of_assoc(def_id)
            && tcx.is_diagnostic_item(sym::Iterator, trait_did)
        {
            if let Some(loop_bind) = finder.loop_bind {
                err.note(format!(
                    "a for loop advances the iterator for you, the result is stored in `{}`",
                    loop_bind.name,
                ));
            } else {
                err.note(
                    "a for loop advances the iterator for you, the result is stored in its pattern",
                );
            }
            // NOTE(review): "a iterator" should read "an iterator"; fixing the runtime
            // string requires re-blessing affected UI tests, so it is only flagged here.
            let msg = "if you want to call `next` on a iterator within the loop, consider using \
                       `while let`";
            // Only emit a structured suggestion when both the loop head and the `.next()`
            // call sit inside the loop we found; otherwise fall back to a plain help.
            if let Some(head) = finder.head
                && let Some(pat_span) = finder.pat_span
                && loop_span.contains(body_expr.span)
                && loop_span.contains(head.span)
            {
                let sm = self.infcx.tcx.sess.source_map();
                let mut sugg = vec![];
                if let hir::ExprKind::Path(hir::QPath::Resolved(None, _)) = head.kind {
                    // A bare path doesn't need a `let` assignment, it's already a simple
                    // binding access.
                    // As a new binding wasn't added, we don't need to modify the advancing call.
                    sugg.push((loop_span.with_hi(pat_span.lo()), "while let Some(".to_string()));
                    sugg.push((
                        pat_span.shrink_to_hi().with_hi(head.span.lo()),
                        ") = ".to_string(),
                    ));
                    sugg.push((head.span.shrink_to_hi(), ".next()".to_string()));
                } else {
                    // Needs a new `let` binding.
                    let indent = if let Some(indent) = sm.indentation_before(loop_span) {
                        format!("\n{indent}")
                    } else {
                        " ".to_string()
                    };
                    let Ok(head_str) = sm.span_to_snippet(head.span) else {
                        err.help(msg);
                        return;
                    };
                    sugg.push((
                        loop_span.with_hi(pat_span.lo()),
                        format!("let iter = {head_str};{indent}while let Some("),
                    ));
                    sugg.push((
                        pat_span.shrink_to_hi().with_hi(head.span.hi()),
                        ") = iter.next()".to_string(),
                    ));
                    // As a new binding was added, we should change how the iterator is advanced to
                    // use the newly introduced binding.
                    if let hir::ExprKind::MethodCall(_, recv, ..) = body_expr.kind
                        && let hir::ExprKind::Path(hir::QPath::Resolved(None, ..)) = recv.kind
                    {
                        // As we introduced a `let iter = <head>;`, we need to change where the
                        // already borrowed value was accessed from `<recv>.next()` to
                        // `iter.next()`.
                        sugg.push((recv.span, "iter".to_string()));
                    }
                }
                err.multipart_suggestion(msg, sugg, Applicability::MaybeIncorrect);
            } else {
                err.help(msg);
            }
        }
    }

    /// Suggest using closure argument instead of capture.
    ///
    /// For example:
    /// ```ignore (illustrative)
    /// struct S;
    ///
    /// impl S {
    ///     fn call(&mut self, f: impl Fn(&mut Self)) { /* ... */ }
    ///     fn x(&self) {}
    /// }
    ///
    /// let mut v = S;
    /// v.call(|this: &mut S| v.x());
    /// //            ^\    ^-- help: try using the closure argument: `this`
    /// //              *-- error: cannot borrow `v` as mutable because it is also borrowed as immutable
    /// ```
    fn suggest_using_closure_argument_instead_of_capture(
        &self,
        err: &mut Diag<'_>,
        borrowed_place: Place<'tcx>,
        issued_spans: &UseSpans<'tcx>,
    ) {
        // Bail out unless this borrow came from a closure capture.
        let &UseSpans::ClosureUse { capture_kind_span, ..
} = issued_spans else { return };

        let tcx = self.infcx.tcx;

        // Get the type of the local that we are trying to borrow
        let local = borrowed_place.local;
        let local_ty = self.body.local_decls[local].ty;

        // Get the body the error happens in
        let Some(body_id) = tcx.hir_node(self.mir_hir_id()).body_id() else { return };

        let body_expr = tcx.hir_body(body_id).value;

        // Visitor that records (a) the innermost closure enclosing `borrow_span` and
        // (b) the path expression whose span *is* `borrow_span` (the erroneous use).
        struct ClosureFinder<'hir> {
            tcx: TyCtxt<'hir>,
            borrow_span: Span,
            res: Option<(&'hir hir::Expr<'hir>, &'hir hir::Closure<'hir>)>,
            /// The path expression with the `borrow_span` span
            error_path: Option<(&'hir hir::Expr<'hir>, &'hir hir::QPath<'hir>)>,
        }
        impl<'hir> Visitor<'hir> for ClosureFinder<'hir> {
            type NestedFilter = OnlyBodies;

            fn maybe_tcx(&mut self) -> Self::MaybeTyCtxt {
                self.tcx
            }

            fn visit_expr(&mut self, ex: &'hir hir::Expr<'hir>) {
                if let hir::ExprKind::Path(qpath) = &ex.kind
                    && ex.span == self.borrow_span
                {
                    self.error_path = Some((ex, qpath));
                }

                if let hir::ExprKind::Closure(closure) = ex.kind
                    && ex.span.contains(self.borrow_span)
                    // To support cases like `|| { v.call(|this| v.get()) }`
                    // FIXME: actually support such cases (need to figure out how to move from the
                    // capture place to original local).
                    && self.res.as_ref().is_none_or(|(prev_res, _)| prev_res.span.contains(ex.span))
                {
                    self.res = Some((ex, closure));
                }

                hir::intravisit::walk_expr(self, ex);
            }
        }

        // Find the closure that most tightly wraps `capture_kind_span`
        let mut finder =
            ClosureFinder { tcx, borrow_span: capture_kind_span, res: None, error_path: None };
        finder.visit_expr(body_expr);
        let Some((closure_expr, closure)) = finder.res else { return };

        let typeck_results = tcx.typeck(self.mir_def_id());

        // Check that the parent of the closure is a method call,
        // with receiver matching with local's type (modulo refs)
        if let hir::Node::Expr(parent) = tcx.parent_hir_node(closure_expr.hir_id)
            && let hir::ExprKind::MethodCall(_, recv, ..) = parent.kind
        {
            let recv_ty = typeck_results.expr_ty(recv);

            if recv_ty.peel_refs() != local_ty {
                return;
            }
        }

        // Get closure's arguments
        let ty::Closure(_, args) = typeck_results.expr_ty(closure_expr).kind() else {
            /* hir::Closure can be a coroutine too */
            return;
        };
        let sig = args.as_closure().sig();
        let tupled_params = tcx.instantiate_bound_regions_with_erased(
            sig.inputs().iter().next().unwrap().map_bound(|&b| b),
        );
        let ty::Tuple(params) = tupled_params.kind() else { return };

        // Find the first argument with a matching type and get its identifier.
        let Some(this_name) =
            params.iter().zip(tcx.hir_body_param_idents(closure.body)).find_map(
                |(param_ty, ident)| {
                    // FIXME: also support deref for stuff like `Rc` arguments
                    if param_ty.peel_refs() == local_ty { ident } else { None }
                },
            )
        else {
            return;
        };

        // Replace either every use of the captured variable inside the closure body,
        // or (fallback) just the capture span itself.
        let spans;
        if let Some((_path_expr, qpath)) = finder.error_path
            && let hir::QPath::Resolved(_, path) = qpath
            && let hir::def::Res::Local(local_id) = path.res
        {
            // Find all references to the problematic variable in this closure body
            struct VariableUseFinder {
                local_id: hir::HirId,
                spans: Vec<Span>,
            }
            impl<'hir> Visitor<'hir> for VariableUseFinder {
                fn visit_expr(&mut self, ex: &'hir hir::Expr<'hir>) {
                    if let hir::ExprKind::Path(qpath) = &ex.kind
                        && let hir::QPath::Resolved(_, path) = qpath
                        && let hir::def::Res::Local(local_id) = path.res
                        && local_id == self.local_id
                    {
                        self.spans.push(ex.span);
                    }

                    hir::intravisit::walk_expr(self, ex);
                }
            }

            let mut finder = VariableUseFinder { local_id, spans: Vec::new() };
            finder.visit_expr(tcx.hir_body(closure.body).value);

            spans = finder.spans;
        } else {
            spans = vec![capture_kind_span];
        }

        err.multipart_suggestion(
            "try using the closure argument",
            iter::zip(spans, iter::repeat(this_name.to_string())).collect(),
            Applicability::MaybeIncorrect,
        );
    }

    /// Suggest explicitly passing `&Self` into the closure as an argument: rewrites
    /// uses of `self` inside the capturing closure to `this`, adds a `this: &Self`
    /// parameter, and updates call sites of the closure-holding local to pass `self`.
    fn suggest_binding_for_closure_capture_self(
        &self,
        err: &mut Diag<'_>,
        issued_spans: &UseSpans<'tcx>,
    ) {
        // Only applies when the conflicting use is a closure capture.
        let UseSpans::ClosureUse { capture_kind_span, ..
} = issued_spans else { return };

        // Visitor that collects every edit needed for the suggestion. The `= ...`
        // initializers are default field values (filled by `..` in the literal below).
        struct ExpressionFinder<'tcx> {
            capture_span: Span,
            closure_change_spans: Vec<Span> = vec![],
            closure_arg_span: Option<Span> = None,
            in_closure: bool = false,
            suggest_arg: String = String::new(),
            tcx: TyCtxt<'tcx>,
            closure_local_id: Option<hir::HirId> = None,
            closure_call_changes: Vec<(Span, String)> = vec![],
        }
        impl<'hir> Visitor<'hir> for ExpressionFinder<'hir> {
            fn visit_expr(&mut self, e: &'hir hir::Expr<'hir>) {
                // Enter the closure that encloses the capture and record where its
                // argument list is, so a `this: &Self` parameter can be inserted.
                if e.span.contains(self.capture_span)
                    && let hir::ExprKind::Closure(&hir::Closure {
                        kind: hir::ClosureKind::Closure,
                        body,
                        fn_arg_span,
                        fn_decl: hir::FnDecl { inputs, .. },
                        ..
                    }) = e.kind
                    && let hir::Node::Expr(body) = self.tcx.hir_node(body.hir_id)
                {
                    self.suggest_arg = "this: &Self".to_string();
                    if inputs.len() > 0 {
                        self.suggest_arg.push_str(", ");
                    }
                    self.in_closure = true;
                    self.closure_arg_span = fn_arg_span;
                    self.visit_expr(body);
                    self.in_closure = false;
                }
                // Every bare `self` inside the closure gets rewritten to `this`.
                if let hir::Expr { kind: hir::ExprKind::Path(path), .. } = e
                    && let hir::QPath::Resolved(_, hir::Path { segments: [seg], .. }) = path
                    && seg.ident.name == kw::SelfLower
                    && self.in_closure
                {
                    self.closure_change_spans.push(e.span);
                }
                hir::intravisit::walk_expr(self, e);
            }

            fn visit_local(&mut self, local: &'hir hir::LetStmt<'hir>) {
                // Remember the local the closure is bound to (`let f = |...| ...;`).
                if let hir::Pat { kind: hir::PatKind::Binding(_, hir_id, _ident, _), .. } =
                    local.pat
                    && let Some(init) = local.init
                    && let &hir::Expr {
                        kind:
                            hir::ExprKind::Closure(&hir::Closure {
                                kind: hir::ClosureKind::Closure,
                                ..
                            }),
                        ..
                    } = init
                    && init.span.contains(self.capture_span)
                {
                    self.closure_local_id = Some(*hir_id);
                }
                hir::intravisit::walk_local(self, local);
            }

            fn visit_stmt(&mut self, s: &'hir hir::Stmt<'hir>) {
                // Calls of the closure local (`f(args)`) must now pass `self` first.
                if let hir::StmtKind::Semi(e) = s.kind
                    && let hir::ExprKind::Call(
                        hir::Expr { kind: hir::ExprKind::Path(path), .. },
                        args,
                    ) = e.kind
                    && let hir::QPath::Resolved(_, hir::Path { segments: [seg], .. }) = path
                    && let Res::Local(hir_id) = seg.res
                    && Some(hir_id) == self.closure_local_id
                {
                    let (span, arg_str) = if args.len() > 0 {
                        // Prepend `self, ` before the first existing argument.
                        (args[0].span.shrink_to_lo(), "self, ".to_string())
                    } else {
                        // No arguments: replace the empty `()` with `(self)`.
                        let span = e.span.trim_start(seg.ident.span).unwrap_or(e.span);
                        (span, "(self)".to_string())
                    };
                    self.closure_call_changes.push((span, arg_str));
                }
                hir::intravisit::walk_stmt(self, s);
            }
        }

        if let hir::Node::ImplItem(hir::ImplItem {
            kind: hir::ImplItemKind::Fn(_fn_sig, body_id),
            ..
        }) = self.infcx.tcx.hir_node(self.mir_hir_id())
            && let hir::Node::Expr(expr) = self.infcx.tcx.hir_node(body_id.hir_id)
        {
            let mut finder = ExpressionFinder {
                capture_span: *capture_kind_span,
                tcx: self.infcx.tcx,
                ..
            };
            finder.visit_expr(expr);
            // Both the `self` rewrites and the call-site rewrites must exist, or the
            // combined suggestion would not compile.
            if finder.closure_change_spans.is_empty() || finder.closure_call_changes.is_empty() {
                return;
            }

            let sm = self.infcx.tcx.sess.source_map();
            let sugg = finder
                .closure_arg_span
                .map(|span| (sm.next_point(span.shrink_to_lo()).shrink_to_hi(), finder.suggest_arg))
                .into_iter()
                .chain(
                    finder.closure_change_spans.into_iter().map(|span| (span, "this".to_string())),
                )
                .chain(finder.closure_call_changes)
                .collect();
            err.multipart_suggestion_verbose(
                "try explicitly passing `&Self` into the closure as an argument",
                sugg,
                Applicability::MachineApplicable,
            );
        }
    }

    /// Returns the description of the root place for a conflicting borrow and the full
    /// descriptions of the places that caused the conflict.
///
    /// In the simplest case, where there are no unions involved, if a mutable borrow of `x` is
    /// attempted while a shared borrow is live, then this function will return:
    /// ```
    /// ("x", "", "")
    /// # ;
    /// ```
    /// In the simple union case, if a mutable borrow of a union field `x.z` is attempted while
    /// a shared borrow of another field `x.y`, then this function will return:
    /// ```
    /// ("x", "x.z", "x.y")
    /// # ;
    /// ```
    /// In the more complex union case, where the union is a field of a struct, then if a mutable
    /// borrow of a union field in a struct `x.u.z` is attempted while a shared borrow of
    /// another field `x.u.y`, then this function will return:
    /// ```
    /// ("x.u", "x.u.z", "x.u.y")
    /// # ;
    /// ```
    /// This is used when creating error messages like below:
    ///
    /// ```text
    /// cannot borrow `a.u` (via `a.u.z.c`) as immutable because it is also borrowed as
    /// mutable (via `a.u.s.b`) [E0502]
    /// ```
    fn describe_place_for_conflicting_borrow(
        &self,
        first_borrowed_place: Place<'tcx>,
        second_borrowed_place: Place<'tcx>,
    ) -> (String, String, String, String) {
        // Define a small closure that we can use to check if the type of a place
        // is a union.
        let union_ty = |place_base| {
            // Need to use fn call syntax `PlaceRef::ty` to determine the type of `place_base`;
            // using a type annotation in the closure argument instead leads to a lifetime error.
            let ty = PlaceRef::ty(&place_base, self.body, self.infcx.tcx).ty;
            ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
        };

        // Start with an empty tuple, so we can use the functions on `Option` to reduce some
        // code duplication (particularly around returning an empty description in the failure
        // case).
        Some(())
            .filter(|_| {
                // If we have a conflicting borrow of the same place, then we don't want to add
                // an extraneous "via x.y" to our diagnostics, so filter out this case.
                first_borrowed_place != second_borrowed_place
            })
            .and_then(|_| {
                // We're going to want to traverse the first borrowed place to see if we can find
                // field access to a union. If we find that, then we will keep the place of the
                // union being accessed and the field that was being accessed so we can check the
                // second borrowed place for the same union and an access to a different field.
                for (place_base, elem) in first_borrowed_place.iter_projections().rev() {
                    match elem {
                        ProjectionElem::Field(field, _) if union_ty(place_base).is_some() => {
                            return Some((place_base, field));
                        }
                        _ => {}
                    }
                }
                None
            })
            .and_then(|(target_base, target_field)| {
                // With the place of a union and a field access into it, we traverse the second
                // borrowed place and look for an access to a different field of the same union.
                for (place_base, elem) in second_borrowed_place.iter_projections().rev() {
                    if let ProjectionElem::Field(field, _) = elem
                        && let Some(union_ty) = union_ty(place_base)
                    {
                        if field != target_field && place_base == target_base {
                            return Some((
                                self.describe_any_place(place_base),
                                self.describe_any_place(first_borrowed_place.as_ref()),
                                self.describe_any_place(second_borrowed_place.as_ref()),
                                union_ty.to_string(),
                            ));
                        }
                    }
                }
                None
            })
            .unwrap_or_else(|| {
                // If we didn't find a field access into a union, or both places match, then
                // only return the description of the first place.
                (
                    self.describe_any_place(first_borrowed_place.as_ref()),
                    "".to_string(),
                    "".to_string(),
                    "".to_string(),
                )
            })
    }

    /// This means that some data referenced by `borrow` needs to live
    /// past the point where the StorageDeadOrDrop of `place` occurs.
    /// This is usually interpreted as meaning that `place` has too
    /// short a lifetime. (But sometimes it is more useful to report
    /// it as a more direct conflict between the execution of a
    /// `Drop::drop` with an aliasing borrow.)
#[instrument(level = "debug", skip(self))]
    pub(crate) fn report_borrowed_value_does_not_live_long_enough(
        &mut self,
        location: Location,
        borrow: &BorrowData<'tcx>,
        place_span: (Place<'tcx>, Span),
        kind: Option<WriteKind>,
    ) {
        let drop_span = place_span.1;
        let borrowed_local = borrow.borrowed_place.local;

        let borrow_spans = self.retrieve_borrow_spans(borrow);
        let borrow_span = borrow_spans.var_or_use_path_span();

        let proper_span = self.body.local_decls[borrowed_local].source_info.span;

        // Deduplicate: don't report the same (place, span) pair twice.
        if self.access_place_error_reported.contains(&(Place::from(borrowed_local), borrow_span)) {
            debug!(
                "suppressing access_place error when borrow doesn't live long enough for {:?}",
                borrow_span
            );
            return;
        }

        self.access_place_error_reported.insert((Place::from(borrowed_local), borrow_span));

        // Thread-locals get their own dedicated diagnostic.
        if self.body.local_decls[borrowed_local].is_ref_to_thread_local() {
            let err =
                self.report_thread_local_value_does_not_live_long_enough(drop_span, borrow_span);
            self.buffer_error(err);
            return;
        }

        if let StorageDeadOrDrop::Destructor(dropped_ty) =
            self.classify_drop_access_kind(borrow.borrowed_place.as_ref())
        {
            // If a borrow of path `B` conflicts with drop of `D` (and
            // we're not in the uninteresting case where `B` is a
            // prefix of `D`), then report this as a more interesting
            // destructor conflict.
            if !borrow.borrowed_place.as_ref().is_prefix_of(place_span.0.as_ref()) {
                self.report_borrow_conflicts_with_destructor(
                    location, borrow, place_span, kind, dropped_ty,
                );
                return;
            }
        }

        let place_desc = self.describe_place(borrow.borrowed_place.as_ref());

        let kind_place = kind.filter(|_| place_desc.is_some()).map(|k| (k, place_span.0));
        let explanation = self.explain_why_borrow_contains_point(location, borrow, kind_place);

        debug!(?place_desc, ?explanation);

        // Dispatch on (whether we can name the place, why the borrow is still live)
        // to pick the most specific report.
        let mut err = match (place_desc, explanation) {
            // If the outlives constraint comes from inside the closure,
            // for example:
            //
            // let x = 0;
            // let y = &x;
            // Box::new(|| y) as Box<Fn() -> &'static i32>
            //
            // then just use the normal error. The closure isn't escaping
            // and `move` will not help here.
            (
                Some(name),
                BorrowExplanation::UsedLater(_, LaterUseKind::ClosureCapture, var_or_use_span, _),
            ) if borrow_spans.for_coroutine() || borrow_spans.for_closure() => self
                .report_escaping_closure_capture(
                    borrow_spans,
                    borrow_span,
                    &RegionName {
                        name: self.synthesize_region_name(),
                        source: RegionNameSource::Static,
                    },
                    ConstraintCategory::CallArgument(None),
                    var_or_use_span,
                    &format!("`{name}`"),
                    "block",
                ),
            (
                Some(name),
                BorrowExplanation::MustBeValidFor {
                    category:
                        category @ (ConstraintCategory::Return(_)
                        | ConstraintCategory::CallArgument(_)
                        | ConstraintCategory::OpaqueType),
                    from_closure: false,
                    ref region_name,
                    span,
                    ..
                },
            ) if borrow_spans.for_coroutine() || borrow_spans.for_closure() => self
                .report_escaping_closure_capture(
                    borrow_spans,
                    borrow_span,
                    region_name,
                    category,
                    span,
                    &format!("`{name}`"),
                    "function",
                ),
            (
                name,
                BorrowExplanation::MustBeValidFor {
                    category: ConstraintCategory::Assignment,
                    from_closure: false,
                    region_name:
                        RegionName {
                            source: RegionNameSource::AnonRegionFromUpvar(upvar_span, upvar_name),
                            ..
                        },
                    span,
                    ..
                },
            ) => self.report_escaping_data(borrow_span, &name, upvar_span, upvar_name, span),
            (Some(name), explanation) => self.report_local_value_does_not_live_long_enough(
                location,
                &name,
                borrow,
                drop_span,
                borrow_spans,
                explanation,
            ),
            (None, explanation) => self.report_temporary_value_does_not_live_long_enough(
                location,
                borrow,
                drop_span,
                borrow_spans,
                proper_span,
                explanation,
            ),
        };
        self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);

        self.buffer_error(err);
    }

    /// Build the diagnostic for a *named* local whose borrow outlives it, using the
    /// computed `explanation` to choose between the return-reference report and the
    /// generic "does not live long enough" report.
    #[tracing::instrument(level = "debug", skip(self, explanation))]
    fn report_local_value_does_not_live_long_enough(
        &self,
        location: Location,
        name: &str,
        borrow: &BorrowData<'tcx>,
        drop_span: Span,
        borrow_spans: UseSpans<'tcx>,
        explanation: BorrowExplanation<'tcx>,
    ) -> Diag<'infcx> {
        let borrow_span = borrow_spans.var_or_use_path_span();
        // Prefer the dedicated "cannot return reference to local" diagnostic when the
        // borrow must be valid for a return/call/opaque-type constraint.
        if let BorrowExplanation::MustBeValidFor {
            category,
            span,
            ref opt_place_desc,
            from_closure: false,
            ..
        } = explanation
            && let Err(diag) = self.try_report_cannot_return_reference_to_local(
                borrow,
                borrow_span,
                span,
                category,
                opt_place_desc.as_ref(),
            )
        {
            return diag;
        }

        let name = format!("`{name}`");

        let mut err = self.path_does_not_live_long_enough(borrow_span, &name);

        if let Some(annotation) = self.annotate_argument_and_return_for_borrow(borrow) {
            let region_name = annotation.emit(self, &mut err);

            err.span_label(
                borrow_span,
                format!("{name} would have to be valid for `{region_name}`..."),
            );

            err.span_label(
                drop_span,
                format!(
                    "...but {name} will be dropped here, when the {} returns",
                    self.infcx
                        .tcx
                        .opt_item_name(self.mir_def_id().to_def_id())
                        .map(|name| format!("function `{name}`"))
                        .unwrap_or_else(|| {
                            match &self.infcx.tcx.def_kind(self.mir_def_id()) {
                                DefKind::Closure
                                    if self
                                        .infcx
                                        .tcx
                                        .is_coroutine(self.mir_def_id().to_def_id()) =>
                                {
                                    "enclosing coroutine"
                                }
                                DefKind::Closure => "enclosing closure",
                                kind => bug!("expected closure or coroutine, found {:?}", kind),
                            }
                            .to_string()
                        })
                ),
            );

            err.note(
                "functions cannot return a borrow to data owned within the function's scope, \
                 functions can only return borrows to data passed as arguments",
            );
            err.note(
                "to learn more, visit <https://doc.rust-lang.org/book/ch04-02-\
                 references-and-borrowing.html#dangling-references>",
            );

            // `MustBeValidFor` was already explained by the labels above; anything else
            // still needs the generic explanation appended.
            if let BorrowExplanation::MustBeValidFor { .. } = explanation {
            } else {
                explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None);
            }
        } else {
            err.span_label(borrow_span, "borrowed value does not live long enough");
            err.span_label(drop_span, format!("{name} dropped here while still borrowed"));

            borrow_spans.args_subdiag(&mut err, |args_span| {
                crate::session_diagnostics::CaptureArgLabel::Capture {
                    is_within: borrow_spans.for_coroutine(),
                    args_span,
                }
            });

            explanation.add_explanation_to_diagnostic(&self, &mut err, "", Some(borrow_span), None);

            // Detect buffer reuse pattern
            // NOTE(review): this scans *every* local of the body and fires on the first
            // `Vec<&T>`-typed one it finds, even when that vector is unrelated to this
            // borrow — `_dropped_local` from the explanation is bound but never used to
            // narrow the search. Presumably the label/note can attach to an arbitrary
            // unrelated `Vec` local; confirm against UI tests before trusting the wording.
            if let BorrowExplanation::UsedLater(_dropped_local, _, _, _) = explanation {
                // Check all locals at the borrow location to find Vec<&T> types
                for (local, local_decl) in self.body.local_decls.iter_enumerated() {
                    if let ty::Adt(adt_def, args) = local_decl.ty.kind()
                        && self.infcx.tcx.is_diagnostic_item(sym::Vec, adt_def.did())
                        && args.len() > 0
                    {
                        let vec_inner_ty = args.type_at(0);
                        // Check if Vec contains references
                        if vec_inner_ty.is_ref() {
                            let local_place = local.into();
                            if let Some(local_name) = self.describe_place(local_place) {
                                err.span_label(
                                    local_decl.source_info.span,
                                    format!("variable `{local_name}` declared here"),
                                );
                                err.note(
                                    format!(
                                        "`{local_name}` is a collection that stores borrowed references, \
                                         but {name} does not live long enough to be stored in it"
                                    )
                                );
                                err.help(
                                    "buffer reuse with borrowed references requires unsafe code or restructuring"
                                );
                                break;
                            }
                        }
                    }
                }
            }
        }

        err
    }

    /// Report a borrow that conflicts with the execution of a `Drop` impl: dropping
    /// `place` needs exclusive access that the live borrow denies.
    fn report_borrow_conflicts_with_destructor(
        &mut self,
        location: Location,
        borrow: &BorrowData<'tcx>,
        (place, drop_span): (Place<'tcx>, Span),
        kind: Option<WriteKind>,
        dropped_ty: Ty<'tcx>,
    ) {
        debug!(
            "report_borrow_conflicts_with_destructor(\
             {:?}, {:?}, ({:?}, {:?}), {:?}\
             )",
location, borrow, place, drop_span, kind,
        );

        let borrow_spans = self.retrieve_borrow_spans(borrow);
        let borrow_span = borrow_spans.var_or_use();

        let mut err = self.cannot_borrow_across_destructor(borrow_span);

        let what_was_dropped = match self.describe_place(place.as_ref()) {
            Some(name) => format!("`{name}`"),
            None => String::from("temporary value"),
        };

        let label = match self.describe_place(borrow.borrowed_place.as_ref()) {
            Some(borrowed) => format!(
                "here, drop of {what_was_dropped} needs exclusive access to `{borrowed}`, \
                 because the type `{dropped_ty}` implements the `Drop` trait"
            ),
            None => format!(
                "here is drop of {what_was_dropped}; whose type `{dropped_ty}` implements the `Drop` trait"
            ),
        };
        err.span_label(drop_span, label);

        // Only give this note and suggestion if they could be relevant.
        let explanation =
            self.explain_why_borrow_contains_point(location, borrow, kind.map(|k| (k, place)));
        match explanation {
            BorrowExplanation::UsedLater { .. }
            | BorrowExplanation::UsedLaterWhenDropped { .. } => {
                err.note("consider using a `let` binding to create a longer lived value");
            }
            _ => {}
        }

        explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None);

        self.buffer_error(err);
    }

    /// Build the diagnostic for borrowing a thread-local beyond the end of the
    /// enclosing function (per the label: such borrows cannot outlive the function).
    fn report_thread_local_value_does_not_live_long_enough(
        &self,
        drop_span: Span,
        borrow_span: Span,
    ) -> Diag<'infcx> {
        debug!(
            "report_thread_local_value_does_not_live_long_enough(\
             {:?}, {:?}\
             )",
            drop_span, borrow_span
        );

        // `TerminatorKind::Return`'s span (the `drop_span` here) `lo` can be subtly wrong and point
        // at a single character after the end of the function. This is somehow relied upon in
        // existing diagnostics, and changing this in `rustc_mir_build` makes diagnostics worse in
        // general. We fix these here.
        let sm = self.infcx.tcx.sess.source_map();
        let end_of_function = if drop_span.is_empty()
            && let Ok(adjusted_span) = sm.span_extend_prev_while(drop_span, |c| c == '}')
        {
            adjusted_span
        } else {
            drop_span
        };
        self.thread_local_value_does_not_live_long_enough(borrow_span)
            .with_span_label(
                borrow_span,
                "thread-local variables cannot be borrowed beyond the end of the function",
            )
            .with_span_label(end_of_function, "end of enclosing function is here")
    }

    #[instrument(level = "debug", skip(self))]
    fn report_temporary_value_does_not_live_long_enough(
        &self,
        location: Location,
        borrow: &BorrowData<'tcx>,
        drop_span: Span,
        borrow_spans: UseSpans<'tcx>,
        proper_span: Span,
        explanation: BorrowExplanation<'tcx>,
    ) -> Diag<'infcx> {
        // Prefer the dedicated "cannot return reference to local" diagnostic when the
        // borrow must be valid for an outlives constraint not coming from a closure.
        if let BorrowExplanation::MustBeValidFor { category, span, from_closure: false, .. } =
            explanation
        {
            if let Err(diag) = self.try_report_cannot_return_reference_to_local(
                borrow,
                proper_span,
                span,
                category,
                None,
            ) {
                return diag;
            }
        }

        let mut err = self.temporary_value_borrowed_for_too_long(proper_span);
        err.span_label(proper_span, "creates a temporary value which is freed while still in use");
        err.span_label(drop_span, "temporary value is freed at the end of this statement");

        match explanation {
            BorrowExplanation::UsedLater(..)
            | BorrowExplanation::UsedLaterInLoop(..)
            | BorrowExplanation::UsedLaterWhenDropped { .. } => {
                // Only give this note and suggestion if it could be relevant.
                let sm = self.infcx.tcx.sess.source_map();
                let mut suggested = false;
                let msg = "consider using a `let` binding to create a longer lived value";

                /// We check that there's a single level of block nesting to ensure always correct
                /// suggestions. If we don't, then we only provide a free-form message to avoid
                /// misleading users in cases like `tests/ui/nll/borrowed-temporary-error.rs`.
                /// We could expand the analysis to suggest hoisting all of the relevant parts of
                /// the users' code to make the code compile, but that could be too much.
                /// We found the `prop_expr` by the way to check whether the expression is a
                /// `FormatArguments`, which is a special case since it's generated by the
                /// compiler.
                struct NestedStatementVisitor<'tcx> {
                    // Call-site span of the temporary we are looking for.
                    span: Span,
                    // Current block-nesting depth during the walk.
                    current: usize,
                    // Nesting depth at which `span` was found (0 = top level).
                    found: usize,
                    prop_expr: Option<&'tcx hir::Expr<'tcx>>,
                    call: Option<&'tcx hir::Expr<'tcx>>,
                }

                impl<'tcx> Visitor<'tcx> for NestedStatementVisitor<'tcx> {
                    fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
                        self.current += 1;
                        walk_block(self, block);
                        self.current -= 1;
                    }
                    fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
                        if let hir::ExprKind::MethodCall(_, rcvr, _, _) = expr.kind {
                            if self.span == rcvr.span.source_callsite() {
                                self.call = Some(expr);
                            }
                        }
                        if self.span == expr.span.source_callsite() {
                            self.found = self.current;
                            if self.prop_expr.is_none() {
                                self.prop_expr = Some(expr);
                            }
                        }
                        walk_expr(self, expr);
                    }
                }
                let source_info = self.body.source_info(location);
                let proper_span = proper_span.source_callsite();
                if let Some(scope) = self.body.source_scopes.get(source_info.scope)
                    && let ClearCrossCrate::Set(scope_data) = &scope.local_data
                    && let Some(id) = self.infcx.tcx.hir_node(scope_data.lint_root).body_id()
                    && let hir::ExprKind::Block(block, _) = self.infcx.tcx.hir_body(id).value.kind
                {
                    for stmt in block.stmts {
                        let mut visitor = NestedStatementVisitor {
                            span: proper_span,
                            current: 0,
                            found: 0,
                            prop_expr: None,
                            call: None,
                        };
                        visitor.visit_stmt(stmt);

                        let typeck_results = self.infcx.tcx.typeck(self.mir_def_id());
                        let expr_ty: Option<Ty<'_>> =
                            visitor.prop_expr.map(|expr| typeck_results.expr_ty(expr).peel_refs());

                        // Only suggest when the temporary is directly inside this
                        // statement (single nesting level, see doc comment above).
                        if visitor.found == 0
                            && stmt.span.contains(proper_span)
                            && let Some(p) = sm.span_to_margin(stmt.span)
                            && let Ok(s) = sm.span_to_snippet(proper_span)
                        {
                            if let Some(call) = visitor.call
                                && let hir::ExprKind::MethodCall(path, _, [], _) = call.kind
                                && path.ident.name == sym::iter
                                && let Some(ty) = expr_ty
                            {
                                err.span_suggestion_verbose(
                                    path.ident.span,
                                    format!(
                                        "consider consuming the `{ty}` when turning it into an \
                                         `Iterator`",
                                    ),
                                    "into_iter",
                                    Applicability::MaybeIncorrect,
                                );
                            }

                            let mutability = if matches!(borrow.kind(), BorrowKind::Mut { .. }) {
                                "mut "
                            } else {
                                ""
                            };
                            // Insert `let [mut ]binding = <temporary>;` before the
                            // statement and replace the temporary with `binding`.
                            let addition =
                                format!("let {}binding = {};\n{}", mutability, s, " ".repeat(p));
                            err.multipart_suggestion_verbose(
                                msg,
                                vec![
                                    (stmt.span.shrink_to_lo(), addition),
                                    (proper_span, "binding".to_string()),
                                ],
                                Applicability::MaybeIncorrect,
                            );
                            suggested = true;
                            break;
                        }
                    }
                }
                if !suggested {
                    err.note(msg);
                }
            }
            _ => {}
        }
        explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None);

        borrow_spans.args_subdiag(&mut err, |args_span| {
            crate::session_diagnostics::CaptureArgLabel::Capture {
                is_within: borrow_spans.for_coroutine(),
                args_span,
            }
        });

        err
    }

    /// Tries to report the specialized "cannot return/yield reference to local"
    /// diagnostic. Returns `Ok(())` if `category` is not a return/yield
    /// constraint (caller should fall back to a generic report), `Err(diag)`
    /// with the built diagnostic otherwise.
    fn try_report_cannot_return_reference_to_local(
        &self,
        borrow: &BorrowData<'tcx>,
        borrow_span: Span,
        return_span: Span,
        category: ConstraintCategory<'tcx>,
        opt_place_desc: Option<&String>,
    ) -> Result<(), Diag<'infcx>> {
        let return_kind = match category {
            ConstraintCategory::Return(_) => "return",
            ConstraintCategory::Yield => "yield",
            _ => return Ok(()),
        };

        // FIXME use a better heuristic than Spans
        let reference_desc = if return_span == self.body.source_info(borrow.reserve_location).span {
            "reference to"
        } else {
            "value referencing"
        };

        let (place_desc, note) = if let Some(place_desc) = opt_place_desc {
            let local_kind = if let Some(local) = borrow.borrowed_place.as_local() {
                match self.body.local_kind(local) {
                    LocalKind::Temp if self.body.local_decls[local].is_user_variable() => {
                        "local variable "
                    }
                    LocalKind::Arg
                        if !self.upvars.is_empty() && local == ty::CAPTURE_STRUCT_LOCAL =>
                    {
                        "variable captured by `move` "
                    }
                    LocalKind::Arg => "function parameter ",
                    LocalKind::ReturnPointer | LocalKind::Temp => {
                        bug!("temporary or return pointer with a name")
                    }
                }
            } else {
                "local data "
            };
            (format!("{local_kind}`{place_desc}`"), format!("`{place_desc}` is borrowed here"))
        } else {
            let local = borrow.borrowed_place.local;
            match self.body.local_kind(local) {
                LocalKind::Arg
                => (
                    "function parameter".to_string(),
                    "function parameter borrowed here".to_string(),
                ),
                LocalKind::Temp
                    if self.body.local_decls[local].is_user_variable()
                        && !self.body.local_decls[local]
                            .source_info
                            .span
                            .in_external_macro(self.infcx.tcx.sess.source_map()) =>
                {
                    ("local binding".to_string(), "local binding introduced here".to_string())
                }
                LocalKind::ReturnPointer | LocalKind::Temp => {
                    ("temporary value".to_string(), "temporary value created here".to_string())
                }
            }
        };

        let mut err = self.cannot_return_reference_to_local(
            return_span,
            return_kind,
            reference_desc,
            &place_desc,
        );

        if return_span != borrow_span {
            err.span_label(borrow_span, note);

            let tcx = self.infcx.tcx;

            let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;

            // to avoid panics
            if let Some(iter_trait) = tcx.get_diagnostic_item(sym::Iterator)
                && self
                    .infcx
                    .type_implements_trait(iter_trait, [return_ty], self.infcx.param_env)
                    .must_apply_modulo_regions()
            {
                // Returned type is an iterator: suggest collecting it so it owns
                // its data instead of borrowing the local.
                err.span_suggestion_hidden(
                    return_span.shrink_to_hi(),
                    "use `.collect()` to allocate the iterator",
                    ".collect::<Vec<_>>()",
                    Applicability::MaybeIncorrect,
                );
            }
        }

        Err(err)
    }

    /// Reports a closure/coroutine capturing a variable by reference when it
    /// must outlive the scope that owns the variable; suggests adding `move`.
    #[instrument(level = "debug", skip(self))]
    fn report_escaping_closure_capture(
        &self,
        use_span: UseSpans<'tcx>,
        var_span: Span,
        fr_name: &RegionName,
        category: ConstraintCategory<'tcx>,
        constraint_span: Span,
        captured_var: &str,
        scope: &str,
    ) -> Diag<'infcx> {
        let tcx = self.infcx.tcx;
        let args_span = use_span.args_or_use();

        // Compute where to insert `move`: right after any `async`/`async gen`/
        // `gen`/`static` prefix, otherwise at the start of the closure.
        let (sugg_span, suggestion) = match tcx.sess.source_map().span_to_snippet(args_span) {
            Ok(string) => {
                let coro_prefix = if let Some(sub) = string.strip_prefix("async") {
                    let trimmed_sub = sub.trim_end();
                    if trimmed_sub.ends_with("gen") {
                        // `async` is 5 chars long.
                        Some((trimmed_sub.len() + 5) as _)
                    } else {
                        // `async` is 5 chars long.
                        Some(5)
                    }
                } else if string.starts_with("gen") {
                    // `gen` is 3 chars long
                    Some(3)
                } else if string.starts_with("static") {
                    // `static` is 6 chars long
                    // This is used for `!Unpin` coroutines
                    Some(6)
                } else {
                    None
                };
                if let Some(n) = coro_prefix {
                    let pos = args_span.lo() + BytePos(n);
                    (args_span.with_lo(pos).with_hi(pos), " move")
                } else {
                    (args_span.shrink_to_lo(), "move ")
                }
            }
            Err(_) => (args_span, "move |<args>| <body>"),
        };

        // Human-readable description of the kind of closure/coroutine involved.
        let kind = match use_span.coroutine_kind() {
            Some(coroutine_kind) => match coroutine_kind {
                CoroutineKind::Desugared(CoroutineDesugaring::Gen, kind) => match kind {
                    CoroutineSource::Block => "gen block",
                    CoroutineSource::Closure => "gen closure",
                    CoroutineSource::Fn => {
                        bug!("gen block/closure expected, but gen function found.")
                    }
                },
                CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, kind) => match kind {
                    CoroutineSource::Block => "async gen block",
                    CoroutineSource::Closure => "async gen closure",
                    CoroutineSource::Fn => {
                        bug!("gen block/closure expected, but gen function found.")
                    }
                },
                CoroutineKind::Desugared(CoroutineDesugaring::Async, async_kind) => {
                    match async_kind {
                        CoroutineSource::Block => "async block",
                        CoroutineSource::Closure => "async closure",
                        CoroutineSource::Fn => {
                            bug!("async block/closure expected, but async function found.")
                        }
                    }
                }
                CoroutineKind::Coroutine(_) => "coroutine",
            },
            None => "closure",
        };

        let mut err = self.cannot_capture_in_long_lived_closure(
            args_span,
            kind,
            captured_var,
            var_span,
            scope,
        );
        err.span_suggestion_verbose(
            sugg_span,
            format!(
                "to force the {kind} to take ownership of {captured_var} (and any \
                 other referenced variables), use the `move` keyword"
            ),
            suggestion,
            Applicability::MachineApplicable,
        );

        match category {
            ConstraintCategory::Return(_) | ConstraintCategory::OpaqueType => {
                let msg = format!("{kind} is returned here");
                err.span_note(constraint_span, msg);
            }
            ConstraintCategory::CallArgument(_) => {
                fr_name.highlight_region_name(&mut err);
                if matches!(
                    use_span.coroutine_kind(),
                    Some(CoroutineKind::Desugared(CoroutineDesugaring::Async, _))
                ) {
                    err.note(
                        "async blocks are not executed immediately and must either take a \
                         reference or ownership of outside variables they use",
                    );
                } else {
                    let msg = format!("{scope} requires argument type to outlive `{fr_name}`");
                    err.span_note(constraint_span, msg);
                }
            }
            _ => bug!(
                "report_escaping_closure_capture called with unexpected constraint \
                 category: `{:?}`",
                category
            ),
        }

        err
    }

    /// Reports borrowed data escaping a closure/function body, labeling where
    /// the upvar was declared, where the borrow is valid, and where the
    /// reference escapes.
    fn report_escaping_data(
        &self,
        borrow_span: Span,
        name: &Option<String>,
        upvar_span: Span,
        upvar_name: Symbol,
        escape_span: Span,
    ) -> Diag<'infcx> {
        let tcx = self.infcx.tcx;

        let escapes_from = tcx.def_descr(self.mir_def_id().to_def_id());

        let mut err =
            borrowck_errors::borrowed_data_escapes_closure(tcx, escape_span, escapes_from);

        err.span_label(
            upvar_span,
            format!("`{upvar_name}` declared here, outside of the {escapes_from} body"),
        );

        err.span_label(borrow_span, format!("borrow is only valid in the {escapes_from} body"));

        if let Some(name) = name {
            err.span_label(
                escape_span,
                format!("reference to `{name}` escapes the {escapes_from} body here"),
            );
        } else {
            err.span_label(escape_span, format!("reference escapes the {escapes_from} body here"));
        }

        err
    }

    /// Walks the CFG backwards from `location` collecting the moves of the
    /// move path `mpi` (and its parent paths) that could have caused a
    /// use-after-move there, plus the re-initializations reachable from those
    /// moves.
    fn get_moved_indexes(
        &self,
        location: Location,
        mpi: MovePathIndex,
    ) -> (Vec<MoveSite>, Vec<Location>) {
        // Locations that flow directly into `location`: the previous statement
        // in the same block, or the predecessor blocks' terminators when
        // `location` is at a block start.
        fn predecessor_locations<'tcx>(
            body: &mir::Body<'tcx>,
            location: Location,
        ) -> impl Iterator<Item = Location> {
            if location.statement_index == 0 {
                let predecessors = body.basic_blocks.predecessors()[location.block].to_vec();
                Either::Left(predecessors.into_iter().map(move |bb| body.terminator_loc(bb)))
            } else {
                Either::Right(std::iter::once(Location {
                    statement_index: location.statement_index - 1,
                    ..location
                }))
            }
        }

        let mut mpis = vec![mpi];
        let move_paths = &self.move_data.move_paths;
        mpis.extend(move_paths[mpi].parents(move_paths).map(|(mpi, _)| mpi));

        let mut stack = Vec::new();
        let mut back_edge_stack = Vec::new();

        predecessor_locations(self.body,
            location).for_each(|predecessor| {
            // Predecessors dominated by `location` are loop back edges; they
            // are explored separately (and only if needed) below.
            if location.dominates(predecessor, self.dominators()) {
                back_edge_stack.push(predecessor)
            } else {
                stack.push(predecessor);
            }
        });

        let mut reached_start = false;

        // Check if the mpi is initialized as an argument.
        let mut is_argument = false;
        for arg in self.body.args_iter() {
            if let Some(path) = self.move_data.rev_lookup.find_local(arg) {
                if mpis.contains(&path) {
                    is_argument = true;
                }
            }
        }

        let mut visited = FxIndexSet::default();
        let mut move_locations = FxIndexSet::default();
        let mut reinits = vec![];
        let mut result = vec![];

        // One DFS step. Returns `true` when the search should stop at this
        // location: already visited, found a relevant move (recorded into
        // `result`), or found a re-initialization (recorded into `reinits`).
        let mut dfs_iter = |result: &mut Vec<MoveSite>, location: Location, is_back_edge: bool| {
            debug!(
                "report_use_of_moved_or_uninitialized: (current_location={:?}, back_edge={})",
                location, is_back_edge
            );

            if !visited.insert(location) {
                return true;
            }

            // check for moves
            let stmt_kind =
                self.body[location.block].statements.get(location.statement_index).map(|s| &s.kind);
            if let Some(StatementKind::StorageDead(..)) = stmt_kind {
                // This analysis only tries to find moves explicitly written by the user, so we
                // ignore the move-outs created by `StorageDead` and at the beginning of a
                // function.
            } else {
                // If we found a use of a.b.c which was in error, then we want to look for
                // moves not only of a.b.c but also a.b and a.
                //
                // Note that the moves data already includes "parent" paths, so we don't have to
                // worry about the other case: that is, if there is a move of a.b.c, it is already
                // marked as a move of a.b and a as well, so we will generate the correct errors
                // there.
                for moi in &self.move_data.loc_map[location] {
                    debug!("report_use_of_moved_or_uninitialized: moi={:?}", moi);
                    let path = self.move_data.moves[*moi].path;
                    if mpis.contains(&path) {
                        debug!(
                            "report_use_of_moved_or_uninitialized: found {:?}",
                            move_paths[path].place
                        );
                        result.push(MoveSite { moi: *moi, traversed_back_edge: is_back_edge });
                        move_locations.insert(location);

                        // Strictly speaking, we could continue our DFS here. There may be
                        // other moves that can reach the point of error. But it is kind of
                        // confusing to highlight them.
                        //
                        // Example:
                        //
                        // ```
                        // let a = vec![];
                        // let b = a;
                        // let c = a;
                        // drop(a); // <-- current point of error
                        // ```
                        //
                        // Because we stop the DFS here, we only highlight `let c = a`,
                        // and not `let b = a`. We will of course also report an error at
                        // `let c = a` which highlights `let b = a` as the move.
                        return true;
                    }
                }
            }

            // check for inits
            let mut any_match = false;
            for ii in &self.move_data.init_loc_map[location] {
                let init = self.move_data.inits[*ii];
                match init.kind {
                    InitKind::Deep | InitKind::NonPanicPathOnly => {
                        if mpis.contains(&init.path) {
                            any_match = true;
                        }
                    }
                    InitKind::Shallow => {
                        if mpi == init.path {
                            any_match = true;
                        }
                    }
                }
            }
            if any_match {
                reinits.push(location);
                return true;
            }
            false
        };

        while let Some(location) = stack.pop() {
            if dfs_iter(&mut result, location, false) {
                continue;
            }

            let mut has_predecessor = false;
            predecessor_locations(self.body, location).for_each(|predecessor| {
                if location.dominates(predecessor, self.dominators()) {
                    back_edge_stack.push(predecessor)
                } else {
                    stack.push(predecessor);
                }
                has_predecessor = true;
            });

            if !has_predecessor {
                reached_start = true;
            }
        }
        if (is_argument || !reached_start) && result.is_empty() {
            // Process back edges (moves in future loop iterations) only if
            // the move path is definitely initialized upon loop entry,
            // to avoid spurious "in previous iteration" errors.
            // During DFS, if there's a path from the error back to the start
            // of the function with no intervening init or move, then the
            // move path may be uninitialized at loop entry.
            while let Some(location) = back_edge_stack.pop() {
                if dfs_iter(&mut result, location, true) {
                    continue;
                }

                predecessor_locations(self.body, location)
                    .for_each(|predecessor| back_edge_stack.push(predecessor));
            }
        }

        // Check if we can reach these reinits from a move location.
        let reinits_reachable = reinits
            .into_iter()
            .filter(|reinit| {
                let mut visited = FxIndexSet::default();
                let mut stack = vec![*reinit];
                while let Some(location) = stack.pop() {
                    if !visited.insert(location) {
                        continue;
                    }
                    if move_locations.contains(&location) {
                        return true;
                    }
                    stack.extend(predecessor_locations(self.body, location));
                }
                false
            })
            .collect::<Vec<Location>>();
        (result, reinits_reachable)
    }

    /// Reports an assignment to a place while it is borrowed by `loan`. Fake
    /// borrows (match guards, indexing expressions) get a specialized
    /// "cannot mutate in immutable section" diagnostic.
    pub(crate) fn report_illegal_mutation_of_borrowed(
        &mut self,
        location: Location,
        (place, span): (Place<'tcx>, Span),
        loan: &BorrowData<'tcx>,
    ) {
        let loan_spans = self.retrieve_borrow_spans(loan);
        let loan_span = loan_spans.args_or_use();

        let descr_place = self.describe_any_place(place.as_ref());
        if let BorrowKind::Fake(_) = loan.kind
            && let Some(section) = self.classify_immutable_section(loan.assigned_place)
        {
            let mut err = self.cannot_mutate_in_immutable_section(
                span,
                loan_span,
                &descr_place,
                section,
                "assign",
            );

            loan_spans.var_subdiag(&mut err, Some(loan.kind), |kind, var_span| {
                use crate::session_diagnostics::CaptureVarCause::*;
                match kind {
                    hir::ClosureKind::Coroutine(_) => BorrowUseInCoroutine { var_span },
                    hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                        BorrowUseInClosure { var_span }
                    }
                }
            });

            self.buffer_error(err);

            return;
        }

        let mut err = self.cannot_assign_to_borrowed(span, loan_span, &descr_place);
        self.note_due_to_edition_2024_opaque_capture_rules(loan, &mut err);

        loan_spans.var_subdiag(&mut err, Some(loan.kind), |kind, var_span| {
            use crate::session_diagnostics::CaptureVarCause::*;
            match kind {
                hir::ClosureKind::Coroutine(_) => BorrowUseInCoroutine { var_span },
                hir::ClosureKind::Closure | hir::ClosureKind::CoroutineClosure(_) => {
                    BorrowUseInClosure { var_span }
                }
            }
        });

        self.explain_why_borrow_contains_point(location, loan, None)
            .add_explanation_to_diagnostic(&self, &mut err, "", None, None);

        self.explain_deref_coercion(loan, &mut err);

        self.buffer_error(err);
    }

    /// If the loan was created by a method call whose receiver was
    /// deref-coerced, add a note naming the coercion target type (and where
    /// the deref target is defined, when a span is available).
    fn explain_deref_coercion(&mut self, loan: &BorrowData<'tcx>, err:
        &mut Diag<'_>) {
        let tcx = self.infcx.tcx;
        if let Some(Terminator { kind: TerminatorKind::Call { call_source, fn_span, .. }, .. }) =
            &self.body[loan.reserve_location.block].terminator
            && let Some((method_did, method_args)) = mir::find_self_call(
                tcx,
                self.body,
                loan.assigned_place.local,
                loan.reserve_location.block,
            )
            && let CallKind::DerefCoercion { deref_target_span, deref_target_ty, .. } = call_kind(
                self.infcx.tcx,
                self.infcx.typing_env(self.infcx.param_env),
                method_did,
                method_args,
                *fn_span,
                call_source.from_hir_call(),
                self.infcx.tcx.fn_arg_idents(method_did)[0],
            )
        {
            err.note(format!("borrow occurs due to deref coercion to `{deref_target_ty}`"));
            if let Some(deref_target_span) = deref_target_span {
                err.span_note(deref_target_span, "deref defined here");
            }
        }
    }

    /// Reports an illegal reassignment; for example, an assignment to
    /// (part of) a non-`mut` local that occurs potentially after that
    /// local has already been initialized. `place` is the path being
    /// assigned; `err_place` is a place providing a reason why
    /// `place` is not mutable (e.g., the non-`mut` local `x` in an
    /// assignment to `x.f`).
    pub(crate) fn report_illegal_reassignment(
        &mut self,
        (place, span): (Place<'tcx>, Span),
        assigned_span: Span,
        err_place: Place<'tcx>,
    ) {
        let (from_arg, local_decl) = match err_place.as_local() {
            Some(local) => {
                (self.body.local_kind(local) == LocalKind::Arg, Some(&self.body.local_decls[local]))
            }
            None => (false, None),
        };

        // If root local is initialized immediately (everything apart from let
        // PATTERN;) then make the error refer to that local, rather than the
        // place being assigned later.
        let (place_description, assigned_span) = match local_decl {
            Some(LocalDecl {
                local_info:
                    ClearCrossCrate::Set(
                        box LocalInfo::User(BindingForm::Var(VarBindingForm {
                            opt_match_place: None,
                            ..
                        }))
                        | box LocalInfo::StaticRef { .. }
                        | box LocalInfo::Boring,
                    ),
                ..
            })
            | None => (self.describe_any_place(place.as_ref()), assigned_span),
            Some(decl) => (self.describe_any_place(err_place.as_ref()), decl.source_info.span),
        };

        let mut err = self.cannot_reassign_immutable(span, &place_description, from_arg);
        let msg = if from_arg {
            "cannot assign to immutable argument"
        } else {
            "cannot assign twice to immutable variable"
        };

        if span != assigned_span && !from_arg {
            err.span_label(assigned_span, format!("first assignment to {place_description}"));
        }
        if let Some(decl) = local_decl
            && decl.can_be_made_mutable()
        {
            // `for` loop bindings need the parenthesized `(mut x)` form since
            // the pattern position differs from a plain `let`.
            let is_for_loop = matches!(
                decl.local_info(),
                LocalInfo::User(BindingForm::Var(VarBindingForm {
                    opt_match_place: Some((_, match_span)),
                    ..
                })) if matches!(match_span.desugaring_kind(), Some(DesugaringKind::ForLoop))
            );
            let message = if is_for_loop
                && let Ok(binding_name) =
                    self.infcx.tcx.sess.source_map().span_to_snippet(decl.source_info.span)
            {
                format!("(mut {}) ", binding_name)
            } else {
                "mut ".to_string()
            };
            err.span_suggestion_verbose(
                decl.source_info.span.shrink_to_lo(),
                "consider making this binding mutable",
                message,
                Applicability::MachineApplicable,
            );
            if !from_arg
                && !is_for_loop
                && matches!(
                    decl.local_info(),
                    LocalInfo::User(BindingForm::Var(VarBindingForm {
                        opt_match_place: Some((Some(_), _)),
                        ..
                    }))
                )
            {
                err.span_suggestion_verbose(
                    decl.source_info.span.shrink_to_lo(),
                    "to modify the original value, take a borrow instead",
                    "ref mut ".to_string(),
                    Applicability::MaybeIncorrect,
                );
            }
        }
        err.span_label(span, msg);
        self.buffer_error(err);
    }

    /// Classifies what kind of "drop" ends the lifetime of `place`: plain
    /// local storage death, storage death of a boxed value, or a `Drop`
    /// destructor of an ADT along the projection chain.
    fn classify_drop_access_kind(&self, place: PlaceRef<'tcx>) -> StorageDeadOrDrop<'tcx> {
        let tcx = self.infcx.tcx;
        let (kind, _place_ty) = place.projection.iter().fold(
            (LocalStorageDead, PlaceTy::from_ty(self.body.local_decls[place.local].ty)),
            |(kind, place_ty), &elem| {
                (
                    match elem {
                        ProjectionElem::Deref => match kind {
                            StorageDeadOrDrop::LocalStorageDead
                            | StorageDeadOrDrop::BoxedStorageDead => {
                                assert!(
                                    place_ty.ty.is_box(),
                                    "Drop of value behind a reference or raw pointer"
                                );
                                StorageDeadOrDrop::BoxedStorageDead
                            }
                            StorageDeadOrDrop::Destructor(_) => kind,
                        },
                        ProjectionElem::OpaqueCast { .. }
                        | ProjectionElem::Field(..)
                        | ProjectionElem::Downcast(..) => {
                            match place_ty.ty.kind() {
                                ty::Adt(def, _) if def.has_dtor(tcx) => {
                                    // Report the outermost adt with a destructor
                                    match kind {
                                        StorageDeadOrDrop::Destructor(_) => kind,
                                        StorageDeadOrDrop::LocalStorageDead
                                        | StorageDeadOrDrop::BoxedStorageDead => {
                                            StorageDeadOrDrop::Destructor(place_ty.ty)
                                        }
                                    }
                                }
                                _ => kind,
                            }
                        }
                        ProjectionElem::ConstantIndex { .. }
                        | ProjectionElem::Subslice { .. }
                        | ProjectionElem::Index(_)
                        | ProjectionElem::UnwrapUnsafeBinder(_) => kind,
                    },
                    place_ty.projection_ty(tcx, elem),
                )
            },
        );
        kind
    }

    /// Describe the reason for the fake borrow that was assigned to `place`.
    fn classify_immutable_section(&self, place: Place<'tcx>) -> Option<&'static str> {
        use rustc_middle::mir::visit::Visitor;
        // Scans the body for the `FakeRead` statement targeting `place` to
        // recover why the fake borrow was introduced.
        struct FakeReadCauseFinder<'tcx> {
            place: Place<'tcx>,
            cause: Option<FakeReadCause>,
        }
        impl<'tcx> Visitor<'tcx> for FakeReadCauseFinder<'tcx> {
            fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
                match statement {
                    Statement { kind: StatementKind::FakeRead(box (cause, place)), ..
                    } if *place == self.place => {
                        self.cause = Some(*cause);
                    }
                    _ => (),
                }
            }
        }
        let mut visitor = FakeReadCauseFinder { place, cause: None };
        visitor.visit_body(self.body);
        match visitor.cause {
            Some(FakeReadCause::ForMatchGuard) => Some("match guard"),
            Some(FakeReadCause::ForIndex) => Some("indexing expression"),
            _ => None,
        }
    }

    /// Annotate argument and return type of function and closure with (synthesized) lifetime for
    /// borrow of local value that does not live long enough.
    fn annotate_argument_and_return_for_borrow(
        &self,
        borrow: &BorrowData<'tcx>,
    ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
        // Define a fallback for when we can't match a closure.
        let fallback = || {
            let is_closure = self.infcx.tcx.is_closure_like(self.mir_def_id().to_def_id());
            if is_closure {
                None
            } else {
                let ty = self.infcx.tcx.type_of(self.mir_def_id()).instantiate_identity();
                match ty.kind() {
                    ty::FnDef(_, _) | ty::FnPtr(..) => self.annotate_fn_sig(
                        self.mir_def_id(),
                        self.infcx.tcx.fn_sig(self.mir_def_id()).instantiate_identity(),
                    ),
                    _ => None,
                }
            }
        };

        // In order to determine whether we need to annotate, we need to check whether the reserve
        // place was an assignment into a temporary.
        //
        // If it was, we check whether or not that temporary is eventually assigned into the return
        // place. If it was, we can add annotations about the function's return type and arguments
        // and it'll make sense.
        let location = borrow.reserve_location;
        debug!("annotate_argument_and_return_for_borrow: location={:?}", location);
        if let Some(Statement { kind: StatementKind::Assign(box (reservation, _)), .. }) =
            &self.body[location.block].statements.get(location.statement_index)
        {
            debug!("annotate_argument_and_return_for_borrow: reservation={:?}", reservation);
            // Check that the initial assignment of the reserve location is into a temporary.
            let mut target = match reservation.as_local() {
                Some(local) if self.body.local_kind(local) == LocalKind::Temp => local,
                _ => return None,
            };

            // Next, look through the rest of the block, checking if we are assigning the
            // `target` (that is, the place that contains our borrow) to anything.
            let mut annotated_closure = None;
            for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
                debug!(
                    "annotate_argument_and_return_for_borrow: target={:?} stmt={:?}",
                    target, stmt
                );
                if let StatementKind::Assign(box (place, rvalue)) = &stmt.kind
                    && let Some(assigned_to) = place.as_local()
                {
                    debug!(
                        "annotate_argument_and_return_for_borrow: assigned_to={:?} \
                         rvalue={:?}",
                        assigned_to, rvalue
                    );
                    // Check if our `target` was captured by a closure.
                    if let Rvalue::Aggregate(box AggregateKind::Closure(def_id, args), operands) =
                        rvalue
                    {
                        let def_id = def_id.expect_local();
                        for operand in operands {
                            let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) =
                                operand
                            else {
                                continue;
                            };
                            debug!(
                                "annotate_argument_and_return_for_borrow: assigned_from={:?}",
                                assigned_from
                            );

                            // Find the local from the operand.
                            let Some(assigned_from_local) = assigned_from.local_or_deref_local()
                            else {
                                continue;
                            };

                            if assigned_from_local != target {
                                continue;
                            }

                            // If a closure captured our `target` and then assigned
                            // into a place then we should annotate the closure in
                            // case it ends up being assigned into the return place.
                            annotated_closure =
                                self.annotate_fn_sig(def_id, args.as_closure().sig());
                            debug!(
                                "annotate_argument_and_return_for_borrow: \
                                 annotated_closure={:?} assigned_from_local={:?} \
                                 assigned_to={:?}",
                                annotated_closure, assigned_from_local, assigned_to
                            );

                            if assigned_to == mir::RETURN_PLACE {
                                // If it was assigned directly into the return place, then
                                // return now.
                                return annotated_closure;
                            } else {
                                // Otherwise, update the target.
                                target = assigned_to;
                            }
                        }

                        // If none of our closure's operands matched, then skip to the next
                        // statement.
                        continue;
                    }

                    // Otherwise, look at other types of assignment.
                    let assigned_from = match rvalue {
                        Rvalue::Ref(_, _, assigned_from) => assigned_from,
                        Rvalue::Use(operand) => match operand {
                            Operand::Copy(assigned_from) | Operand::Move(assigned_from) => {
                                assigned_from
                            }
                            _ => continue,
                        },
                        _ => continue,
                    };
                    debug!(
                        "annotate_argument_and_return_for_borrow: \
                         assigned_from={:?}",
                        assigned_from,
                    );

                    // Find the local from the rvalue.
                    let Some(assigned_from_local) = assigned_from.local_or_deref_local() else {
                        continue;
                    };
                    debug!(
                        "annotate_argument_and_return_for_borrow: \
                         assigned_from_local={:?}",
                        assigned_from_local,
                    );

                    // Check if our local matches the target - if so, we've assigned our
                    // borrow to a new place.
                    if assigned_from_local != target {
                        continue;
                    }

                    // If we assigned our `target` into a new place, then we should
                    // check if it was the return place.
                    debug!(
                        "annotate_argument_and_return_for_borrow: \
                         assigned_from_local={:?} assigned_to={:?}",
                        assigned_from_local, assigned_to
                    );
                    if assigned_to == mir::RETURN_PLACE {
                        // If it was then return the annotated closure if there was one,
                        // else, annotate this function.
                        return annotated_closure.or_else(fallback);
                    }

                    // If we didn't assign into the return place, then we just update
                    // the target.
                    target = assigned_to;
                }
            }

            // Check the terminator if we didn't find anything in the statements.
            let terminator = &self.body[location.block].terminator();
            debug!(
                "annotate_argument_and_return_for_borrow: target={:?} terminator={:?}",
                target, terminator
            );
            if let TerminatorKind::Call { destination, target: Some(_), args, ..
            } = &terminator.kind
                && let Some(assigned_to) = destination.as_local()
            {
                debug!(
                    "annotate_argument_and_return_for_borrow: assigned_to={:?} args={:?}",
                    assigned_to, args
                );
                for operand in args {
                    let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) =
                        &operand.node
                    else {
                        continue;
                    };
                    debug!(
                        "annotate_argument_and_return_for_borrow: assigned_from={:?}",
                        assigned_from,
                    );

                    if let Some(assigned_from_local) = assigned_from.local_or_deref_local() {
                        debug!(
                            "annotate_argument_and_return_for_borrow: assigned_from_local={:?}",
                            assigned_from_local,
                        );

                        // The borrowed temporary flows into a call whose result is
                        // the return place: annotate the enclosing signature.
                        if assigned_to == mir::RETURN_PLACE && assigned_from_local == target {
                            return annotated_closure.or_else(fallback);
                        }
                    }
                }
            }
        }

        // If we haven't found an assignment into the return place, then we need not add
        // any annotations.
        debug!("annotate_argument_and_return_for_borrow: none found");
        None
    }

    /// Annotate the first argument and return type of a function signature if they are
    /// references.
    fn annotate_fn_sig(
        &self,
        did: LocalDefId,
        sig: ty::PolyFnSig<'tcx>,
    ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
        debug!("annotate_fn_sig: did={:?} sig={:?}", did, sig);
        let is_closure = self.infcx.tcx.is_closure_like(did.to_def_id());
        let fn_hir_id = self.infcx.tcx.local_def_id_to_hir_id(did);
        let fn_decl = self.infcx.tcx.hir_fn_decl_by_hir_id(fn_hir_id)?;

        // We need to work out which arguments to highlight. We do this by looking
        // at the return type, where there are three cases:
        //
        // 1. If there are named arguments, then we should highlight the return type and
        //    highlight any of the arguments that are also references with that lifetime.
        //    If there are no arguments that have the same lifetime as the return type,
        //    then don't highlight anything.
        // 2. The return type is a reference with an anonymous lifetime. If this is
        //    the case, then we can take advantage of (and teach) the lifetime elision
        //    rules.
        //
        //    We know that an error is being reported. So the arguments and return type
        //    must satisfy the elision rules. Therefore, if there is a single argument
        //    then that means the return type and first (and only) argument have the same
        //    lifetime and the borrow isn't meeting that, we can highlight the argument
        //    and return type.
        //
        //    If there are multiple arguments then the first argument must be self (else
        //    it would not satisfy the elision rules), so we can highlight self and the
        //    return type.
        // 3. The return type is not a reference. In this case, we don't highlight
        //    anything.
        let return_ty = sig.output();
        match return_ty.skip_binder().kind() {
            ty::Ref(return_region, _, _)
                if return_region.is_named(self.infcx.tcx) && !is_closure =>
            {
                // This is case 1 from above, return type is a named reference so we need to
                // search for relevant arguments.
                let mut arguments = Vec::new();
                for (index, argument) in sig.inputs().skip_binder().iter().enumerate() {
                    if let ty::Ref(argument_region, _, _) = argument.kind()
                        && argument_region == return_region
                    {
                        // Need to use the `rustc_middle::ty` types to compare against the
                        // `return_region`. Then use the `rustc_hir` type to get only
                        // the lifetime span.
                        match &fn_decl.inputs[index].kind {
                            hir::TyKind::Ref(lifetime, _) => {
                                // With access to the lifetime, we can get
                                // the span of it.
                                arguments.push((*argument, lifetime.ident.span));
                            }
                            // Resolve `self` whose self type is `&T`.
                            hir::TyKind::Path(hir::QPath::Resolved(None, path)) => {
                                if let Res::SelfTyAlias { alias_to, .. } = path.res
                                    && let Some(alias_to) = alias_to.as_local()
                                    && let hir::Impl { self_ty, .. } = self
                                        .infcx
                                        .tcx
                                        .hir_node_by_def_id(alias_to)
                                        .expect_item()
                                        .expect_impl()
                                    && let hir::TyKind::Ref(lifetime, _) = self_ty.kind
                                {
                                    arguments.push((*argument, lifetime.ident.span));
                                }
                            }
                            _ => {
                                // Don't ICE though. It might be a type alias.
                            }
                        }
                    }
                }

                // We need to have arguments. This shouldn't happen, but it's worth checking.
                if arguments.is_empty() {
                    return None;
                }

                // We use a mix of the HIR and the Ty types to get information
                // as the HIR doesn't have full types for closure arguments.
                let return_ty = sig.output().skip_binder();
                let mut return_span = fn_decl.output.span();
                if let hir::FnRetTy::Return(ty) = &fn_decl.output
                    && let hir::TyKind::Ref(lifetime, _) = ty.kind
                {
                    return_span = lifetime.ident.span;
                }

                Some(AnnotatedBorrowFnSignature::NamedFunction {
                    arguments,
                    return_ty,
                    return_span,
                })
            }
            ty::Ref(_, _, _) if is_closure => {
                // This is case 2 from above but only for closures, return type is anonymous
                // reference so we select
                // the first argument.
                let argument_span = fn_decl.inputs.first()?.span;
                let argument_ty = sig.inputs().skip_binder().first()?;

                // Closure arguments are wrapped in a tuple, so we need to get the first
                // from that.
                if let ty::Tuple(elems) = argument_ty.kind() {
                    let &argument_ty = elems.first()?;
                    if let ty::Ref(_, _, _) = argument_ty.kind() {
                        return Some(AnnotatedBorrowFnSignature::Closure {
                            argument_ty,
                            argument_span,
                        });
                    }
                }

                None
            }
            ty::Ref(_, _, _) => {
                // This is also case 2 from above but for functions, return type is still an
                // anonymous reference so we select the first argument.
                let argument_span = fn_decl.inputs.first()?.span;
                let argument_ty = *sig.inputs().skip_binder().first()?;

                let return_span = fn_decl.output.span();
                let return_ty = sig.output().skip_binder();

                // We expect the first argument to be a reference.
                match argument_ty.kind() {
                    ty::Ref(_, _, _) => {}
                    _ => return None,
                }

                Some(AnnotatedBorrowFnSignature::AnonymousFunction {
                    argument_ty,
                    argument_span,
                    return_ty,
                    return_span,
                })
            }
            _ => {
                // This is case 3 from above, return type is not a reference so don't highlight
                // anything.
None } } } } #[derive(Debug)] enum AnnotatedBorrowFnSignature<'tcx> { NamedFunction { arguments: Vec<(Ty<'tcx>, Span)>, return_ty: Ty<'tcx>, return_span: Span, }, AnonymousFunction { argument_ty: Ty<'tcx>, argument_span: Span, return_ty: Ty<'tcx>, return_span: Span, }, Closure { argument_ty: Ty<'tcx>, argument_span: Span, }, } impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { /// Annotate the provided diagnostic with information about borrow from the fn signature that /// helps explain. pub(crate) fn emit(&self, cx: &MirBorrowckCtxt<'_, '_, 'tcx>, diag: &mut Diag<'_>) -> String { match self { &AnnotatedBorrowFnSignature::Closure { argument_ty, argument_span } => { diag.span_label( argument_span, format!("has type `{}`", cx.get_name_for_ty(argument_ty, 0)), ); cx.get_region_name_for_ty(argument_ty, 0) } &AnnotatedBorrowFnSignature::AnonymousFunction { argument_ty, argument_span, return_ty, return_span, } => { let argument_ty_name = cx.get_name_for_ty(argument_ty, 0); diag.span_label(argument_span, format!("has type `{argument_ty_name}`")); let return_ty_name = cx.get_name_for_ty(return_ty, 0); let types_equal = return_ty_name == argument_ty_name; diag.span_label( return_span, format!( "{}has type `{}`", if types_equal { "also " } else { "" }, return_ty_name, ), ); diag.note( "argument and return type have the same lifetime due to lifetime elision rules", ); diag.note( "to learn more, visit <https://doc.rust-lang.org/book/ch10-03-\ lifetime-syntax.html#lifetime-elision>", ); cx.get_region_name_for_ty(return_ty, 0) } AnnotatedBorrowFnSignature::NamedFunction { arguments, return_ty, return_span } => { // Region of return type and arguments checked to be the same earlier. 
let region_name = cx.get_region_name_for_ty(*return_ty, 0); for (_, argument_span) in arguments { diag.span_label(*argument_span, format!("has lifetime `{region_name}`")); } diag.span_label(*return_span, format!("also has lifetime `{region_name}`",)); diag.help(format!( "use data from the highlighted arguments which match the `{region_name}` lifetime of \ the return type", )); region_name } } } } /// Detect whether one of the provided spans is a statement nested within the top-most visited expr struct ReferencedStatementsVisitor<'a>(&'a [Span]); impl<'v> Visitor<'v> for ReferencedStatementsVisitor<'_> { type Result = ControlFlow<()>; fn visit_stmt(&mut self, s: &'v hir::Stmt<'v>) -> Self::Result { match s.kind { hir::StmtKind::Semi(expr) if self.0.contains(&expr.span) => ControlFlow::Break(()), _ => ControlFlow::Continue(()), } } } /// Look for `break` expressions within any arbitrary expressions. We'll do this to infer /// whether this is a case where the moved value would affect the exit of a loop, making it /// unsuitable for a `.clone()` suggestion. struct BreakFinder { found_breaks: Vec<(hir::Destination, Span)>, found_continues: Vec<(hir::Destination, Span)>, } impl<'hir> Visitor<'hir> for BreakFinder { fn visit_expr(&mut self, ex: &'hir hir::Expr<'hir>) { match ex.kind { hir::ExprKind::Break(destination, _) if !ex.span.is_desugaring(DesugaringKind::ForLoop) => { self.found_breaks.push((destination, ex.span)); } hir::ExprKind::Continue(destination) => { self.found_continues.push((destination, ex.span)); } _ => {} } hir::intravisit::walk_expr(self, ex); } } /// Given a set of spans representing statements initializing the relevant binding, visit all the /// function expressions looking for branching code paths that *do not* initialize the binding. 
struct ConditionVisitor<'tcx> {
    tcx: TyCtxt<'tcx>,
    // Spans of the statements that initialize the binding under scrutiny.
    spans: Vec<Span>,
    // User-facing name of the binding, already formatted for interpolation.
    name: String,
    // Accumulated (span, message) pairs describing code paths that skip init.
    errors: Vec<(Span, String)>,
}

impl<'v, 'tcx> Visitor<'v> for ConditionVisitor<'tcx> {
    // Walk every expression, recording branch arms that do *not* contain one
    // of `self.spans` (i.e. paths on which the binding stays uninitialized).
    fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
        match ex.kind {
            hir::ExprKind::If(cond, body, None) => {
                // `if` expressions with no `else` that initialize the binding might be missing an
                // `else` arm.
                if ReferencedStatementsVisitor(&self.spans).visit_expr(body).is_break() {
                    self.errors.push((
                        cond.span,
                        format!(
                            "if this `if` condition is `false`, {} is not initialized",
                            self.name,
                        ),
                    ));
                    self.errors.push((
                        ex.span.shrink_to_hi(),
                        format!("an `else` arm might be missing here, initializing {}", self.name),
                    ));
                }
            }
            hir::ExprKind::If(cond, body, Some(other)) => {
                // `if` expressions where the binding is only initialized in one of the two arms
                // might be missing a binding initialization.
                let a = ReferencedStatementsVisitor(&self.spans).visit_expr(body).is_break();
                let b = ReferencedStatementsVisitor(&self.spans).visit_expr(other).is_break();
                match (a, b) {
                    // Both arms (or neither arm) initialize: nothing to report here.
                    (true, true) | (false, false) => {}
                    (true, false) => {
                        // Only the `then` arm initializes. A `while` loop desugars its exit
                        // path into the `else` arm, so word the message accordingly.
                        if other.span.is_desugaring(DesugaringKind::WhileLoop) {
                            self.errors.push((
                                cond.span,
                                format!(
                                    "if this condition isn't met and the `while` loop runs 0 \
                                     times, {} is not initialized",
                                    self.name
                                ),
                            ));
                        } else {
                            self.errors.push((
                                body.span.shrink_to_hi().until(other.span),
                                format!(
                                    "if the `if` condition is `false` and this `else` arm is \
                                     executed, {} is not initialized",
                                    self.name
                                ),
                            ));
                        }
                    }
                    (false, true) => {
                        // Only the `else` arm initializes: point at the condition.
                        self.errors.push((
                            cond.span,
                            format!(
                                "if this condition is `true`, {} is not initialized",
                                self.name
                            ),
                        ));
                    }
                }
            }
            hir::ExprKind::Match(e, arms, loop_desugar) => {
                // If the binding is initialized in one of the match arms, then the other match
                // arms might be missing an initialization.
                let results: Vec<bool> = arms
                    .iter()
                    .map(|arm| ReferencedStatementsVisitor(&self.spans).visit_arm(arm).is_break())
                    .collect();
                // Only report when *some but not all* arms initialize the binding.
                if results.iter().any(|x| *x) && !results.iter().all(|x| *x) {
                    for (arm, seen) in arms.iter().zip(results) {
                        if !seen {
                            if loop_desugar == hir::MatchSource::ForLoopDesugar {
                                // `for` loops desugar to a match; the relevant miss is the
                                // zero-iteration case, so point at the iterated expression.
                                self.errors.push((
                                    e.span,
                                    format!(
                                        "if the `for` loop runs 0 times, {} is not initialized",
                                        self.name
                                    ),
                                ));
                            } else if let Some(guard) = &arm.guard {
                                // Arms whose body diverges via `return` can't fall through
                                // uninitialized, so skip them.
                                if matches!(
                                    self.tcx.hir_node(arm.body.hir_id),
                                    hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Ret(_), .. })
                                ) {
                                    continue;
                                }
                                self.errors.push((
                                    arm.pat.span.to(guard.span),
                                    format!(
                                        "if this pattern and condition are matched, {} is not \
                                         initialized",
                                        self.name
                                    ),
                                ));
                            } else {
                                // Same divergence check for guard-less arms.
                                if matches!(
                                    self.tcx.hir_node(arm.body.hir_id),
                                    hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Ret(_), .. })
                                ) {
                                    continue;
                                }
                                self.errors.push((
                                    arm.pat.span,
                                    format!(
                                        "if this pattern is matched, {} is not initialized",
                                        self.name
                                    ),
                                ));
                            }
                        }
                    }
                }
            }
            // FIXME: should we also account for binops, particularly `&&` and `||`? `try` should
            // also be accounted for. For now it is fine, as if we don't find *any* relevant
            // branching code paths, we point at the places where the binding *is* initialized for
            // *some* context.
            _ => {}
        }
        walk_expr(self, ex);
    }
}
rust
github
https://github.com/rust-lang/rust
compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
#!/usr/bin/env python import base import re import sys import requests from termcolor import colored # Control whether the module is enabled or not ENABLED = True class style: BOLD = '\033[1m' END = '\033[0m' def banner(): print colored(style.BOLD + '\n---> Searching Scribd Docs\n' + style.END, 'blue') def main(email): req = requests.get('https://www.scribd.com/search?page=1&content_type=documents&query=%s' % (email)) m = re.findall('(?<=https://www.scribd.com/doc/)\w+', req.text.encode('UTF-8')) m = set(m) m = list(m) links = [] length = len(m) for lt in range(0, length - 1): links.append("https://www.scribd.com/doc/" + m[lt]) return links def output(data, email=""): if data: print "Found %s associated SCRIBD documents:\n" % len(data) for link in data: print link print "" print colored(style.BOLD + 'More results might be available, please follow this link:' + style.END) print "https://www.scribd.com/search?page=1&content_type=documents&query=" + email else: print colored('[-] No Associated Scribd Documents found.', 'red') if __name__ == "__main__": try: email = sys.argv[1] banner() result = main(email) output(result, email) except Exception as e: print e print "Please provide an email as argument"
python
codeparrot/codeparrot-clean
from __future__ import absolute_import import json, re, shutil, errno, os import slyd.errors from os.path import join, splitext from twisted.web.resource import NoResource, ForbiddenResource from jsonschema.exceptions import ValidationError from .resource import SlydJsonResource from .html import html4annotation from .errors import BaseHTTPError from .utils.projects import allowed_file_name, ProjectModifier def create_project_resource(spec_manager): return ProjectResource(spec_manager) def convert_template(template): """Converts the template annotated body for being used in the UI.""" template['annotated_body'] = html4annotation( template['annotated_body'], template['url'], proxy_resources=True) class ProjectSpec(object): resources = ('project', 'items', 'extractors') base_dir = '.' plugins = [] @classmethod def setup(cls, location, **kwargs): cls.base_dir = location def __init__(self, project_name, auth_info): self.project_dir = join(ProjectSpec.base_dir, project_name) self.project_name = project_name self.auth_info = auth_info self.user = auth_info['username'] self.spider_commands = { 'mv': self.rename_spider, 'rm': self.remove_spider, 'mvt': self.rename_template, 'rmt': self.remove_template, } def list_spiders(self): try: for fname in os.listdir(join(self.project_dir, "spiders")): if fname.endswith(".json"): yield splitext(fname)[0] except OSError as ex: if ex.errno != errno.ENOENT: raise def spider_with_templates(self, spider): spider_spec = self.resource('spiders', spider) templates = [] for template in spider_spec.get('template_names', []): try: templates.append(self.resource('spiders', spider, template)) except TypeError: self.remove_template(spider, template) spider_spec['templates'] = templates return spider_spec def spider_json(self, name): """Loads the spider spec for the given spider name.""" try: return self.resource('spiders', name) except IOError as ex: if ex.errno == errno.ENOENT: return({}) else: raise def template_json(self, spider_name, 
template_name): """Loads the given template. Also converts the annotated body of the template to be used by the annotation UI.""" try: template = self.resource('spiders', spider_name, template_name) convert_template(template) return template except IOError as ex: if ex.errno == errno.ENOENT: return({}) else: raise def rename_spider(self, from_name, to_name): if to_name == from_name: return if to_name in self.list_spiders(): raise IOError('Can\'t rename spider as a spider with the name, ' '"%s", already exists for this project.' % to_name) os.rename(self._rfilename('spiders', from_name), self._rfilename('spiders', to_name)) dirname = self._rdirname('spiders', from_name) if os.path.isdir(dirname): os.rename(dirname, self._rdirname('spiders', to_name)) def remove_spider(self, name): os.remove(self._rfilename('spiders', name)) def rename_template(self, spider_name, from_name, to_name): template = self.resource('spiders', spider_name, from_name) template['name'] = to_name self.savejson(template, ['spiders', spider_name, to_name]) self.remove_template(spider_name, from_name) spider = self.spider_json(spider_name) spider['template_names'].append(to_name) self.savejson(spider, ['spiders', spider_name]) def remove_template(self, spider_name, name): try: os.remove(self._rfilename('spiders', spider_name, name)) except OSError: pass spider = self.spider_json(spider_name) try: spider['template_names'].remove(name) except ValueError: pass self.savejson(spider, ['spiders', spider_name]) def _rfilename(self, *resources): return join(self.project_dir, *resources) + '.json' def _rdirname(self, *resources): return join(self.project_dir, *resources[0][:-1]) def _rfile(self, resources, mode='rb'): return open(self._rfilename(*resources), mode) def resource(self, *resources): with self._rfile(resources) as f: return json.load(f) def writejson(self, outf, *resources): """Write json for the resource specified Multiple arguments are joined (e.g. spider, spidername). 
If the file does not exist, an empty dict is written """ try: shutil.copyfileobj(self._rfile(resources), outf) except IOError as ex: if ex.errno == errno.ENOENT: outf.write('{}') else: raise def savejson(self, obj, *resources): # convert to json in a way that will make sense in diffs try: os.makedirs(self._rdirname(*resources)) except OSError: pass with self._rfile(*resources, mode='wb') as ouf: json.dump(obj, ouf, sort_keys=True, indent=4) def json(self, out): """Write spec as json to the file-like object This uses the file contents and avoids converting to python types """ # assumes " is not allowed in spider names template_dict = {r: 'SPEC:%s' % r for r in self.resources} template_dict['spiders'] = {s: 'SPIDER:%s' % s for s in self.list_spiders()} json_template = json.dumps(template_dict) last = 0 for match in re.finditer('"(SPEC|SPIDER):([^"]+)"', json_template): out.write(json_template[last:match.start()]) mtype, resource = match.groups() if mtype == 'SPEC': self.writejson(out, resource) else: self.writejson(out, 'spiders', resource) last = match.end() out.write(json_template[last:]) class ProjectResource(SlydJsonResource, ProjectModifier): isLeaf = True errors = slyd.errors def __init__(self, spec_manager): SlydJsonResource.__init__(self) self.spec_manager = spec_manager def render(self, request): # make sure the path is safe for pathelement in request.postpath: if pathelement and not allowed_file_name(pathelement): resource_class = NoResource if request.method == 'GET' \ else ForbiddenResource resource = resource_class("Bad path element %r." 
% pathelement) return resource.render(request) return SlydJsonResource.render(self, request) def render_GET(self, request): project_spec = self.spec_manager.project_spec( request.project, request.auth_info) rpath = request.postpath if not rpath: project_spec.json(request) elif len(rpath) == 1 and rpath[0] == 'spiders': spiders = project_spec.list_spiders() request.write(json.dumps(list(spiders))) else: try: if rpath[0] == 'spiders' and len(rpath) == 2: spider = project_spec.spider_json(rpath[1]) request.write(json.dumps(spider)) elif rpath[0] == 'spiders' and len(rpath) == 3: template = project_spec.template_json(rpath[1], rpath[2]) template['original_body'] = '' request.write(json.dumps(template)) else: project_spec.writejson(request, *rpath) # Trying to access non existent path except (KeyError, IndexError, TypeError): self.not_found() return '\n' def render_POST(self, request, merge=False): obj = self.read_json(request) project_spec = self.spec_manager.project_spec( request.project, request.auth_info) resource = None try: # validate the request path and data obj = self.verify_data(request.postpath, obj, project_spec) except (KeyError, IndexError): self.not_found() except (AssertionError, ValidationError) as ex: self.bad_request( "The %s data was not valid. Validation failed with the error: %s." % (resource or 'input', ex.message)) except BaseHTTPError as ex: self.error(ex.status, ex.title, ex.body) else: project_spec.savejson(obj, request.postpath) return ''
python
codeparrot/codeparrot-clean
import pytest
import cattle
import subprocess
import sys
import os
import re
# import yaml


def _base():
    """Directory containing this test module."""
    return os.path.dirname(__file__)


def _file(f):
    """Path of *f* relative to the repository root (two levels up)."""
    return os.path.join(_base(), '../../{}'.format(f))


class CatalogService(object):
    """Thin wrapper around the rancher-catalog-service binary."""

    def __init__(self, catalog_bin):
        self.catalog_bin = catalog_bin

    def assert_retcode(self, ret_code, *args):
        """Run the binary with *args* and assert its exit status."""
        p = self.call(*args)
        r_code = p.wait()
        assert r_code == ret_code

    def call(self, *args, **kw):
        """Spawn the binary; keyword args override the Popen defaults."""
        cmd = [self.catalog_bin]
        cmd.extend(args)
        kw_args = {
            'stdin': subprocess.PIPE,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
            'cwd': _base(),
        }
        kw_args.update(kw)
        return subprocess.Popen(cmd, **kw_args)


@pytest.fixture(scope='session')
def catalog_bin():
    c = '/usr/bin/rancher-catalog-service'
    assert os.path.exists(c)
    return c


@pytest.fixture(scope='session')
def catalog_service(catalog_bin):
    return CatalogService(catalog_bin)


@pytest.fixture(scope='session')
def client():
    url = 'http://localhost:8088/v1-catalog/schemas'
    return cattle.from_env(url=url)


@pytest.fixture(scope='session')
def templates(client):
    templates = client.list_template()
    assert len(templates) > 0
    return templates


@pytest.fixture(scope='session')
def requests():
    # BUG FIX: the fixture's own name shadowed the `requests` library at
    # module scope (which was never imported anyway), so `requests.Session()`
    # resolved to this very function and raised AttributeError. Import the
    # library locally under an alias; the fixture name callers inject stays
    # `requests`.
    import requests as requests_lib
    return requests_lib.Session()


@pytest.fixture(scope='session')
def template_details(client, templates):
    # Resolve every version link into its full template detail object.
    for template in templates:
        template.versionDetails = {}
        for version, link in template.versionLinks.iteritems():
            template.versionDetails[version] = client._get(link)
    return templates


def test_validate_exits_normal(catalog_service):
    catalog_service.assert_retcode(
        0, '-catalogUrl', _file('./'), '-validate', '-port', '18088')


def test_stack_name(templates):
    hostname_label = re.compile(r'^[a-zA-Z0-9\-]{1,63}$')
    for template in templates:
        # stack_name must be a valid hostname label
        assert hostname_label.match(template.id.split(':')[-1].split('*')[-1])


def test_maintainers(templates):
    maintainer = re.compile(r'^([\S]+ ){2,5}<[^@]+@[^@]+\.[^@]+>$')
    for template in templates:
        # Maintainer will soon be a requirement
        # assert template.maintainer
        if template.maintainer:
            assert maintainer.match(template.maintainer)


def test_versions(templates):
    for template in templates:
        # default version must be defined
        assert template.defaultVersion
        # template with default version must be defined
        assert template.versionLinks[template.defaultVersion]


def test_template_questions(template_details):
    for template in template_details:
        for _, template in template.versionDetails.iteritems():
            # there must exist a rancher-compose.yml file
            assert template.files['rancher-compose.yml']
            # rancherConfig = yaml.load(template.files['rancher-compose.yml'])
            # there must exist at least one question
            # assert len(rancherConfig['.catalog']['questions']) > 0
python
codeparrot/codeparrot-clean
"""Tests for scipy.weave.size_check shape inference (Python 2 era code:
backtick-repr, print statements, and the deprecated `parser` module)."""
import numpy as np
from numpy.testing import TestCase, assert_array_equal, run_module_suite
from scipy.weave import size_check
from scipy.weave.ast_tools import harvest_variables

# Shape of a scalar: a zero-dimensional, zero-length array.
empty = np.array(())


class TestMakeSameLength(TestCase):
    # Checks that make_same_length left-pads the shorter shape with 1s.

    def generic_check(self,x,y,desired):
        actual = size_check.make_same_length(x,y)
        desired = desired
        assert_array_equal(actual,desired)

    def test_scalar(self):
        x,y = (),()
        desired = empty,empty
        self.generic_check(x,y,desired)

    def test_x_scalar(self):
        x,y = (),(1,2)
        desired = np.array((1,1)), np.array((1,2))
        self.generic_check(x,y,desired)

    def test_y_scalar(self):
        x,y = (1,2),()
        desired = np.array((1,2)), np.array((1,1))
        self.generic_check(x,y,desired)

    def test_x_short(self):
        x,y = (1,2),(1,2,3)
        desired = np.array((1,1,2)), np.array((1,2,3))
        self.generic_check(x,y,desired)

    def test_y_short(self):
        x,y = (1,2,3),(1,2)
        desired = np.array((1,2,3)), np.array((1,1,2))
        self.generic_check(x,y,desired)


class TestBinaryOpSize(TestCase):
    # Checks broadcasting of shapes through binary_op_size.

    def generic_check(self,x,y,desired):
        actual = size_check.binary_op_size(x,y)
        desired = desired
        assert_array_equal(actual,desired)

    def generic_error_check(self,x,y):
        # Incompatible shapes must raise ValueError.
        self.assertRaises(ValueError, size_check.binary_op_size, x, y)

    def desired_type(self,val):
        return np.array(val)

    def test_scalar(self):
        x,y = (),()
        desired = self.desired_type(())
        self.generic_check(x,y,desired)

    def test_x1(self):
        x,y = (1,),()
        desired = self.desired_type((1,))
        self.generic_check(x,y,desired)

    def test_y1(self):
        x,y = (),(1,)
        desired = self.desired_type((1,))
        self.generic_check(x,y,desired)

    def test_x_y(self):
        x,y = (5,),(5,)
        desired = self.desired_type((5,))
        self.generic_check(x,y,desired)

    def test_x_y2(self):
        x,y = (5,10),(5,10)
        desired = self.desired_type((5,10))
        self.generic_check(x,y,desired)

    def test_x_y3(self):
        x,y = (5,10),(1,10)
        desired = self.desired_type((5,10))
        self.generic_check(x,y,desired)

    def test_x_y4(self):
        x,y = (1,10),(5,10)
        desired = self.desired_type((5,10))
        self.generic_check(x,y,desired)

    def test_x_y5(self):
        x,y = (5,1),(1,10)
        desired = self.desired_type((5,10))
        self.generic_check(x,y,desired)

    def test_x_y6(self):
        x,y = (1,10),(5,1)
        desired = self.desired_type((5,10))
        self.generic_check(x,y,desired)

    def test_x_y7(self):
        x,y = (5,4,3,2,1),(3,2,1)
        desired = self.desired_type((5,4,3,2,1))
        self.generic_check(x,y,desired)

    def test_error1(self):
        x,y = (5,),(4,)
        self.generic_error_check(x,y)

    def test_error2(self):
        x,y = (5,5),(4,5)
        self.generic_error_check(x,y)


class TestDummyArray(TestBinaryOpSize):
    # Re-runs the broadcasting suite through dummy_array operator overloads.

    def generic_check(self,x,y,desired):
        if type(x) is type(()):
            x = np.ones(x)
        if type(y) is type(()):
            y = np.ones(y)
        xx = size_check.dummy_array(x)
        yy = size_check.dummy_array(y)
        ops = ['+', '-', '/', '*', '<<', '>>']
        for op in ops:
            actual = eval('xx' + op + 'yy')
            desired = desired
            assert_array_equal(actual,desired)

    def desired_type(self,val):
        return size_check.dummy_array(np.array(val),1)


class TestDummyArrayIndexing(TestCase):
    # Compares dummy_array slicing shapes against real numpy slicing.

    def generic_check(self,ary,expr,desired):
        a = size_check.dummy_array(ary)
        actual = eval(expr).shape
        #print desired, actual
        assert_array_equal(actual,desired, expr)

    def generic_wrap(self,a,expr):
        # Evaluate the slice on the real array to get the expected shape;
        # dummy_array may legitimately raise IndexError only for empty results.
        desired = np.array(eval(expr).shape)
        try:
            self.generic_check(a,expr,desired)
        except IndexError:
            if 0 not in desired:
                msg = '%s raised IndexError in dummy_array, but forms\n' \
                      'valid array shape -> %s' % (expr, str(desired))
                raise AttributeError(msg)

    def generic_1d(self,expr):
        a = np.arange(10)
        self.generic_wrap(a,expr)

    def generic_2d(self,expr):
        a = np.ones((10,20))
        self.generic_wrap(a,expr)

    def generic_3d(self,expr):
        a = np.ones((10,20,1))
        self.generic_wrap(a,expr)

    def generic_1d_index(self,expr):
        # Scalar indexing must produce a scalar (empty) shape.
        a = np.arange(10)
        #print expr ,eval(expr)
        desired = np.array(())
        self.generic_check(a,expr,desired)

    def test_1d_index_0(self):
        self.generic_1d_index('a[0]')

    def test_1d_index_1(self):
        self.generic_1d_index('a[4]')

    def test_1d_index_2(self):
        self.generic_1d_index('a[-4]')

    def test_1d_index_3(self):
        try:
            self.generic_1d('a[12]')
        except IndexError:
            pass

    def test_1d_index_calculated(self):
        self.generic_1d_index('a[0+1]')

    def test_1d_0(self):
        self.generic_1d('a[:]')

    def test_1d_1(self):
        self.generic_1d('a[1:]')

    def test_1d_2(self):
        self.generic_1d('a[-1:]')

    def test_1d_3(self):
        self.generic_1d('a[-11:]')

    def test_1d_4(self):
        self.generic_1d('a[:1]')

    def test_1d_5(self):
        self.generic_1d('a[:-1]')

    def test_1d_6(self):
        self.generic_1d('a[:-11]')

    def test_1d_7(self):
        self.generic_1d('a[1:5]')

    def test_1d_8(self):
        self.generic_1d('a[1:-5]')

    def test_1d_9(self):
        # don't support zero length slicing at the moment.
        try:
            self.generic_1d('a[-1:-5]')
        except IndexError:
            pass

    def test_1d_10(self):
        self.generic_1d('a[-5:-1]')

    def test_1d_stride_0(self):
        self.generic_1d('a[::1]')

    def test_1d_stride_1(self):
        self.generic_1d('a[::-1]')

    def test_1d_stride_2(self):
        self.generic_1d('a[1::1]')

    def test_1d_stride_3(self):
        self.generic_1d('a[1::-1]')

    def test_1d_stride_4(self):
        # don't support zero length slicing at the moment.
        try:
            self.generic_1d('a[1:5:-1]')
        except IndexError:
            pass

    def test_1d_stride_5(self):
        self.generic_1d('a[5:1:-1]')

    def test_1d_stride_6(self):
        self.generic_1d('a[:4:1]')

    def test_1d_stride_7(self):
        self.generic_1d('a[:4:-1]')

    def test_1d_stride_8(self):
        self.generic_1d('a[:-4:1]')

    def test_1d_stride_9(self):
        self.generic_1d('a[:-4:-1]')

    def test_1d_stride_10(self):
        self.generic_1d('a[:-3:2]')

    def test_1d_stride_11(self):
        self.generic_1d('a[:-3:-2]')

    def test_1d_stride_12(self):
        self.generic_1d('a[:-3:-7]')

    def test_1d_random(self):
        """ through a bunch of different indexes at it for good measure.
        """
        import random
        # Mix of repr'd ints (backtick repr), raw ints and empty strings.
        choices = map(lambda x: `x`,range(50)) + range(50) + ['']*50
        for i in range(100):
            try:
                beg = random.choice(choices)
                end = random.choice(choices)
                step = random.choice(choices)
                if step in ['0',0]:
                    step = 'None'
                self.generic_1d('a[%s:%s:%s]' %(beg,end,step))
            except IndexError:
                pass

    def test_2d_0(self):
        self.generic_2d('a[:]')

    def test_2d_1(self):
        self.generic_2d('a[:2]')

    def test_2d_2(self):
        self.generic_2d('a[:,:]')

    def test_2d_random(self):
        """ through a bunch of different indexes at it for good measure.
        """
        import random
        choices = map(lambda x: `x`,range(50)) + range(50) + ['']*50
        for i in range(100):
            try:
                beg = random.choice(choices)
                end = random.choice(choices)
                step = random.choice(choices)
                beg2 = random.choice(choices)
                end2 = random.choice(choices)
                step2 = random.choice(choices)
                if step in ['0',0]:
                    step = 'None'
                if step2 in ['0',0]:
                    step2 = 'None'
                expr = 'a[%s:%s:%s,%s:%s:%s]' %(beg,end,step,beg2,end2,step2)
                self.generic_2d(expr)
            except IndexError:
                pass

    def test_3d_random(self):
        """ through a bunch of different indexes at it for good measure.
        """
        import random
        choices = map(lambda x: `x`,range(50)) + range(50) + ['']*50
        for i in range(100):
            try:
                idx = []
                for i in range(9):
                    val = random.choice(choices)
                    # Every third slot is a step; steps of 0 are invalid.
                    if (i+1) % 3 == 0 and val in ['0',0]:
                        val = 'None'
                    idx.append(val)
                expr = 'a[%s:%s:%s,%s:%s:%s,%s:%s:%s]' % tuple(idx)
                self.generic_3d(expr)
            except IndexError:
                pass


class TestReduction(TestCase):
    # Checks that reduction removes the given axis from the shape.

    def test_1d_0(self):
        a = np.ones((5,))
        actual = size_check.reduction(a,0)
        desired = size_check.dummy_array((),1)
        assert_array_equal(actual.shape,desired.shape)

    def test_2d_0(self):
        a = np.ones((5,10))
        actual = size_check.reduction(a,0)
        desired = size_check.dummy_array((10,),1)
        assert_array_equal(actual.shape,desired.shape)

    def test_2d_1(self):
        a = np.ones((5,10))
        actual = size_check.reduction(a,1)
        desired = size_check.dummy_array((5,),1)
        assert_array_equal(actual.shape,desired.shape)

    def test_3d_0(self):
        a = np.ones((5,6,7))
        actual = size_check.reduction(a,1)
        desired = size_check.dummy_array((5,7),1)
        assert_array_equal(actual.shape,desired.shape)

    def test_error0(self):
        # Axis out of range (too negative) should raise ValueError.
        a = np.ones((5,))
        try:
            actual = size_check.reduction(a,-2)
        except ValueError:
            pass

    def test_error1(self):
        # Axis out of range (too large) should raise ValueError.
        a = np.ones((5,))
        try:
            actual = size_check.reduction(a,1)
        except ValueError:
            pass


class TestExpressions(TestCase):
    # Evaluates whole expressions with dummy_array operands and compares
    # the inferred shape against real numpy evaluation.

    def generic_check(self,expr,desired,**kw):
        import parser
        ast_list = parser.expr(expr).tolist()
        args = harvest_variables(ast_list)
        # NOTE(review): dict.update() returns None, so `loc` is always None
        # here and exec falls back to the surrounding namespaces — verify
        # this is the intended behavior.
        loc = locals().update(kw)
        for var in args:
            s='%s = size_check.dummy_array(%s)'% (var,var)
            exec(s,loc)
        try:
            actual = eval(expr,locals()).shape
        except:
            actual = 'failed'
        if actual is 'failed' and desired is 'failed':
            return
        try:
            assert_array_equal(actual,desired, expr)
        except:
            print 'EXPR:',expr
            print 'ACTUAL:',actual
            print 'DESIRED:',desired

    def generic_wrap(self,expr,**kw):
        # Expected result: real numpy evaluation (or 'failed' if it raises).
        try:
            x = np.array(eval(expr,kw))
            try:
                desired = x.shape
            except:
                desired = np.zeros(())
        except:
            desired = 'failed'
        self.generic_check(expr,desired,**kw)

    def test_generic_1d(self):
        a = np.arange(10)
        expr = 'a[:]'
        self.generic_wrap(expr,a=a)
        expr = 'a[:] + a'
        self.generic_wrap(expr,a=a)
        bad_expr = 'a[4:] + a'
        self.generic_wrap(bad_expr,a=a)
        a = np.arange(10)
        b = np.ones((1,10))
        expr = 'a + b'
        self.generic_wrap(expr,a=a,b=b)
        bad_expr = 'a[:5] + b'
        self.generic_wrap(bad_expr,a=a,b=b)

    def test_single_index(self):
        a = np.arange(10)
        expr = 'a[5] + a[3]'
        self.generic_wrap(expr,a=a)

    def test_calculated_index(self):
        a = np.arange(10)
        nx = 0
        expr = 'a[5] + a[nx+3]'
        size_check.check_expr(expr,locals())

    def test_calculated_index2(self):
        a = np.arange(10)
        nx = 0
        expr = 'a[1:5] + a[nx+1:5+nx]'
        size_check.check_expr(expr,locals())

    def generic_2d(self,expr):
        a = np.ones((10,20))
        self.generic_wrap(a,expr)

    def generic_3d(self,expr):
        a = np.ones((10,20,1))
        self.generic_wrap(a,expr)


if __name__ == "__main__":
    run_module_suite()
python
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.MultiHoster import MultiHoster
from module.plugins.internal.misc import json


class RPNetBiz(MultiHoster):
    __name__    = "RPNetBiz"
    __type__    = "hoster"
    __version__ = "0.20"
    __status__  = "testing"

    __pattern__ = r'https?://.+rpnet\.biz'
    __config__  = [("activated"   , "bool", "Activated"                                        , True ),
                   ("use_premium" , "bool", "Use premium account if available"                 , True ),
                   ("fallback"    , "bool", "Fallback to free download if premium fails"       , False),
                   ("chk_filesize", "bool", "Check file size"                                  , True ),
                   ("max_wait"    , "int" , "Reconnect if waiting time is greater than minutes", 10   ),
                   ("revertfailed", "bool", "Revert to standard download if fails"             , True )]

    __description__ = """RPNet.biz multi-hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("Dman", "dmanugm@gmail.com")]

    def setup(self):
        """Plugin setup: -1 disables chunked downloading for this hoster."""
        self.chunk_limit = -1

    def handle_premium(self, pyfile):
        """Generate a premium download link for *pyfile* via the rpnet API.

        Asks the API to generate a link; if rpnet first has to fetch the
        file to its own HDD (response carries an 'id' instead of a link),
        polls the API every 30s — up to ~15 minutes — until the transfer
        reaches 100%. Sets ``self.link`` on success, fails otherwise.
        """
        user, info = self.account.select()
        res = self.load("https://premium.rpnet.biz/client_api.php",
                        get={'username': user,
                             'password': info['login']['password'],
                             'action'  : "generate",
                             'links'   : pyfile.url})

        self.log_debug("JSON data: %s" % res)
        link_status = json.loads(res)['links'][0]  #: Get the first link... since we only queried one

        #: Check if we only have an id as a HDD link
        if 'id' in link_status:
            self.log_debug("Need to wait at least 30 seconds before requery")
            self.wait(30)  #: Wait for 30 seconds

            #: Lets query the server again asking for the status on the link,
            #: We need to keep doing this until we reach 100
            attemps = 30  # NOTE(review): identifier is a typo for "attempts"; kept as-is (doc-only change)
            my_try = 0
            while (my_try <= attemps):
                self.log_debug("Try: %d ; Max Tries: %d" % (my_try, attemps))
                res = self.load("https://premium.rpnet.biz/client_api.php",
                                get={'username': user,
                                     'password': info['login']['password'],
                                     'action'  : "downloadInformation",
                                     'id'      : link_status['id']})
                self.log_debug("JSON data hdd query: %s" % res)
                download_status = json.loads(res)['download']

                # API reports percent complete as a string; "100" means done.
                if download_status['status'] == "100":
                    link_status['generated'] = download_status['rpnet_link']
                    self.log_debug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
                    break
                else:
                    self.log_debug("At %s%% for the file download" % download_status['status'])
                    self.wait(30)

                my_try += 1

            if my_try > attemps:  #: We went over the limit!
                self.fail(_("Waited for about 15 minutes for download to finish but failed"))

        if 'generated' in link_status:
            self.link = link_status['generated']
            return
        elif 'error' in link_status:
            self.fail(link_status['error'])
        else:
            self.fail(_("Something went wrong, not supposed to enter here"))
python
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import purchase # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
python
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.addons.mail.tests.test_mail_base import TestMailBase
from openerp.osv.orm import except_orm
from openerp.tools.misc import mute_logger


class test_portal(TestMailBase):
    """Integration tests for portal users interacting with the mail module:
    access rights on mail.group / mail.message, follower invitations with
    signup links, and message visibility filtering."""

    def setUp(self):
        # Build the portal-specific fixtures on top of TestMailBase's setup:
        # a portal user (Chell) and a group restricted to the portal group.
        super(test_portal, self).setUp()
        cr, uid = self.cr, self.uid

        # Find Portal group
        group_portal = self.registry('ir.model.data').get_object(cr, uid, 'portal', 'group_portal')
        self.group_portal_id = group_portal.id

        # Create Chell (portal user)
        self.user_chell_id = self.res_users.create(cr, uid, {'name': 'Chell Gladys', 'login': 'chell', 'email': 'chell@gladys.portal', 'groups_id': [(6, 0, [self.group_portal_id])]})
        self.user_chell = self.res_users.browse(cr, uid, self.user_chell_id)
        self.partner_chell_id = self.user_chell.partner_id.id

        # Create a PigsPortal group: public='groups' limits read access to
        # members of group_public_id (the portal group created above).
        # 'mail_create_nolog' avoids posting a creation log message.
        self.group_port_id = self.mail_group.create(cr, uid,
            {'name': 'PigsPortal', 'public': 'groups', 'group_public_id': self.group_portal_id},
            {'mail_create_nolog': True})

        # Set an email address for the user running the tests, used as Sender for outgoing mails
        self.res_users.write(cr, uid, uid, {'email': 'test@localhost'})

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
    def test_00_mail_access_rights(self):
        """ Test basic mail_message and mail_group access rights for portal users. """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')

        # Prepare group: Pigs and PigsPortal
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
        port_msg_id = self.mail_group.message_post(cr, uid, self.group_port_id, body='Message')

        # Do: Chell browses Pigs -> ko, employee group
        chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
        with self.assertRaises(except_orm):
            # browse() is lazy in OpenERP 7: reading a field triggers the
            # actual (access-checked) read, hence the dummy assignment.
            trigger_read = chell_pigs.name

        # Do: Chell posts a message on Pigs, crash because can not write on group or is not in the followers
        with self.assertRaises(except_orm):
            self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='Message')

        # Do: Chell is added into Pigs followers and browse it -> ok for messages, ko for partners (no read permission)
        self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_chell_id])
        chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
        trigger_read = chell_pigs.name
        for message in chell_pigs.message_ids:
            trigger_read = message.subject
        for partner in chell_pigs.message_follower_ids:
            # Portal users may list followers but not read res.partner records.
            with self.assertRaises(except_orm):
                trigger_read = partner.name

        # Do: Chell comments Pigs, ok because he is now in the followers
        self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='I love Pigs')
        # Do: Chell creates a mail.compose.message record on Pigs, because he uses the wizard
        compose_id = mail_compose.create(cr, self.user_chell_id,
            {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
            {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_pigs_id})
        mail_compose.send_mail(cr, self.user_chell_id, [compose_id])
        # Do: Chell replies to a Pigs message using the composer
        compose_id = mail_compose.create(cr, self.user_chell_id,
            {'subject': 'Subject', 'body': 'Body text'},
            {'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})
        mail_compose.send_mail(cr, self.user_chell_id, [compose_id])

        # Do: Chell browses PigsPortal -> ok because groups security, ko for partners (no read permission)
        chell_port = self.mail_group.browse(cr, self.user_chell_id, self.group_port_id)
        trigger_read = chell_port.name
        for message in chell_port.message_ids:
            trigger_read = message.subject
        for partner in chell_port.message_follower_ids:
            with self.assertRaises(except_orm):
                trigger_read = partner.name

    def test_10_mail_invite(self):
        """ Test the follower-invitation wizard: followers list update,
        signup preparation of the invited partner, and invitation email. """
        cr, uid = self.cr, self.uid
        mail_invite = self.registry('mail.wizard.invite')
        base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='')
        # Carine Poilvache, with email, should receive emails for comments and emails
        partner_carine_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c'})

        # Do: create a mail_wizard_invite, validate it
        self._init_mock_build_email()
        context = {'default_res_model': 'mail.group', 'default_res_id': self.group_pigs_id}
        mail_invite_id = mail_invite.create(cr, uid, {'partner_ids': [(4, partner_carine_id)]}, context)
        mail_invite.add_followers(cr, uid, [mail_invite_id])

        # Test: Pigs followers should contain Admin and Carine
        group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
        self.assertEqual(set(follower_ids), set([self.partner_admin_id, partner_carine_id]),
            'Pigs followers after invite is incorrect')

        # Test: partner must have been prepared for signup
        partner_carine = self.res_partner.browse(cr, uid, partner_carine_id)
        self.assertTrue(partner_carine.signup_valid, 'partner has not been prepared for signup')
        self.assertTrue(base_url in partner_carine.signup_url, 'signup url is incorrect')
        self.assertTrue(cr.dbname in partner_carine.signup_url, 'signup url is incorrect')
        self.assertTrue(partner_carine.signup_token in partner_carine.signup_url, 'signup url is incorrect')

        # Test: (pretend to) send email and check subject, body; only one
        # email is expected, addressed to the invited partner (Carine).
        self.assertEqual(len(self._build_email_kwargs_list), 1, 'sent email number incorrect, should be only for Bert')
        for sent_email in self._build_email_kwargs_list:
            self.assertEqual(sent_email.get('subject'), 'Invitation to follow Pigs',
                'subject of invitation email is incorrect')
            self.assertTrue('You have been invited to follow Pigs' in sent_email.get('body'),
                'body of invitation email is incorrect')
            self.assertTrue(partner_carine.signup_url in sent_email.get('body'),
                'body of invitation email does not contain signup url')

    def test_20_message_read(self):
        """ Test which messages a portal user can search and read: comments
        without a subtype (internal notes) must be hidden from portal users. """
        cr, uid, group_port_id = self.cr, self.uid, self.group_port_id

        # Data: custom subtypes
        mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})

        # Data: post messages with various subtypes
        msg1_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body1', type='comment', subtype='mail.mt_comment')
        msg2_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body2', type='comment', subtype='mail.mt_group_public')
        msg3_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body3', type='comment', subtype='mail.mt_comment')
        msg4_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body4', type='comment')
        msg5_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body5', type='notification')

        # Do: Chell search messages: should not see internal notes (comment without subtype)
        msg_ids = self.mail_message.search(cr, self.user_chell_id, [('model', '=', 'mail.group'), ('res_id', '=', group_port_id)])
        self.assertEqual(set(msg_ids), set([msg1_id, msg2_id, msg3_id, msg5_id]),
            'mail_message: portal user has access to messages he should not read')
        # Do: Chell read messages she can read
        self.mail_message.read(cr, self.user_chell_id, msg_ids, ['body', 'type', 'subtype_id'])
        # Do: Chell read a message she should not be able to read
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, self.user_chell_id, [msg4_id], ['body', 'type', 'subtype_id'])
unknown
codeparrot/codeparrot-clean
/*------------------------------------------------------------------------- * * pg_stat_statements.c * Track statement planning and execution times as well as resource * usage across a whole database cluster. * * Execution costs are totaled for each distinct source query, and kept in * a shared hashtable. (We track only as many distinct queries as will fit * in the designated amount of shared memory.) * * Starting in Postgres 9.2, this module normalized query entries. As of * Postgres 14, the normalization is done by the core if compute_query_id is * enabled, or optionally by third-party modules. * * To facilitate presenting entries to users, we create "representative" query * strings in which constants are replaced with parameter symbols ($n), to * make it clearer what a normalized entry can represent. To save on shared * memory, and to avoid having to truncate oversized query strings, we store * these strings in a temporary external query-texts file. Offsets into this * file are kept in shared memory. * * Note about locking issues: to create or delete an entry in the shared * hashtable, one must hold pgss->lock exclusively. Modifying any field * in an entry except the counters requires the same. To look up an entry, * one must hold the lock shared. To read or update the counters within * an entry, one must hold the lock shared or exclusive (so the entry doesn't * disappear!) and also take the entry's mutex spinlock. * The shared state variable pgss->extent (the next free spot in the external * query-text file) should be accessed only while holding either the * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to * allow reserving file space while holding only shared lock on pgss->lock. * Rewriting the entire external query-text file, eg for garbage collection, * requires holding pgss->lock exclusively; this allows individual entries * in the file to be read or written while holding only shared lock. 
* * * Copyright (c) 2008-2026, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_stat_statements/pg_stat_statements.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include <math.h> #include <sys/stat.h> #include <unistd.h> #include "access/htup_details.h" #include "access/parallel.h" #include "catalog/pg_authid.h" #include "common/int.h" #include "executor/instrument.h" #include "funcapi.h" #include "jit/jit.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "nodes/queryjumble.h" #include "optimizer/planner.h" #include "parser/analyze.h" #include "parser/scanner.h" #include "pgstat.h" #include "storage/fd.h" #include "storage/ipc.h" #include "storage/lwlock.h" #include "storage/shmem.h" #include "storage/spin.h" #include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/memutils.h" #include "utils/timestamp.h" PG_MODULE_MAGIC_EXT( .name = "pg_stat_statements", .version = PG_VERSION ); /* Location of permanent stats file (valid when database is shut down) */ #define PGSS_DUMP_FILE PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat" /* * Location of external query text file. */ #define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat" /* Magic number identifying the stats file format */ static const uint32 PGSS_FILE_HEADER = 0x20250731; /* PostgreSQL major version number, changes in which invalidate all entries */ static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100; /* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? 
 */
#define USAGE_EXEC(duration)	(1.0)
#define USAGE_INIT				(1.0)	/* including initial planning */
#define ASSUMED_MEDIAN_INIT		(10.0)	/* initial assumed median usage */
#define ASSUMED_LENGTH_INIT		1024	/* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR	(0.99)	/* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR	(0.50)	/* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT	5	/* free this % of entries at once */
/* A "sticky" entry has been allocated but never planned or executed */
#define IS_STICKY(c)	((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)

/*
 * Extension version number, for supporting older extension versions' objects
 */
typedef enum pgssVersion
{
	PGSS_V1_0 = 0,
	PGSS_V1_1,
	PGSS_V1_2,
	PGSS_V1_3,
	PGSS_V1_8,
	PGSS_V1_9,
	PGSS_V1_10,
	PGSS_V1_11,
	PGSS_V1_12,
	PGSS_V1_13,
} pgssVersion;

/*
 * Kind of statistics being stored for an entry: planning vs. execution
 * counters (PGSS_INVALID marks a call that stores neither, e.g. when only
 * creating a normalized-text entry).
 */
typedef enum pgssStoreKind
{
	PGSS_INVALID = -1,

	/*
	 * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
	 * reference the underlying values in the arrays in the Counters struct,
	 * and this order is required in pg_stat_statements_internal().
	 */
	PGSS_PLAN = 0,
	PGSS_EXEC,
} pgssStoreKind;

#define PGSS_NUMKIND	(PGSS_EXEC + 1)

/*
 * Hashtable key that defines the identity of a hashtable entry.  We separate
 * queries by user and by database even if they are otherwise identical.
 *
 * If you add a new key to this struct, make sure to teach pgss_store() to
 * zero the padding bytes.  Otherwise, things will break, because pgss_hash is
 * created using HASH_BLOBS, and thus tag_hash is used to hash this.
 */
typedef struct pgssHashKey
{
	Oid			userid;			/* user OID */
	Oid			dbid;			/* database OID */
	int64		queryid;		/* query identifier */
	bool		toplevel;		/* query executed at top level */
} pgssHashKey;

/*
 * The actual stats counters kept within pgssEntry.
*/ typedef struct Counters { int64 calls[PGSS_NUMKIND]; /* # of times planned/executed */ double total_time[PGSS_NUMKIND]; /* total planning/execution time, * in msec */ double min_time[PGSS_NUMKIND]; /* minimum planning/execution time in * msec since min/max reset */ double max_time[PGSS_NUMKIND]; /* maximum planning/execution time in * msec since min/max reset */ double mean_time[PGSS_NUMKIND]; /* mean planning/execution time in * msec */ double sum_var_time[PGSS_NUMKIND]; /* sum of variances in * planning/execution time in msec */ int64 rows; /* total # of retrieved or affected rows */ int64 shared_blks_hit; /* # of shared buffer hits */ int64 shared_blks_read; /* # of shared disk blocks read */ int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */ int64 shared_blks_written; /* # of shared disk blocks written */ int64 local_blks_hit; /* # of local buffer hits */ int64 local_blks_read; /* # of local disk blocks read */ int64 local_blks_dirtied; /* # of local disk blocks dirtied */ int64 local_blks_written; /* # of local disk blocks written */ int64 temp_blks_read; /* # of temp blocks read */ int64 temp_blks_written; /* # of temp blocks written */ double shared_blk_read_time; /* time spent reading shared blocks, * in msec */ double shared_blk_write_time; /* time spent writing shared blocks, * in msec */ double local_blk_read_time; /* time spent reading local blocks, in * msec */ double local_blk_write_time; /* time spent writing local blocks, in * msec */ double temp_blk_read_time; /* time spent reading temp blocks, in msec */ double temp_blk_write_time; /* time spent writing temp blocks, in * msec */ double usage; /* usage factor */ int64 wal_records; /* # of WAL records generated */ int64 wal_fpi; /* # of WAL full page images generated */ uint64 wal_bytes; /* total amount of WAL generated in bytes */ int64 wal_buffers_full; /* # of times the WAL buffers became full */ int64 jit_functions; /* total number of JIT functions emitted */ double 
jit_generation_time; /* total time to generate jit code */ int64 jit_inlining_count; /* number of times inlining time has been * > 0 */ double jit_deform_time; /* total time to deform tuples in jit code */ int64 jit_deform_count; /* number of times deform time has been > * 0 */ double jit_inlining_time; /* total time to inline jit code */ int64 jit_optimization_count; /* number of times optimization time * has been > 0 */ double jit_optimization_time; /* total time to optimize jit code */ int64 jit_emission_count; /* number of times emission time has been * > 0 */ double jit_emission_time; /* total time to emit jit code */ int64 parallel_workers_to_launch; /* # of parallel workers planned * to be launched */ int64 parallel_workers_launched; /* # of parallel workers actually * launched */ int64 generic_plan_calls; /* number of calls using a generic plan */ int64 custom_plan_calls; /* number of calls using a custom plan */ } Counters; /* * Global statistics for pg_stat_statements */ typedef struct pgssGlobalStats { int64 dealloc; /* # of times entries were deallocated */ TimestampTz stats_reset; /* timestamp with all stats reset */ } pgssGlobalStats; /* * Statistics per statement * * Note: in event of a failure in garbage collection of the query text file, * we reset query_offset to zero and query_len to -1. This will be seen as * an invalid state by qtext_fetch(). 
*/ typedef struct pgssEntry { pgssHashKey key; /* hash key of entry - MUST BE FIRST */ Counters counters; /* the statistics for this query */ Size query_offset; /* query text offset in external file */ int query_len; /* # of valid bytes in query string, or -1 */ int encoding; /* query text encoding */ TimestampTz stats_since; /* timestamp of entry allocation */ TimestampTz minmax_stats_since; /* timestamp of last min/max values reset */ slock_t mutex; /* protects the counters only */ } pgssEntry; /* * Global shared state */ typedef struct pgssSharedState { LWLock *lock; /* protects hashtable search/modification */ double cur_median_usage; /* current median usage in hashtable */ Size mean_query_len; /* current mean entry text length */ slock_t mutex; /* protects following fields only: */ Size extent; /* current extent of query file */ int n_writers; /* number of active writers to query file */ int gc_count; /* query file garbage collection cycle count */ pgssGlobalStats stats; /* global statistics for pgss */ } pgssSharedState; /*---- Local variables ----*/ /* Current nesting depth of planner/ExecutorRun/ProcessUtility calls */ static int nesting_level = 0; /* Saved hook values */ static shmem_request_hook_type prev_shmem_request_hook = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL; static planner_hook_type prev_planner_hook = NULL; static ExecutorStart_hook_type prev_ExecutorStart = NULL; static ExecutorRun_hook_type prev_ExecutorRun = NULL; static ExecutorFinish_hook_type prev_ExecutorFinish = NULL; static ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static ProcessUtility_hook_type prev_ProcessUtility = NULL; /* Links to shared memory state */ static pgssSharedState *pgss = NULL; static HTAB *pgss_hash = NULL; /*---- GUC variables ----*/ typedef enum { PGSS_TRACK_NONE, /* track no statements */ PGSS_TRACK_TOP, /* only top level statements */ PGSS_TRACK_ALL, /* all 
statements, including nested ones */ } PGSSTrackLevel; static const struct config_enum_entry track_options[] = { {"none", PGSS_TRACK_NONE, false}, {"top", PGSS_TRACK_TOP, false}, {"all", PGSS_TRACK_ALL, false}, {NULL, 0, false} }; static int pgss_max = 5000; /* max # statements to track */ static int pgss_track = PGSS_TRACK_TOP; /* tracking level */ static bool pgss_track_utility = true; /* whether to track utility commands */ static bool pgss_track_planning = false; /* whether to track planning * duration */ static bool pgss_save = true; /* whether to save stats across shutdown */ #define pgss_enabled(level) \ (!IsParallelWorker() && \ (pgss_track == PGSS_TRACK_ALL || \ (pgss_track == PGSS_TRACK_TOP && (level) == 0))) #define record_gc_qtexts() \ do { \ SpinLockAcquire(&pgss->mutex); \ pgss->gc_count++; \ SpinLockRelease(&pgss->mutex); \ } while(0) /*---- Function declarations ----*/ PG_FUNCTION_INFO_V1(pg_stat_statements_reset); PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7); PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_11); PG_FUNCTION_INFO_V1(pg_stat_statements_1_2); PG_FUNCTION_INFO_V1(pg_stat_statements_1_3); PG_FUNCTION_INFO_V1(pg_stat_statements_1_8); PG_FUNCTION_INFO_V1(pg_stat_statements_1_9); PG_FUNCTION_INFO_V1(pg_stat_statements_1_10); PG_FUNCTION_INFO_V1(pg_stat_statements_1_11); PG_FUNCTION_INFO_V1(pg_stat_statements_1_12); PG_FUNCTION_INFO_V1(pg_stat_statements_1_13); PG_FUNCTION_INFO_V1(pg_stat_statements); PG_FUNCTION_INFO_V1(pg_stat_statements_info); static void pgss_shmem_request(void); static void pgss_shmem_startup(void); static void pgss_shmem_shutdown(int code, Datum arg); static void pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate); static PlannedStmt *pgss_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es); static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags); static void pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection 
direction, uint64 count); static void pgss_ExecutorFinish(QueryDesc *queryDesc); static void pgss_ExecutorEnd(QueryDesc *queryDesc); static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); static void pgss_store(const char *query, int64 queryId, int query_location, int query_len, pgssStoreKind kind, double total_time, uint64 rows, const BufferUsage *bufusage, const WalUsage *walusage, const struct JitInstrumentation *jitusage, JumbleState *jstate, int parallel_workers_to_launch, int parallel_workers_launched, PlannedStmtOrigin planOrigin); static void pg_stat_statements_internal(FunctionCallInfo fcinfo, pgssVersion api_version, bool showtext); static Size pgss_memsize(void); static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding, bool sticky); static void entry_dealloc(void); static bool qtext_store(const char *query, int query_len, Size *query_offset, int *gc_count); static char *qtext_load_file(Size *buffer_size); static char *qtext_fetch(Size query_offset, int query_len, char *buffer, Size buffer_size); static bool need_gc_qtexts(void); static void gc_qtexts(void); static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only); static char *generate_normalized_query(JumbleState *jstate, const char *query, int query_loc, int *query_len_p); static void fill_in_constant_lengths(JumbleState *jstate, const char *query, int query_loc); static int comp_location(const void *a, const void *b); /* * Module load callback */ void _PG_init(void) { /* * In order to create our shared memory area, we have to be loaded via * shared_preload_libraries. If not, fall out without hooking into any of * the main system. (We don't throw error here because it seems useful to * allow the pg_stat_statements functions to be created even when the * module isn't active. 
The functions must protect themselves against * being called then, however.) */ if (!process_shared_preload_libraries_in_progress) return; /* * Inform the postmaster that we want to enable query_id calculation if * compute_query_id is set to auto. */ EnableQueryId(); /* * Define (or redefine) custom GUC variables. */ DefineCustomIntVariable("pg_stat_statements.max", "Sets the maximum number of statements tracked by pg_stat_statements.", NULL, &pgss_max, 5000, 100, INT_MAX / 2, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomEnumVariable("pg_stat_statements.track", "Selects which statements are tracked by pg_stat_statements.", NULL, &pgss_track, PGSS_TRACK_TOP, track_options, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pg_stat_statements.track_utility", "Selects whether utility commands are tracked by pg_stat_statements.", NULL, &pgss_track_utility, true, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pg_stat_statements.track_planning", "Selects whether planning duration is tracked by pg_stat_statements.", NULL, &pgss_track_planning, false, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pg_stat_statements.save", "Save pg_stat_statements statistics across server shutdowns.", NULL, &pgss_save, true, PGC_SIGHUP, 0, NULL, NULL, NULL); MarkGUCPrefixReserved("pg_stat_statements"); /* * Install hooks. 
*/ prev_shmem_request_hook = shmem_request_hook; shmem_request_hook = pgss_shmem_request; prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pgss_shmem_startup; prev_post_parse_analyze_hook = post_parse_analyze_hook; post_parse_analyze_hook = pgss_post_parse_analyze; prev_planner_hook = planner_hook; planner_hook = pgss_planner; prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = pgss_ExecutorStart; prev_ExecutorRun = ExecutorRun_hook; ExecutorRun_hook = pgss_ExecutorRun; prev_ExecutorFinish = ExecutorFinish_hook; ExecutorFinish_hook = pgss_ExecutorFinish; prev_ExecutorEnd = ExecutorEnd_hook; ExecutorEnd_hook = pgss_ExecutorEnd; prev_ProcessUtility = ProcessUtility_hook; ProcessUtility_hook = pgss_ProcessUtility; } /* * shmem_request hook: request additional shared resources. We'll allocate or * attach to the shared resources in pgss_shmem_startup(). */ static void pgss_shmem_request(void) { if (prev_shmem_request_hook) prev_shmem_request_hook(); RequestAddinShmemSpace(pgss_memsize()); RequestNamedLWLockTranche("pg_stat_statements", 1); } /* * shmem_startup hook: allocate or attach to shared memory, * then load any pre-existing statistics from file. * Also create and load the query-texts file, which is expected to exist * (even if empty) while the module is enabled. */ static void pgss_shmem_startup(void) { bool found; HASHCTL info; FILE *file = NULL; FILE *qfile = NULL; uint32 header; int32 num; int32 pgver; int32 i; int buffer_size; char *buffer = NULL; if (prev_shmem_startup_hook) prev_shmem_startup_hook(); /* reset in case this is a restart within the postmaster */ pgss = NULL; pgss_hash = NULL; /* * Create or attach to the shared memory state, including hash table */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); pgss = ShmemInitStruct("pg_stat_statements", sizeof(pgssSharedState), &found); if (!found) { /* First time through ... 
*/ pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock; pgss->cur_median_usage = ASSUMED_MEDIAN_INIT; pgss->mean_query_len = ASSUMED_LENGTH_INIT; SpinLockInit(&pgss->mutex); pgss->extent = 0; pgss->n_writers = 0; pgss->gc_count = 0; pgss->stats.dealloc = 0; pgss->stats.stats_reset = GetCurrentTimestamp(); } info.keysize = sizeof(pgssHashKey); info.entrysize = sizeof(pgssEntry); pgss_hash = ShmemInitHash("pg_stat_statements hash", pgss_max, pgss_max, &info, HASH_ELEM | HASH_BLOBS); LWLockRelease(AddinShmemInitLock); /* * If we're in the postmaster (or a standalone backend...), set up a shmem * exit hook to dump the statistics to disk. */ if (!IsUnderPostmaster) on_shmem_exit(pgss_shmem_shutdown, (Datum) 0); /* * Done if some other process already completed our initialization. */ if (found) return; /* * Note: we don't bother with locks here, because there should be no other * processes running when this code is reached. */ /* Unlink query text file possibly left over from crash */ unlink(PGSS_TEXT_FILE); /* Allocate new query text temp file */ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W); if (qfile == NULL) goto write_error; /* * If we were told not to load old statistics, we're done. (Note we do * not try to unlink any old dump file in this case. This seems a bit * questionable but it's the historical behavior.) */ if (!pgss_save) { FreeFile(qfile); return; } /* * Attempt to load old statistics from the dump file. 
*/ file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R); if (file == NULL) { if (errno != ENOENT) goto read_error; /* No existing persisted stats file, so we're done */ FreeFile(qfile); return; } buffer_size = 2048; buffer = (char *) palloc(buffer_size); if (fread(&header, sizeof(uint32), 1, file) != 1 || fread(&pgver, sizeof(uint32), 1, file) != 1 || fread(&num, sizeof(int32), 1, file) != 1) goto read_error; if (header != PGSS_FILE_HEADER || pgver != PGSS_PG_MAJOR_VERSION) goto data_error; for (i = 0; i < num; i++) { pgssEntry temp; pgssEntry *entry; Size query_offset; if (fread(&temp, sizeof(pgssEntry), 1, file) != 1) goto read_error; /* Encoding is the only field we can easily sanity-check */ if (!PG_VALID_BE_ENCODING(temp.encoding)) goto data_error; /* Resize buffer as needed */ if (temp.query_len >= buffer_size) { buffer_size = Max(buffer_size * 2, temp.query_len + 1); buffer = repalloc(buffer, buffer_size); } if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1) goto read_error; /* Should have a trailing null, but let's make sure */ buffer[temp.query_len] = '\0'; /* Skip loading "sticky" entries */ if (IS_STICKY(temp.counters)) continue; /* Store the query text */ query_offset = pgss->extent; if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1) goto write_error; pgss->extent += temp.query_len + 1; /* make the hashtable entry (discards old entries if too many) */ entry = entry_alloc(&temp.key, query_offset, temp.query_len, temp.encoding, false); /* copy in the actual stats */ entry->counters = temp.counters; entry->stats_since = temp.stats_since; entry->minmax_stats_since = temp.minmax_stats_since; } /* Read global statistics for pg_stat_statements */ if (fread(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1) goto read_error; pfree(buffer); FreeFile(file); FreeFile(qfile); /* * Remove the persisted stats file so it's not included in * backups/replication standbys, etc. A new file will be written on next * shutdown. 
* * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup, * because we remove that file on startup; it acts inversely to * PGSS_DUMP_FILE, in that it is only supposed to be around when the * server is running, whereas PGSS_DUMP_FILE is only supposed to be around * when the server is not running. Leaving the file creates no danger of * a newly restored database having a spurious record of execution costs, * which is what we're really concerned about here. */ unlink(PGSS_DUMP_FILE); return; read_error: ereport(LOG, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", PGSS_DUMP_FILE))); goto fail; data_error: ereport(LOG, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("ignoring invalid data in file \"%s\"", PGSS_DUMP_FILE))); goto fail; write_error: ereport(LOG, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); fail: if (buffer) pfree(buffer); if (file) FreeFile(file); if (qfile) FreeFile(qfile); /* If possible, throw away the bogus file; ignore any error */ unlink(PGSS_DUMP_FILE); /* * Don't unlink PGSS_TEXT_FILE here; it should always be around while the * server is running with pg_stat_statements enabled */ } /* * shmem_shutdown hook: Dump statistics into file. * * Note: we don't bother with acquiring lock, because there should be no * other processes running when this is called. */ static void pgss_shmem_shutdown(int code, Datum arg) { FILE *file; char *qbuffer = NULL; Size qbuffer_size = 0; HASH_SEQ_STATUS hash_seq; int32 num_entries; pgssEntry *entry; /* Don't try to dump during a crash. */ if (code) return; /* Safety check ... shouldn't get here unless shmem is set up. */ if (!pgss || !pgss_hash) return; /* Don't dump if told not to. 
*/ if (!pgss_save) return; file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W); if (file == NULL) goto error; if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1) goto error; if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1) goto error; num_entries = hash_get_num_entries(pgss_hash); if (fwrite(&num_entries, sizeof(int32), 1, file) != 1) goto error; qbuffer = qtext_load_file(&qbuffer_size); if (qbuffer == NULL) goto error; /* * When serializing to disk, we store query texts immediately after their * entry data. Any orphaned query texts are thereby excluded. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { int len = entry->query_len; char *qstr = qtext_fetch(entry->query_offset, len, qbuffer, qbuffer_size); if (qstr == NULL) continue; /* Ignore any entries with bogus texts */ if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 || fwrite(qstr, 1, len + 1, file) != len + 1) { /* note: we assume hash_seq_term won't change errno */ hash_seq_term(&hash_seq); goto error; } } /* Dump global statistics for pg_stat_statements */ if (fwrite(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1) goto error; free(qbuffer); qbuffer = NULL; if (FreeFile(file)) { file = NULL; goto error; } /* * Rename file into place, so we atomically replace any old one. */ (void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG); /* Unlink query-texts file; it's not needed while shutdown */ unlink(PGSS_TEXT_FILE); return; error: ereport(LOG, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_DUMP_FILE ".tmp"))); free(qbuffer); if (file) FreeFile(file); unlink(PGSS_DUMP_FILE ".tmp"); unlink(PGSS_TEXT_FILE); } /* * Post-parse-analysis hook: mark query with a queryId */ static void pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) { if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query, jstate); /* Safety check... 
*/ if (!pgss || !pgss_hash || !pgss_enabled(nesting_level)) return; /* * If it's EXECUTE, clear the queryId so that stats will accumulate for * the underlying PREPARE. But don't do this if we're not tracking * utility statements, to avoid messing up another extension that might be * tracking them. */ if (query->utilityStmt) { if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt)) { query->queryId = INT64CONST(0); return; } } /* * If query jumbling were able to identify any ignorable constants, we * immediately create a hash table entry for the query, so that we can * record the normalized form of the query string. If there were no such * constants, the normalized string would be the same as the query text * anyway, so there's no need for an early entry. */ if (jstate && jstate->clocations_count > 0) pgss_store(pstate->p_sourcetext, query->queryId, query->stmt_location, query->stmt_len, PGSS_INVALID, 0, 0, NULL, NULL, NULL, jstate, 0, 0, PLAN_STMT_UNKNOWN); } /* * Planner hook: forward to regular planner, but measure planning time * if needed. */ static PlannedStmt * pgss_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es) { PlannedStmt *result; /* * We can't process the query if no query_string is provided, as * pgss_store needs it. We also ignore query without queryid, as it would * be treated as a utility statement, which may not be the case. */ if (pgss_enabled(nesting_level) && pgss_track_planning && query_string && parse->queryId != INT64CONST(0)) { instr_time start; instr_time duration; BufferUsage bufusage_start, bufusage; WalUsage walusage_start, walusage; /* We need to track buffer usage as the planner can access them. */ bufusage_start = pgBufferUsage; /* * Similarly the planner could write some WAL records in some cases * (e.g. 
setting a hint bit with those being WAL-logged) */ walusage_start = pgWalUsage; INSTR_TIME_SET_CURRENT(start); nesting_level++; PG_TRY(); { if (prev_planner_hook) result = prev_planner_hook(parse, query_string, cursorOptions, boundParams, es); else result = standard_planner(parse, query_string, cursorOptions, boundParams, es); } PG_FINALLY(); { nesting_level--; } PG_END_TRY(); INSTR_TIME_SET_CURRENT(duration); INSTR_TIME_SUBTRACT(duration, start); /* calc differences of buffer counters. */ memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); /* calc differences of WAL counters. */ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); pgss_store(query_string, parse->queryId, parse->stmt_location, parse->stmt_len, PGSS_PLAN, INSTR_TIME_GET_MILLISEC(duration), 0, &bufusage, &walusage, NULL, NULL, 0, 0, result->planOrigin); } else { /* * Even though we're not tracking plan time for this statement, we * must still increment the nesting level, to ensure that functions * evaluated during planning are not seen as top-level calls. */ nesting_level++; PG_TRY(); { if (prev_planner_hook) result = prev_planner_hook(parse, query_string, cursorOptions, boundParams, es); else result = standard_planner(parse, query_string, cursorOptions, boundParams, es); } PG_FINALLY(); { nesting_level--; } PG_END_TRY(); } return result; } /* * ExecutorStart hook: start up tracking if needed */ static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) { if (prev_ExecutorStart) prev_ExecutorStart(queryDesc, eflags); else standard_ExecutorStart(queryDesc, eflags); /* * If query has queryId zero, don't track it. This prevents double * counting of optimizable statements that are directly contained in * utility statements. */ if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != INT64CONST(0)) { /* * Set up to track total elapsed time in ExecutorRun. 
Make sure the * space is allocated in the per-query context so it will go away at * ExecutorEnd. */ if (queryDesc->totaltime == NULL) { MemoryContext oldcxt; oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt); queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false); MemoryContextSwitchTo(oldcxt); } } } /* * ExecutorRun hook: all we need do is track nesting depth */ static void pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count) { nesting_level++; PG_TRY(); { if (prev_ExecutorRun) prev_ExecutorRun(queryDesc, direction, count); else standard_ExecutorRun(queryDesc, direction, count); } PG_FINALLY(); { nesting_level--; } PG_END_TRY(); } /* * ExecutorFinish hook: all we need do is track nesting depth */ static void pgss_ExecutorFinish(QueryDesc *queryDesc) { nesting_level++; PG_TRY(); { if (prev_ExecutorFinish) prev_ExecutorFinish(queryDesc); else standard_ExecutorFinish(queryDesc); } PG_FINALLY(); { nesting_level--; } PG_END_TRY(); } /* * ExecutorEnd hook: store results if needed */ static void pgss_ExecutorEnd(QueryDesc *queryDesc) { int64 queryId = queryDesc->plannedstmt->queryId; if (queryId != INT64CONST(0) && queryDesc->totaltime && pgss_enabled(nesting_level)) { /* * Make sure stats accumulation is done. (Note: it's okay if several * levels of hook all do this.) */ InstrEndLoop(queryDesc->totaltime); pgss_store(queryDesc->sourceText, queryId, queryDesc->plannedstmt->stmt_location, queryDesc->plannedstmt->stmt_len, PGSS_EXEC, INSTR_TIME_GET_MILLISEC(queryDesc->totaltime->total), queryDesc->estate->es_total_processed, &queryDesc->totaltime->bufusage, &queryDesc->totaltime->walusage, queryDesc->estate->es_jit ? 
&queryDesc->estate->es_jit->instr : NULL, NULL, queryDesc->estate->es_parallel_workers_to_launch, queryDesc->estate->es_parallel_workers_launched, queryDesc->plannedstmt->planOrigin); } if (prev_ExecutorEnd) prev_ExecutorEnd(queryDesc); else standard_ExecutorEnd(queryDesc); } /* * ProcessUtility hook */ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc) { Node *parsetree = pstmt->utilityStmt; int64 saved_queryId = pstmt->queryId; int saved_stmt_location = pstmt->stmt_location; int saved_stmt_len = pstmt->stmt_len; bool enabled = pgss_track_utility && pgss_enabled(nesting_level); /* * Force utility statements to get queryId zero. We do this even in cases * where the statement contains an optimizable statement for which a * queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such * cases, runtime control will first go through ProcessUtility and then * the executor, and we don't want the executor hooks to do anything, * since we are already measuring the statement's costs at the utility * level. * * Note that this is only done if pg_stat_statements is enabled and * configured to track utility statements, in the unlikely possibility * that user configured another extension to handle utility statements * only. */ if (enabled) pstmt->queryId = INT64CONST(0); /* * If it's an EXECUTE statement, we don't track it and don't increment the * nesting level. This allows the cycles to be charged to the underlying * PREPARE instead (by the Executor hooks), which is much more useful. * * We also don't track execution of PREPARE. If we did, we would get one * hash table entry for the PREPARE (with hash calculated from the query * string), and then a different one with the same query string (but hash * calculated from the query tree) would be used to accumulate costs of * ensuing EXECUTEs. 
This would be confusing. Since PREPARE doesn't * actually run the planner (only parse+rewrite), its costs are generally * pretty negligible and it seems okay to just ignore it. */ if (enabled && !IsA(parsetree, ExecuteStmt) && !IsA(parsetree, PrepareStmt)) { instr_time start; instr_time duration; uint64 rows; BufferUsage bufusage_start, bufusage; WalUsage walusage_start, walusage; bufusage_start = pgBufferUsage; walusage_start = pgWalUsage; INSTR_TIME_SET_CURRENT(start); nesting_level++; PG_TRY(); { if (prev_ProcessUtility) prev_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); else standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); } PG_FINALLY(); { nesting_level--; } PG_END_TRY(); /* * CAUTION: do not access the *pstmt data structure again below here. * If it was a ROLLBACK or similar, that data structure may have been * freed. We must copy everything we still need into local variables, * which we did above. * * For the same reason, we can't risk restoring pstmt->queryId to its * former value, which'd otherwise be a good idea. */ INSTR_TIME_SET_CURRENT(duration); INSTR_TIME_SUBTRACT(duration, start); /* * Track the total number of rows retrieved or affected by the utility * statements of COPY, FETCH, CREATE TABLE AS, CREATE MATERIALIZED * VIEW, REFRESH MATERIALIZED VIEW and SELECT INTO. */ rows = (qc && (qc->commandTag == CMDTAG_COPY || qc->commandTag == CMDTAG_FETCH || qc->commandTag == CMDTAG_SELECT || qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) ? qc->nprocessed : 0; /* calc differences of buffer counters. */ memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); /* calc differences of WAL counters. 
*/ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); pgss_store(queryString, saved_queryId, saved_stmt_location, saved_stmt_len, PGSS_EXEC, INSTR_TIME_GET_MILLISEC(duration), rows, &bufusage, &walusage, NULL, NULL, 0, 0, pstmt->planOrigin); } else { /* * Even though we're not tracking execution time for this statement, * we must still increment the nesting level, to ensure that functions * evaluated within it are not seen as top-level calls. But don't do * so for EXECUTE; that way, when control reaches pgss_planner or * pgss_ExecutorStart, we will treat the costs as top-level if * appropriate. Likewise, don't bump for PREPARE, so that parse * analysis will treat the statement as top-level if appropriate. * * To be absolutely certain we don't mess up the nesting level, * evaluate the bump_level condition just once. */ bool bump_level = !IsA(parsetree, ExecuteStmt) && !IsA(parsetree, PrepareStmt); if (bump_level) nesting_level++; PG_TRY(); { if (prev_ProcessUtility) prev_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); else standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); } PG_FINALLY(); { if (bump_level) nesting_level--; } PG_END_TRY(); } } /* * Store some statistics for a statement. * * If jstate is not NULL then we're trying to create an entry for which * we have no statistics as yet; we just want to record the normalized * query string. total_time, rows, bufusage and walusage are ignored in this * case. * * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position * for the arrays in the Counters field. 
 */
static void
pgss_store(const char *query, int64 queryId,
		   int query_location, int query_len,
		   pgssStoreKind kind,
		   double total_time, uint64 rows,
		   const BufferUsage *bufusage,
		   const WalUsage *walusage,
		   const struct JitInstrumentation *jitusage,
		   JumbleState *jstate,
		   int parallel_workers_to_launch,
		   int parallel_workers_launched,
		   PlannedStmtOrigin planOrigin)
{
	pgssHashKey key;
	pgssEntry  *entry;
	char	   *norm_query = NULL;

	/* query text is stored along with its encoding, for later conversion */
	int			encoding = GetDatabaseEncoding();

	Assert(query != NULL);

	/* Safety check... */
	if (!pgss || !pgss_hash)
		return;

	/*
	 * Nothing to do if compute_query_id isn't enabled and no other module
	 * computed a query identifier.
	 */
	if (queryId == INT64CONST(0))
		return;

	/*
	 * Confine our attention to the relevant part of the string, if the query
	 * is a portion of a multi-statement source string, and update query
	 * location and length if needed.
	 */
	query = CleanQuerytext(query, &query_location, &query_len);

	/* Set up key for hashtable search */

	/* clear padding */
	memset(&key, 0, sizeof(pgssHashKey));

	key.userid = GetUserId();
	key.dbid = MyDatabaseId;
	key.queryid = queryId;
	key.toplevel = (nesting_level == 0);

	/* Lookup the hash table entry with shared lock. */
	LWLockAcquire(pgss->lock, LW_SHARED);

	entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

	/* Create new entry, if not present */
	if (!entry)
	{
		Size		query_offset;
		int			gc_count;
		bool		stored;
		bool		do_gc;

		/*
		 * Create a new, normalized query string if caller asked.  We don't
		 * need to hold the lock while doing this work.  (Note: in any case,
		 * it's possible that someone else creates a duplicate hashtable entry
		 * in the interval where we don't hold the lock below.  That case is
		 * handled by entry_alloc.)
		 */
		if (jstate)
		{
			LWLockRelease(pgss->lock);
			norm_query = generate_normalized_query(jstate, query,
												   query_location,
												   &query_len);
			LWLockAcquire(pgss->lock, LW_SHARED);
		}

		/* Append new query text to file with only shared lock held */
		stored = qtext_store(norm_query ? norm_query : query, query_len,
							 &query_offset, &gc_count);

		/*
		 * Determine whether we need to garbage collect external query texts
		 * while the shared lock is still held.  This micro-optimization
		 * avoids taking the time to decide this while holding exclusive lock.
		 */
		do_gc = need_gc_qtexts();

		/* Need exclusive lock to make a new hashtable entry - promote */
		LWLockRelease(pgss->lock);
		LWLockAcquire(pgss->lock, LW_EXCLUSIVE);

		/*
		 * A garbage collection may have occurred while we weren't holding the
		 * lock.  In the unlikely event that this happens, the query text we
		 * stored above will have been garbage collected, so write it again.
		 * This should be infrequent enough that doing it while holding
		 * exclusive lock isn't a performance problem.
		 */
		if (!stored || pgss->gc_count != gc_count)
			stored = qtext_store(norm_query ? norm_query : query, query_len,
								 &query_offset, NULL);

		/* If we failed to write to the text file, give up */
		if (!stored)
			goto done;

		/* OK to create a new hashtable entry */
		entry = entry_alloc(&key, query_offset, query_len, encoding,
							jstate != NULL);

		/* If needed, perform garbage collection while exclusive lock held */
		if (do_gc)
			gc_qtexts();
	}

	/* Increment the counts, except when jstate is not NULL */
	if (!jstate)
	{
		/* PGSS_INVALID is only used for jstate-only (normalization) calls */
		Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);

		/*
		 * Grab the spinlock while updating the counters (see comment about
		 * locking rules at the head of the file)
		 */
		SpinLockAcquire(&entry->mutex);

		/* "Unstick" entry if it was previously sticky */
		if (IS_STICKY(entry->counters))
			entry->counters.usage = USAGE_INIT;

		entry->counters.calls[kind] += 1;
		entry->counters.total_time[kind] += total_time;

		if (entry->counters.calls[kind] == 1)
		{
			entry->counters.min_time[kind] = total_time;
			entry->counters.max_time[kind] = total_time;
			entry->counters.mean_time[kind] = total_time;
		}
		else
		{
			/*
			 * Welford's method for accurately computing variance. See
			 * <http://www.johndcook.com/blog/standard_deviation/>
			 */
			double		old_mean = entry->counters.mean_time[kind];

			entry->counters.mean_time[kind] +=
				(total_time - old_mean) / entry->counters.calls[kind];
			entry->counters.sum_var_time[kind] +=
				(total_time - old_mean) * (total_time - entry->counters.mean_time[kind]);

			/*
			 * Calculate min and max time. min = 0 and max = 0 means that the
			 * min/max statistics were reset
			 */
			if (entry->counters.min_time[kind] == 0
				&& entry->counters.max_time[kind] == 0)
			{
				entry->counters.min_time[kind] = total_time;
				entry->counters.max_time[kind] = total_time;
			}
			else
			{
				if (entry->counters.min_time[kind] > total_time)
					entry->counters.min_time[kind] = total_time;
				if (entry->counters.max_time[kind] < total_time)
					entry->counters.max_time[kind] = total_time;
			}
		}
		entry->counters.rows += rows;
		entry->counters.shared_blks_hit += bufusage->shared_blks_hit;
		entry->counters.shared_blks_read += bufusage->shared_blks_read;
		entry->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
		entry->counters.shared_blks_written += bufusage->shared_blks_written;
		entry->counters.local_blks_hit += bufusage->local_blks_hit;
		entry->counters.local_blks_read += bufusage->local_blks_read;
		entry->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
		entry->counters.local_blks_written += bufusage->local_blks_written;
		entry->counters.temp_blks_read += bufusage->temp_blks_read;
		entry->counters.temp_blks_written += bufusage->temp_blks_written;
		entry->counters.shared_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_read_time);
		entry->counters.shared_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_write_time);
		entry->counters.local_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_read_time);
		entry->counters.local_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_write_time);
		entry->counters.temp_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_read_time);
		entry->counters.temp_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_write_time);
		entry->counters.usage += USAGE_EXEC(total_time);
		entry->counters.wal_records += walusage->wal_records;
		entry->counters.wal_fpi += walusage->wal_fpi;
		entry->counters.wal_bytes += walusage->wal_bytes;
		entry->counters.wal_buffers_full += walusage->wal_buffers_full;
		if (jitusage)
		{
			entry->counters.jit_functions += jitusage->created_functions;
			entry->counters.jit_generation_time += INSTR_TIME_GET_MILLISEC(jitusage->generation_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->deform_counter))
				entry->counters.jit_deform_count++;
			entry->counters.jit_deform_time += INSTR_TIME_GET_MILLISEC(jitusage->deform_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter))
				entry->counters.jit_inlining_count++;
			entry->counters.jit_inlining_time += INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter))
				entry->counters.jit_optimization_count++;
			entry->counters.jit_optimization_time += INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->emission_counter))
				entry->counters.jit_emission_count++;
			entry->counters.jit_emission_time += INSTR_TIME_GET_MILLISEC(jitusage->emission_counter);
		}

		/* parallel worker counters */
		entry->counters.parallel_workers_to_launch += parallel_workers_to_launch;
		entry->counters.parallel_workers_launched += parallel_workers_launched;

		/* plan cache counters */
		if (planOrigin == PLAN_STMT_CACHE_GENERIC)
			entry->counters.generic_plan_calls++;
		else if (planOrigin == PLAN_STMT_CACHE_CUSTOM)
			entry->counters.custom_plan_calls++;

		SpinLockRelease(&entry->mutex);
	}

done:
	LWLockRelease(pgss->lock);

	/* We postpone this clean-up until we're out of the lock */
	if (norm_query)
		pfree(norm_query);
}

/*
 * Reset statement statistics corresponding to userid, dbid, and queryid.
 */
Datum
pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
{
	Oid			userid;
	Oid			dbid;
	int64		queryid;

	userid = PG_GETARG_OID(0);
	dbid = PG_GETARG_OID(1);
	queryid = PG_GETARG_INT64(2);

	/* API 1.7: reset matching entries entirely; no result value */
	entry_reset(userid, dbid, queryid, false);

	PG_RETURN_VOID();
}

Datum
pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
{
	Oid			userid;
	Oid			dbid;
	int64		queryid;
	bool		minmax_only;

	userid = PG_GETARG_OID(0);
	dbid = PG_GETARG_OID(1);
	queryid = PG_GETARG_INT64(2);
	minmax_only = PG_GETARG_BOOL(3);

	/*
	 * API 1.11 adds the minmax_only flag (reset only min/max statistics) and
	 * returns the timestamp of the reset.
	 */
	PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
}

/*
 * Reset statement statistics.
 */
Datum
pg_stat_statements_reset(PG_FUNCTION_ARGS)
{
	/* Legacy (pre-1.7) form: reset everything */
	entry_reset(0, 0, 0, false);

	PG_RETURN_VOID();
}

/* Number of output arguments (columns) for various API versions */
#define PG_STAT_STATEMENTS_COLS_V1_0	14
#define PG_STAT_STATEMENTS_COLS_V1_1	18
#define PG_STAT_STATEMENTS_COLS_V1_2	19
#define PG_STAT_STATEMENTS_COLS_V1_3	23
#define PG_STAT_STATEMENTS_COLS_V1_8	32
#define PG_STAT_STATEMENTS_COLS_V1_9	33
#define PG_STAT_STATEMENTS_COLS_V1_10	43
#define PG_STAT_STATEMENTS_COLS_V1_11	49
#define PG_STAT_STATEMENTS_COLS_V1_12	52
#define PG_STAT_STATEMENTS_COLS_V1_13	54
#define PG_STAT_STATEMENTS_COLS			54	/* maximum of above */

/*
 * Retrieve statement statistics.
 *
 * The SQL API of this function has changed multiple times, and will likely
 * do so again in future.  To support the case where a newer version of this
 * loadable module is being used with an old SQL declaration of the function,
 * we continue to support the older API versions.  For 1.2 and later, the
 * expected API version is identified by embedding it in the C name of the
 * function.  Unfortunately we weren't bright enough to do that for 1.1.
 */
Datum
pg_stat_statements_1_13(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_13, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_12(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_12, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_11(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_11, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_10(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_10, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_9(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_9, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_8(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_3(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);

	return (Datum) 0;
}

Datum
pg_stat_statements_1_2(PG_FUNCTION_ARGS)
{
	bool		showtext = PG_GETARG_BOOL(0);

	pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);

	return (Datum) 0;
}

/*
 * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
 * This can be removed someday, perhaps.
 */
Datum
pg_stat_statements(PG_FUNCTION_ARGS)
{
	/* If it's really API 1.1, we'll figure that out below */
	pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);

	return (Datum) 0;
}

/* Common code for all versions of pg_stat_statements() */
static void
pg_stat_statements_internal(FunctionCallInfo fcinfo,
							pgssVersion api_version,
							bool showtext)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	Oid			userid = GetUserId();
	bool		is_allowed_role = false;
	char	   *qbuffer = NULL;
	Size		qbuffer_size = 0;
	Size		extent = 0;
	int			gc_count = 0;
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;

	/*
	 * Superusers or roles with the privileges of pg_read_all_stats members
	 * are allowed
	 */
	is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);

	/* hash table must exist already */
	if (!pgss || !pgss_hash)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));

	InitMaterializedSRF(fcinfo, 0);

	/*
	 * Check we have the expected number of output arguments.  Aside from
	 * being a good safety check, we need a kluge here to detect API version
	 * 1.1, which was wedged into the code in an ill-considered way.
	 */
	switch (rsinfo->setDesc->natts)
	{
		case PG_STAT_STATEMENTS_COLS_V1_0:
			if (api_version != PGSS_V1_0)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_1:
			/* pg_stat_statements() should have told us 1.0 */
			if (api_version != PGSS_V1_0)
				elog(ERROR, "incorrect number of output arguments");
			api_version = PGSS_V1_1;
			break;
		case PG_STAT_STATEMENTS_COLS_V1_2:
			if (api_version != PGSS_V1_2)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_3:
			if (api_version != PGSS_V1_3)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_8:
			if (api_version != PGSS_V1_8)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_9:
			if (api_version != PGSS_V1_9)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_10:
			if (api_version != PGSS_V1_10)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_11:
			if (api_version != PGSS_V1_11)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_12:
			if (api_version != PGSS_V1_12)
				elog(ERROR, "incorrect number of output arguments");
			break;
		case PG_STAT_STATEMENTS_COLS_V1_13:
			if (api_version != PGSS_V1_13)
				elog(ERROR, "incorrect number of output arguments");
			break;
		default:
			elog(ERROR, "incorrect number of output arguments");
	}

	/*
	 * We'd like to load the query text file (if needed) while not holding any
	 * lock on pgss->lock.  In the worst case we'll have to do this again
	 * after we have the lock, but it's unlikely enough to make this a win
	 * despite occasional duplicated work.  We need to reload if anybody
	 * writes to the file (either a retail qtext_store(), or a garbage
	 * collection) between this point and where we've gotten shared lock.  If
	 * a qtext_store is actually in progress when we look, we might as well
	 * skip the speculative load entirely.
	 */
	if (showtext)
	{
		int			n_writers;

		/* Take the mutex so we can examine variables */
		SpinLockAcquire(&pgss->mutex);
		extent = pgss->extent;
		n_writers = pgss->n_writers;
		gc_count = pgss->gc_count;
		SpinLockRelease(&pgss->mutex);

		/* No point in loading file now if there are active writers */
		if (n_writers == 0)
			qbuffer = qtext_load_file(&qbuffer_size);
	}

	/*
	 * Get shared lock, load or reload the query text file if we must, and
	 * iterate over the hashtable entries.
	 *
	 * With a large hash table, we might be holding the lock rather longer
	 * than one could wish.  However, this only blocks creation of new hash
	 * table entries, and the larger the hash table the less likely that is to
	 * be needed.  So we can hope this is okay.  Perhaps someday we'll decide
	 * we need to partition the hash table to limit the time spent holding any
	 * one lock.
	 */
	LWLockAcquire(pgss->lock, LW_SHARED);

	if (showtext)
	{
		/*
		 * Here it is safe to examine extent and gc_count without taking the
		 * mutex.  Note that although other processes might change
		 * pgss->extent just after we look at it, the strings they then write
		 * into the file cannot yet be referenced in the hashtable, so we
		 * don't care whether we see them or not.
		 *
		 * If qtext_load_file fails, we just press on; we'll return NULL for
		 * every query text.
		 */
		if (qbuffer == NULL ||
			pgss->extent != extent ||
			pgss->gc_count != gc_count)
		{
			free(qbuffer);
			qbuffer = qtext_load_file(&qbuffer_size);
		}
	}

	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		Datum		values[PG_STAT_STATEMENTS_COLS];
		bool		nulls[PG_STAT_STATEMENTS_COLS];
		int			i = 0;
		Counters	tmp;
		double		stddev;
		int64		queryid = entry->key.queryid;
		TimestampTz stats_since;
		TimestampTz minmax_stats_since;

		memset(values, 0, sizeof(values));
		memset(nulls, 0, sizeof(nulls));

		values[i++] = ObjectIdGetDatum(entry->key.userid);
		values[i++] = ObjectIdGetDatum(entry->key.dbid);
		if (api_version >= PGSS_V1_9)
			values[i++] = BoolGetDatum(entry->key.toplevel);

		if (is_allowed_role || entry->key.userid == userid)
		{
			if (api_version >= PGSS_V1_2)
				values[i++] = Int64GetDatumFast(queryid);

			if (showtext)
			{
				char	   *qstr = qtext_fetch(entry->query_offset,
											   entry->query_len,
											   qbuffer,
											   qbuffer_size);

				if (qstr)
				{
					char	   *enc;

					/* convert from the stored encoding to the server's */
					enc = pg_any_to_server(qstr,
										   entry->query_len,
										   entry->encoding);

					values[i++] = CStringGetTextDatum(enc);

					if (enc != qstr)
						pfree(enc);
				}
				else
				{
					/* Just return a null if we fail to find the text */
					nulls[i++] = true;
				}
			}
			else
			{
				/* Query text not requested */
				nulls[i++] = true;
			}
		}
		else
		{
			/* Don't show queryid */
			if (api_version >= PGSS_V1_2)
				nulls[i++] = true;

			/*
			 * Don't show query text, but hint as to the reason for not doing
			 * so if it was requested
			 */
			if (showtext)
				values[i++] = CStringGetTextDatum("<insufficient privilege>");
			else
				nulls[i++] = true;
		}

		/* copy counters to a local variable to keep locking time short */
		SpinLockAcquire(&entry->mutex);
		tmp = entry->counters;
		SpinLockRelease(&entry->mutex);

		/*
		 * The spinlock is not required when reading these two as they are
		 * always updated when holding pgss->lock exclusively.
		 */
		stats_since = entry->stats_since;
		minmax_stats_since = entry->minmax_stats_since;

		/* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
		if (IS_STICKY(tmp))
			continue;

		/* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
		for (int kind = 0; kind < PGSS_NUMKIND; kind++)
		{
			if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
			{
				values[i++] = Int64GetDatumFast(tmp.calls[kind]);
				values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
			}

			if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
				api_version >= PGSS_V1_8)
			{
				values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
				values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
				values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);

				/*
				 * Note we are calculating the population variance here, not
				 * the sample variance, as we have data for the whole
				 * population, so Bessel's correction is not used, and we
				 * don't divide by tmp.calls - 1.
				 */
				if (tmp.calls[kind] > 1)
					stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
				else
					stddev = 0.0;
				values[i++] = Float8GetDatumFast(stddev);
			}
		}
		values[i++] = Int64GetDatumFast(tmp.rows);
		values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
		values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
		if (api_version >= PGSS_V1_1)
			values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
		values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
		values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
		values[i++] = Int64GetDatumFast(tmp.local_blks_read);
		if (api_version >= PGSS_V1_1)
			values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
		values[i++] = Int64GetDatumFast(tmp.local_blks_written);
		values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
		values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
		if (api_version >= PGSS_V1_1)
		{
			values[i++] = Float8GetDatumFast(tmp.shared_blk_read_time);
			values[i++] = Float8GetDatumFast(tmp.shared_blk_write_time);
		}
		if (api_version >= PGSS_V1_11)
		{
			values[i++] = Float8GetDatumFast(tmp.local_blk_read_time);
			values[i++] = Float8GetDatumFast(tmp.local_blk_write_time);
		}
		if (api_version >= PGSS_V1_10)
		{
			values[i++] = Float8GetDatumFast(tmp.temp_blk_read_time);
			values[i++] = Float8GetDatumFast(tmp.temp_blk_write_time);
		}
		if (api_version >= PGSS_V1_8)
		{
			char		buf[256];
			Datum		wal_bytes;

			values[i++] = Int64GetDatumFast(tmp.wal_records);
			values[i++] = Int64GetDatumFast(tmp.wal_fpi);

			/* wal_bytes is uint64; route through numeric to avoid overflow */
			snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);

			/* Convert to numeric. */
			wal_bytes = DirectFunctionCall3(numeric_in,
											CStringGetDatum(buf),
											ObjectIdGetDatum(0),
											Int32GetDatum(-1));
			values[i++] = wal_bytes;
		}
		if (api_version >= PGSS_V1_12)
		{
			values[i++] = Int64GetDatumFast(tmp.wal_buffers_full);
		}
		if (api_version >= PGSS_V1_10)
		{
			values[i++] = Int64GetDatumFast(tmp.jit_functions);
			values[i++] = Float8GetDatumFast(tmp.jit_generation_time);
			values[i++] = Int64GetDatumFast(tmp.jit_inlining_count);
			values[i++] = Float8GetDatumFast(tmp.jit_inlining_time);
			values[i++] = Int64GetDatumFast(tmp.jit_optimization_count);
			values[i++] = Float8GetDatumFast(tmp.jit_optimization_time);
			values[i++] = Int64GetDatumFast(tmp.jit_emission_count);
			values[i++] = Float8GetDatumFast(tmp.jit_emission_time);
		}
		if (api_version >= PGSS_V1_11)
		{
			values[i++] = Int64GetDatumFast(tmp.jit_deform_count);
			values[i++] = Float8GetDatumFast(tmp.jit_deform_time);
		}
		if (api_version >= PGSS_V1_12)
		{
			values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch);
			values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched);
		}
		if (api_version >= PGSS_V1_13)
		{
			values[i++] = Int64GetDatumFast(tmp.generic_plan_calls);
			values[i++] = Int64GetDatumFast(tmp.custom_plan_calls);
		}
		if (api_version >= PGSS_V1_11)
		{
			values[i++] = TimestampTzGetDatum(stats_since);
			values[i++] = TimestampTzGetDatum(minmax_stats_since);
		}

		Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
					 api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
					 api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
					 api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
					 api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
					 api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 :
					 api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
					 api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
					 api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 :
					 api_version == PGSS_V1_13 ? PG_STAT_STATEMENTS_COLS_V1_13 :
					 -1 /* fail if you forget to update this assert */ ));

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
	}

	LWLockRelease(pgss->lock);

	free(qbuffer);
}

/* Number of output arguments (columns) for pg_stat_statements_info */
#define PG_STAT_STATEMENTS_INFO_COLS	2

/*
 * Return statistics of pg_stat_statements.
 */
Datum
pg_stat_statements_info(PG_FUNCTION_ARGS)
{
	pgssGlobalStats stats;
	TupleDesc	tupdesc;
	Datum		values[PG_STAT_STATEMENTS_INFO_COLS] = {0};
	bool		nulls[PG_STAT_STATEMENTS_INFO_COLS] = {0};

	if (!pgss || !pgss_hash)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* Read global statistics for pg_stat_statements */
	SpinLockAcquire(&pgss->mutex);
	stats = pgss->stats;
	SpinLockRelease(&pgss->mutex);

	values[0] = Int64GetDatum(stats.dealloc);
	values[1] = TimestampTzGetDatum(stats.stats_reset);

	PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}

/*
 * Estimate shared memory space needed.
 */
static Size
pgss_memsize(void)
{
	Size		size;

	size = MAXALIGN(sizeof(pgssSharedState));
	size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));

	return size;
}

/*
 * Allocate a new hashtable entry.
 * caller must hold an exclusive lock on pgss->lock
 *
 * "query" need not be null-terminated; we rely on query_len instead
 *
 * If "sticky" is true, make the new entry artificially sticky so that it will
 * probably still be there when the query finishes execution.  We do this by
 * giving it a median usage value rather than the normal value.  (Strictly
 * speaking, query strings are normalized on a best effort basis, though it
 * would be difficult to demonstrate this even under artificial conditions.)
 *
 * Note: despite needing exclusive lock, it's not an error for the target
 * entry to already exist.  This is because pgss_store releases and
 * reacquires lock after failing to find a match; so someone else could
 * have made the entry while we waited to get exclusive lock.
 */
static pgssEntry *
entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
			bool sticky)
{
	pgssEntry  *entry;
	bool		found;

	/* Make space if needed */
	while (hash_get_num_entries(pgss_hash) >= pgss_max)
		entry_dealloc();

	/* Find or create an entry with desired hash code */
	entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);

	if (!found)
	{
		/* New entry, initialize it */

		/* reset the statistics */
		memset(&entry->counters, 0, sizeof(Counters));
		/* set the appropriate initial usage count */
		entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
		/* re-initialize the mutex each time ... we assume no one using it */
		SpinLockInit(&entry->mutex);
		/* ... and don't forget the query text metadata */
		Assert(query_len >= 0);
		entry->query_offset = query_offset;
		entry->query_len = query_len;
		entry->encoding = encoding;
		entry->stats_since = GetCurrentTimestamp();
		entry->minmax_stats_since = entry->stats_since;
	}

	return entry;
}

/*
 * qsort comparator for sorting into increasing usage order
 */
static int
entry_cmp(const void *lhs, const void *rhs)
{
	double		l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
	double		r_usage = (*(pgssEntry *const *) rhs)->counters.usage;

	if (l_usage < r_usage)
		return -1;
	else if (l_usage > r_usage)
		return +1;
	else
		return 0;
}

/*
 * Deallocate least-used entries.
 *
 * Caller must hold an exclusive lock on pgss->lock.
 */
static void
entry_dealloc(void)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry **entries;
	pgssEntry  *entry;
	int			nvictims;
	int			i;
	Size		tottextlen;
	int			nvalidtexts;

	/*
	 * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
	 * While we're scanning the table, apply the decay factor to the usage
	 * values, and update the mean query length.
	 *
	 * Note that the mean query length is almost immediately obsolete, since
	 * we compute it before not after discarding the least-used entries.
	 * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
	 * making two passes to get a more current result.  Likewise, the new
	 * cur_median_usage includes the entries we're about to zap.
	 */

	entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));

	i = 0;
	tottextlen = 0;
	nvalidtexts = 0;

	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entries[i++] = entry;
		/* "Sticky" entries get a different usage decay rate. */
		if (IS_STICKY(entry->counters))
			entry->counters.usage *= STICKY_DECREASE_FACTOR;
		else
			entry->counters.usage *= USAGE_DECREASE_FACTOR;
		/* In the mean length computation, ignore dropped texts. */
		if (entry->query_len >= 0)
		{
			tottextlen += entry->query_len + 1;
			nvalidtexts++;
		}
	}

	/* Sort into increasing order by usage */
	qsort(entries, i, sizeof(pgssEntry *), entry_cmp);

	/* Record the (approximate) median usage */
	if (i > 0)
		pgss->cur_median_usage = entries[i / 2]->counters.usage;
	/* Record the mean query length */
	if (nvalidtexts > 0)
		pgss->mean_query_len = tottextlen / nvalidtexts;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/* Now zap an appropriate fraction of lowest-usage entries */
	nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
	nvictims = Min(nvictims, i);

	for (i = 0; i < nvictims; i++)
	{
		hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
	}

	pfree(entries);

	/* Increment the number of times entries are deallocated */
	SpinLockAcquire(&pgss->mutex);
	pgss->stats.dealloc += 1;
	SpinLockRelease(&pgss->mutex);
}

/*
 * Given a query string (not necessarily null-terminated), allocate a new
 * entry in the external query text file and store the string there.
 *
 * If successful, returns true, and stores the new entry's offset in the file
 * into *query_offset.  Also, if gc_count isn't NULL, *gc_count is set to the
 * number of garbage collections that have occurred so far.
 *
 * On failure, returns false.
 *
 * At least a shared lock on pgss->lock must be held by the caller, so as
 * to prevent a concurrent garbage collection.  Share-lock-holding callers
 * should pass a gc_count pointer to obtain the number of garbage collections,
 * so that they can recheck the count after obtaining exclusive lock to
 * detect whether a garbage collection occurred (and removed this entry).
 */
static bool
qtext_store(const char *query, int query_len,
			Size *query_offset, int *gc_count)
{
	Size		off;
	int			fd;

	/*
	 * We use a spinlock to protect extent/n_writers/gc_count, so that
	 * multiple processes may execute this function concurrently.
	 */
	SpinLockAcquire(&pgss->mutex);
	off = pgss->extent;
	pgss->extent += query_len + 1;
	pgss->n_writers++;
	if (gc_count)
		*gc_count = pgss->gc_count;
	SpinLockRelease(&pgss->mutex);

	*query_offset = off;

	/*
	 * Don't allow the file to grow larger than what qtext_load_file can
	 * (theoretically) handle.  This has been seen to be reachable on 32-bit
	 * platforms.
	 */
	if (unlikely(query_len >= MaxAllocHugeSize - off))
	{
		errno = EFBIG;			/* not quite right, but it'll do */
		fd = -1;
		goto error;
	}

	/* Now write the data into the successfully-reserved part of the file */
	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
	if (fd < 0)
		goto error;

	if (pg_pwrite(fd, query, query_len, off) != query_len)
		goto error;
	if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
		goto error;

	CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return true;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));

	if (fd >= 0)
		CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return false;
}

/*
 * Read the external query text file into a malloc'd buffer.
 *
 * Returns NULL (without throwing an error) if unable to read, eg
 * file not there or insufficient memory.
 *
 * On success, the buffer size is also returned into *buffer_size.
 *
 * This can be called without any lock on pgss->lock, but in that case
 * the caller is responsible for verifying that the result is sane.
*/ static char * qtext_load_file(Size *buffer_size) { char *buf; int fd; struct stat stat; Size nread; fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY); if (fd < 0) { if (errno != ENOENT) ereport(LOG, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", PGSS_TEXT_FILE))); return NULL; } /* Get file length */ if (fstat(fd, &stat)) { ereport(LOG, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", PGSS_TEXT_FILE))); CloseTransientFile(fd); return NULL; } /* Allocate buffer; beware that off_t might be wider than size_t */ if (stat.st_size <= MaxAllocHugeSize) buf = (char *) malloc(stat.st_size); else buf = NULL; if (buf == NULL) { ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), errdetail("Could not allocate enough memory to read file \"%s\".", PGSS_TEXT_FILE))); CloseTransientFile(fd); return NULL; } /* * OK, slurp in the file. Windows fails if we try to read more than * INT_MAX bytes at once, and other platforms might not like that either, * so read a very large file in 1GB segments. */ nread = 0; while (nread < stat.st_size) { int toread = Min(1024 * 1024 * 1024, stat.st_size - nread); /* * If we get a short read and errno doesn't get set, the reason is * probably that garbage collection truncated the file since we did * the fstat(), so we don't log a complaint --- but we don't return * the data, either, since it's most likely corrupt due to concurrent * writes from garbage collection. */ errno = 0; if (read(fd, buf + nread, toread) != toread) { if (errno) ereport(LOG, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", PGSS_TEXT_FILE))); free(buf); CloseTransientFile(fd); return NULL; } nread += toread; } if (CloseTransientFile(fd) != 0) ereport(LOG, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE))); *buffer_size = nread; return buf; } /* * Locate a query text in the file image previously read by qtext_load_file(). 
* * We validate the given offset/length, and return NULL if bogus. Otherwise, * the result points to a null-terminated string within the buffer. */ static char * qtext_fetch(Size query_offset, int query_len, char *buffer, Size buffer_size) { /* File read failed? */ if (buffer == NULL) return NULL; /* Bogus offset/length? */ if (query_len < 0 || query_offset + query_len >= buffer_size) return NULL; /* As a further sanity check, make sure there's a trailing null */ if (buffer[query_offset + query_len] != '\0') return NULL; /* Looks OK */ return buffer + query_offset; } /* * Do we need to garbage-collect the external query text file? * * Caller should hold at least a shared lock on pgss->lock. */ static bool need_gc_qtexts(void) { Size extent; /* Read shared extent pointer */ SpinLockAcquire(&pgss->mutex); extent = pgss->extent; SpinLockRelease(&pgss->mutex); /* * Don't proceed if file does not exceed 512 bytes per possible entry. * * Here and in the next test, 32-bit machines have overflow hazards if * pgss_max and/or mean_query_len are large. Force the multiplications * and comparisons to be done in uint64 arithmetic to forestall trouble. */ if ((uint64) extent < (uint64) 512 * pgss_max) return false; /* * Don't proceed if file is less than about 50% bloat. Nothing can or * should be done in the event of unusually large query texts accounting * for file's large size. We go to the trouble of maintaining the mean * query length in order to prevent garbage collection from thrashing * uselessly. */ if ((uint64) extent < (uint64) pgss->mean_query_len * pgss_max * 2) return false; return true; } /* * Garbage-collect orphaned query texts in external file. * * This won't be called often in the typical case, since it's likely that * there won't be too much churn, and besides, a similar compaction process * occurs when serializing to disk at shutdown or as part of resetting. 
* Despite this, it seems prudent to plan for the edge case where the file * becomes unreasonably large, with no other method of compaction likely to * occur in the foreseeable future. * * The caller must hold an exclusive lock on pgss->lock. * * At the first sign of trouble we unlink the query text file to get a clean * slate (although existing statistics are retained), rather than risk * thrashing by allowing the same problem case to recur indefinitely. */ static void gc_qtexts(void) { char *qbuffer; Size qbuffer_size; FILE *qfile = NULL; HASH_SEQ_STATUS hash_seq; pgssEntry *entry; Size extent; int nentries; /* * When called from pgss_store, some other session might have proceeded * with garbage collection in the no-lock-held interim of lock strength * escalation. Check once more that this is actually necessary. */ if (!need_gc_qtexts()) return; /* * Load the old texts file. If we fail (out of memory, for instance), * invalidate query texts. Hopefully this is rare. It might seem better * to leave things alone on an OOM failure, but the problem is that the * file is only going to get bigger; hoping for a future non-OOM result is * risky and can easily lead to complete denial of service. */ qbuffer = qtext_load_file(&qbuffer_size); if (qbuffer == NULL) goto gc_fail; /* * We overwrite the query texts file in place, so as to reduce the risk of * an out-of-disk-space failure. Since the file is guaranteed not to get * larger, this should always work on traditional filesystems; though we * could still lose on copy-on-write filesystems. 
*/ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W); if (qfile == NULL) { ereport(LOG, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); goto gc_fail; } extent = 0; nentries = 0; hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { int query_len = entry->query_len; char *qry = qtext_fetch(entry->query_offset, query_len, qbuffer, qbuffer_size); if (qry == NULL) { /* Trouble ... drop the text */ entry->query_offset = 0; entry->query_len = -1; /* entry will not be counted in mean query length computation */ continue; } if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1) { ereport(LOG, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); hash_seq_term(&hash_seq); goto gc_fail; } entry->query_offset = extent; extent += query_len + 1; nentries++; } /* * Truncate away any now-unused space. If this fails for some odd reason, * we log it, but there's no need to fail. */ if (ftruncate(fileno(qfile), extent) != 0) ereport(LOG, (errcode_for_file_access(), errmsg("could not truncate file \"%s\": %m", PGSS_TEXT_FILE))); if (FreeFile(qfile)) { ereport(LOG, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); qfile = NULL; goto gc_fail; } elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu", pgss->extent, extent); /* Reset the shared extent pointer */ pgss->extent = extent; /* * Also update the mean query length, to be sure that need_gc_qtexts() * won't still think we have a problem. */ if (nentries > 0) pgss->mean_query_len = extent / nentries; else pgss->mean_query_len = ASSUMED_LENGTH_INIT; free(qbuffer); /* * OK, count a garbage collection cycle. (Note: even though we have * exclusive lock on pgss->lock, we must take pgss->mutex for this, since * other processes may examine gc_count while holding only the mutex. 
* Also, we have to advance the count *after* we've rewritten the file, * else other processes might not realize they read a stale file.) */ record_gc_qtexts(); return; gc_fail: /* clean up resources */ if (qfile) FreeFile(qfile); free(qbuffer); /* * Since the contents of the external file are now uncertain, mark all * hashtable entries as having invalid texts. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { entry->query_offset = 0; entry->query_len = -1; } /* * Destroy the query text file and create a new, empty one */ (void) unlink(PGSS_TEXT_FILE); qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W); if (qfile == NULL) ereport(LOG, (errcode_for_file_access(), errmsg("could not recreate file \"%s\": %m", PGSS_TEXT_FILE))); else FreeFile(qfile); /* Reset the shared extent pointer */ pgss->extent = 0; /* Reset mean_query_len to match the new state */ pgss->mean_query_len = ASSUMED_LENGTH_INIT; /* * Bump the GC count even though we failed. * * This is needed to make concurrent readers of file without any lock on * pgss->lock notice existence of new version of file. Once readers * subsequently observe a change in GC count with pgss->lock held, that * forces a safe reopen of file. Writers also require that we bump here, * of course. (As required by locking protocol, readers and writers don't * trust earlier file contents until gc_count is found unchanged after * pgss->lock acquired in shared or exclusive mode respectively.) */ record_gc_qtexts(); } #define SINGLE_ENTRY_RESET(e) \ if (e) { \ if (minmax_only) { \ /* When requested reset only min/max statistics of an entry */ \ for (int kind = 0; kind < PGSS_NUMKIND; kind++) \ { \ e->counters.max_time[kind] = 0; \ e->counters.min_time[kind] = 0; \ } \ e->minmax_stats_since = stats_reset; \ } \ else \ { \ /* Remove the key otherwise */ \ hash_search(pgss_hash, &e->key, HASH_REMOVE, NULL); \ num_remove++; \ } \ } /* * Reset entries corresponding to parameters passed. 
*/ static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry; FILE *qfile; int64 num_entries; int64 num_remove = 0; pgssHashKey key; TimestampTz stats_reset; if (!pgss || !pgss_hash) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\""))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); num_entries = hash_get_num_entries(pgss_hash); stats_reset = GetCurrentTimestamp(); if (userid != 0 && dbid != 0 && queryid != INT64CONST(0)) { /* If all the parameters are available, use the fast path. */ memset(&key, 0, sizeof(pgssHashKey)); key.userid = userid; key.dbid = dbid; key.queryid = queryid; /* * Reset the entry if it exists, starting with the non-top-level * entry. */ key.toplevel = false; entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); SINGLE_ENTRY_RESET(entry); /* Also reset the top-level entry if it exists. */ key.toplevel = true; entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); SINGLE_ENTRY_RESET(entry); } else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0)) { /* Reset entries corresponding to valid parameters. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { if ((!userid || entry->key.userid == userid) && (!dbid || entry->key.dbid == dbid) && (!queryid || entry->key.queryid == queryid)) { SINGLE_ENTRY_RESET(entry); } } } else { /* Reset all entries. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { SINGLE_ENTRY_RESET(entry); } } /* All entries are removed? */ if (num_entries != num_remove) goto release_lock; /* * Reset global statistics for pg_stat_statements since all entries are * removed. 
*/ SpinLockAcquire(&pgss->mutex); pgss->stats.dealloc = 0; pgss->stats.stats_reset = stats_reset; SpinLockRelease(&pgss->mutex); /* * Write new empty query file, perhaps even creating a new one to recover * if the file was missing. */ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W); if (qfile == NULL) { ereport(LOG, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", PGSS_TEXT_FILE))); goto done; } /* If ftruncate fails, log it, but it's not a fatal problem */ if (ftruncate(fileno(qfile), 0) != 0) ereport(LOG, (errcode_for_file_access(), errmsg("could not truncate file \"%s\": %m", PGSS_TEXT_FILE))); FreeFile(qfile); done: pgss->extent = 0; /* This counts as a query text garbage collection for our purposes */ record_gc_qtexts(); release_lock: LWLockRelease(pgss->lock); return stats_reset; } /* * Generate a normalized version of the query string that will be used to * represent all similar queries. * * Note that the normalized representation may well vary depending on * just which "equivalent" query is used to create the hashtable entry. * We assume this is OK. * * If query_loc > 0, then "query" has been advanced by that much compared to * the original string start, so we need to translate the provided locations * to compensate. (This lets us avoid re-scanning statements before the one * of interest, so it's worth doing.) * * *query_len_p contains the input string length, and is updated with * the result string length on exit. The resulting string might be longer * or shorter depending on what happens with replacement of constants. * * Returns a palloc'd string. 
*/ static char * generate_normalized_query(JumbleState *jstate, const char *query, int query_loc, int *query_len_p) { char *norm_query; int query_len = *query_len_p; int norm_query_buflen, /* Space allowed for norm_query */ len_to_wrt, /* Length (in bytes) to write */ quer_loc = 0, /* Source query byte location */ n_quer_loc = 0, /* Normalized query byte location */ last_off = 0, /* Offset from start for previous tok */ last_tok_len = 0; /* Length (in bytes) of that tok */ int num_constants_replaced = 0; /* * Get constants' lengths (core system only gives us locations). Note * this also ensures the items are sorted by location. */ fill_in_constant_lengths(jstate, query, query_loc); /* * Allow for $n symbols to be longer than the constants they replace. * Constants must take at least one byte in text form, while a $n symbol * certainly isn't more than 11 bytes, even if n reaches INT_MAX. We * could refine that limit based on the max value of n for the current * query, but it hardly seems worth any extra effort to do so. */ norm_query_buflen = query_len + jstate->clocations_count * 10; /* Allocate result buffer */ norm_query = palloc(norm_query_buflen + 1); for (int i = 0; i < jstate->clocations_count; i++) { int off, /* Offset from start for cur tok */ tok_len; /* Length (in bytes) of that tok */ /* * If we have an external param at this location, but no lists are * being squashed across the query, then we skip here; this will make * us print the characters found in the original query that represent * the parameter in the next iteration (or after the loop is done), * which is a bit odd but seems to work okay in most cases. 
*/ if (jstate->clocations[i].extern_param && !jstate->has_squashed_lists) continue; off = jstate->clocations[i].location; /* Adjust recorded location if we're dealing with partial string */ off -= query_loc; tok_len = jstate->clocations[i].length; if (tok_len < 0) continue; /* ignore any duplicates */ /* Copy next chunk (what precedes the next constant) */ len_to_wrt = off - last_off; len_to_wrt -= last_tok_len; Assert(len_to_wrt >= 0); memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt); n_quer_loc += len_to_wrt; /* * And insert a param symbol in place of the constant token; and, if * we have a squashable list, insert a placeholder comment starting * from the list's second value. */ n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d%s", num_constants_replaced + 1 + jstate->highest_extern_param_id, jstate->clocations[i].squashed ? " /*, ... */" : ""); num_constants_replaced++; /* move forward */ quer_loc = off + tok_len; last_off = off; last_tok_len = tok_len; } /* * We've copied up until the last ignorable constant. Copy over the * remaining bytes of the original query string. */ len_to_wrt = query_len - quer_loc; Assert(len_to_wrt >= 0); memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt); n_quer_loc += len_to_wrt; Assert(n_quer_loc <= norm_query_buflen); norm_query[n_quer_loc] = '\0'; *query_len_p = n_quer_loc; return norm_query; } /* * Given a valid SQL string and an array of constant-location records, * fill in the textual lengths of those constants. * * The constants may use any allowed constant syntax, such as float literals, * bit-strings, single-quoted strings and dollar-quoted strings. This is * accomplished by using the public API for the core scanner. * * It is the caller's job to ensure that the string is a valid SQL statement * with constants at the indicated locations. 
Since in practice the string * has already been parsed, and the locations that the caller provides will * have originated from within the authoritative parser, this should not be * a problem. * * Multiple constants can have the same location. We reset lengths of those * past the first to -1 so that they can later be ignored. * * If query_loc > 0, then "query" has been advanced by that much compared to * the original string start, so we need to translate the provided locations * to compensate. (This lets us avoid re-scanning statements before the one * of interest, so it's worth doing.) * * N.B. There is an assumption that a '-' character at a Const location begins * a negative numeric constant. This precludes there ever being another * reason for a constant to start with a '-'. */ static void fill_in_constant_lengths(JumbleState *jstate, const char *query, int query_loc) { LocationLen *locs; core_yyscan_t yyscanner; core_yy_extra_type yyextra; core_YYSTYPE yylval; YYLTYPE yylloc; /* * Sort the records by location so that we can process them in order while * scanning the query text. */ if (jstate->clocations_count > 1) qsort(jstate->clocations, jstate->clocations_count, sizeof(LocationLen), comp_location); locs = jstate->clocations; /* initialize the flex scanner --- should match raw_parser() */ yyscanner = scanner_init(query, &yyextra, &ScanKeywords, ScanKeywordTokens); /* Search for each constant, in sequence */ for (int i = 0; i < jstate->clocations_count; i++) { int loc; int tok; /* Ignore constants after the first one in the same location */ if (i > 0 && locs[i].location == locs[i - 1].location) { locs[i].length = -1; continue; } if (locs[i].squashed) continue; /* squashable list, ignore */ /* Adjust recorded location if we're dealing with partial string */ loc = locs[i].location - query_loc; Assert(loc >= 0); /* * We have a valid location for a constant that's not a dupe. Lex * tokens until we find the desired constant. 
*/ for (;;) { tok = core_yylex(&yylval, &yylloc, yyscanner); /* We should not hit end-of-string, but if we do, behave sanely */ if (tok == 0) break; /* out of inner for-loop */ /* * We should find the token position exactly, but if we somehow * run past it, work with that. */ if (yylloc >= loc) { if (query[loc] == '-') { /* * It's a negative value - this is the one and only case * where we replace more than a single token. * * Do not compensate for the core system's special-case * adjustment of location to that of the leading '-' * operator in the event of a negative constant. It is * also useful for our purposes to start from the minus * symbol. In this way, queries like "select * from foo * where bar = 1" and "select * from foo where bar = -2" * will have identical normalized query strings. */ tok = core_yylex(&yylval, &yylloc, yyscanner); if (tok == 0) break; /* out of inner for-loop */ } /* * We now rely on the assumption that flex has placed a zero * byte after the text of the current token in scanbuf. */ locs[i].length = strlen(yyextra.scanbuf + loc); break; /* out of inner for-loop */ } } /* If we hit end-of-string, give up, leaving remaining lengths -1 */ if (tok == 0) break; } scanner_finish(yyscanner); } /* * comp_location: comparator for qsorting LocationLen structs by location */ static int comp_location(const void *a, const void *b) { int l = ((const LocationLen *) a)->location; int r = ((const LocationLen *) b)->location; return pg_cmp_s32(l, r); }
c
github
https://github.com/postgres/postgres
contrib/pg_stat_statements/pg_stat_statements.c
#!/usr/bin/python2.4 # Copyright 2009 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Ajax handlers. The ajax module does not get tested and as such only contains handlers with minimum logic in it. Any complex logic is to be delegated to ajax_impl module. """ # Suppress pylint invalid import order # pylint: disable-msg=C6203 from django import http from django import template from django.utils import simplejson from core import ajax_impl from core import permissions from core import views_impl _MIME_TYPE_AJAX = 'application/javascript' @permissions.ActivityOwner def UserAttendance(request, activity, program, attended): """View handler to record user attendance. Args: request: The view Http request object. activity: models.Activity to register attendance for. program: models.Program associated with the activity. attended: String 'True' or 'False' indicating attendance. Returns: Http response. """ # The decorator provides the argument 'program'. Do not rename. # Suppress pylint invalid import order # pylint: disable-msg=W0613 result = ajax_impl.UserAttendance(activity, request.POST['emails'], attended) content = simplejson.dumps(result) return http.HttpResponse(content, _MIME_TYPE_AJAX) def ValidateEmails(request): """Returns a JSON response validating the given emails. Args: request: The Http request object. Returns: Http response. 
""" data = ajax_impl.ValidateEmails(request.POST['emails']) content = simplejson.dumps(data) return http.HttpResponse(content, _MIME_TYPE_AJAX) def RegisterPopupForm(request, program_key, activity_key, users=None, notify='1', force_status='0'): """Function called to render the register popup dialog during registration. Wraps the popup html that needs to be displayed in a json object and returns the json content back to the client. Args: request: Request object provided for django view functions. program_key: The program key of the activity. activity_key: The key of the activity for which the registration popup needs to render the schedules and access points information for user choices. users: String of comma separated emails to register. Registers the user who makes the request if None. notify: Will not send email notifications when notify is '0'. force_status: Will force register users when it is '1' Returns: Returns a json object serialized as a string. The json object has a 'body' attribute which is the html the popup box needs to render to display the registration choices form. """ data = ajax_impl.RegisterPopupForm(request, program_key, activity_key, users, notify, force_status) template_name = 'register_popup_form_multi.html' if 'common_access_points' in data: template_name = 'register_popup_form_single.html' context = template.Context(data) body = template.loader.render_to_string(template_name, context_instance=context) content = simplejson.dumps({'body': body}) return http.HttpResponse(content, _MIME_TYPE_AJAX) def RegisterPopupFormMultiple(request, program_key, activity_key): """Function called to render the register popup dialog during registration. Wraps the popup html that needs to be displayed in a json object and returns the json content back to the client. Used when registering multiple users. Args: request: Request object provided for django view functions. program_key: The program key of the activity. 
activity_key: The key of the activity for which the registration popup needs to render the schedules and access points information for user choices. Returns: Returns a json object serialized as a string. The json object has a 'body' attribute which is the html the popup box needs to render to display the registration choices form. """ return RegisterPopupForm(request, program_key, activity_key, users=request.POST['emails'], notify=request.POST['notify'], force_status=request.POST['force_status']) @permissions.ActivityOwner def DeleteActivityPopupForm(unused_request, activity, program=None): """Function called to render the delete activity popup dialog. Wraps the popup html that needs to be displayed in a json object and returns the json content back to the client. Args: activity: Activity to be deleted. program: Parent program of the activity. Returns: Returns a json object serialized as a string. The json object has a 'body' attribute which is the html the popup box needs to render. """ # The decorator provides the argument 'program'. Do not rename. # Suppress pylint invalid import order # pylint: disable-msg=W0613 data = ajax_impl.DeleteActivityPopupForm(activity) context = template.Context(data) body = template.loader.render_to_string('delete_activity_popup_form.html', context_instance=context) content = simplejson.dumps({'body': body}) return http.HttpResponse(content, _MIME_TYPE_AJAX) @permissions.ProgramOwner def DeleteProgramPopupForm(unused_request, program): """Function called to render the delete program popup dialog. Wraps the popup html that needs to be displayed in a json object and returns the json content back to the client. Args: program: models.Program to be deleted. Returns: Returns a json object serialized as a string. The json object has a 'body' attribute which is the html the popup box needs to render. 
""" data = ajax_impl.DeleteProgramPopupForm(program) context = template.Context(data) body = template.loader.render_to_string('delete_program_popup_form.html', context_instance=context) content = simplejson.dumps({'body': body}) return http.HttpResponse(content, _MIME_TYPE_AJAX) def UserRegister(request): """Registers users in an activity with post choices. This function supports the bulk enroll functionality of the roster. Args: request: The request that contains user registration information. Returns: A json object containing a list of emanil addresses of users successfully enrolled under the key 'enrolled'. """ if request.method == 'POST': # Only POST is supported for now. registered = views_impl.UserRegister(request.POST, request.user) emails = [guser.email for guser in registered] content = simplejson.dumps({'enrolled': emails}) return http.HttpResponse(content, _MIME_TYPE_AJAX) return http.HttpResponse
unknown
codeparrot/codeparrot-clean
// SPDX-License-Identifier: GPL-2.0
/*
 * lib/minmax.c: windowed min/max tracker
 *
 * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
 * value of a data stream over some fixed time interval.  (E.g.,
 * the minimum RTT over the past five minutes.) It uses constant
 * space and constant time per update yet almost always delivers
 * the same minimum as an implementation that has to keep all the
 * data in the window.
 *
 * The algorithm keeps track of the best, 2nd best & 3rd best min
 * values, maintaining an invariant that the measurement time of
 * the n'th best >= n-1'th best. It also makes sure that the three
 * values are widely separated in the time window since that bounds
 * the worse case error when that data is monotonically increasing
 * over the window.
 *
 * Upon getting a new min, we can forget everything earlier because
 * it has no value - the new min is <= everything else in the window
 * by definition and it's the most recent. So we restart fresh on
 * every new min and overwrites 2nd & 3rd choices. The same property
 * holds for 2nd & 3rd best.
 */
#include <linux/module.h>
#include <linux/win_minmax.h>

/*
 * As time advances, update the 1st, 2nd, and 3rd choices.
 *
 * Called after the caller has already merged @val into the 2nd/3rd slots
 * by value; this function only ages out choices whose timestamps have
 * fallen too far behind.  Returns the current best value (m->s[0].v).
 */
static u32 minmax_subwin_update(struct minmax *m, u32 win,
				const struct minmax_sample *val)
{
	/* Elapsed time since the current best sample; u32 arithmetic makes
	 * this robust against timestamp wrap-around.
	 */
	u32 dt = val->t - m->s[0].t;

	if (unlikely(dt > win)) {
		/*
		 * Passed entire window without a new val so make 2nd
		 * choice the new val & 3rd choice the new 2nd choice.
		 * we may have to iterate this since our 2nd choice
		 * may also be outside the window (we checked on entry
		 * that the third choice was in the window).
		 */
		m->s[0] = m->s[1];
		m->s[1] = m->s[2];
		m->s[2] = *val;
		if (unlikely(val->t - m->s[0].t > win)) {
			m->s[0] = m->s[1];
			m->s[1] = m->s[2];
			m->s[2] = *val;
		}
	} else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
		/*
		 * We've passed a quarter of the window without a new val
		 * so take a 2nd choice from the 2nd quarter of the window.
		 */
		m->s[2] = m->s[1] = *val;
	} else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
		/*
		 * We've passed half the window without finding a new val
		 * so take a 3rd choice from the last half of the window
		 */
		m->s[2] = *val;
	}
	return m->s[0].v;
}

/* Check if new measurement updates the 1st, 2nd or 3rd choice max. */
u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	/* >= (not >) so a tie with the current best also resets the window:
	 * the tied sample is just as good and more recent.
	 */
	if (unlikely(val.v >= m->s[0].v) ||	  /* found new max? */
	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
		return minmax_reset(m, t, meas);  /* forget earlier samples */

	/* Merge @val into the 2nd/3rd slots by value before aging by time. */
	if (unlikely(val.v >= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v >= m->s[2].v))
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}
EXPORT_SYMBOL(minmax_running_max);

/* Check if new measurement updates the 1st, 2nd or 3rd choice min.
 * Mirror image of minmax_running_max with the comparisons inverted.
 */
u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	if (unlikely(val.v <= m->s[0].v) ||	  /* found new min? */
	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
		return minmax_reset(m, t, meas);  /* forget earlier samples */

	if (unlikely(val.v <= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v <= m->s[2].v))
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}
EXPORT_SYMBOL(minmax_running_min);
c
github
https://github.com/torvalds/linux
lib/win_minmax.c
__author__ = "Mathusuthan N Kannan"
__email__ = "mathkann@gmail.com"
__copyright__ = "Copyright 2015, Mathusuthan N Kannan"
__license__ = "The MIT License (MIT)"

# Python's syntax makes it all too easy to write single-line expressions that
# are overly complicated and difficult to read.  Move complex expressions into
# helper functions, especially if you need to use the same logic repeatedly.
# The if/else expression provides a more readable alternative to using Boolean
# operators like or/and in expressions.

# Example 1: parse_qs maps each parameter name to a *list* of string values;
# blank values are kept (as ['']) because keep_blank_values=True.
from urllib.parse import parse_qs
my_values = parse_qs('red=5&blue=0&green=',
                     keep_blank_values=True)
print(repr(my_values))

# Example 2: .get returns None for missing keys, a list of strings otherwise.
print('Red:     ', my_values.get('red'))
print('Green:   ', my_values.get('green'))
print('Opacity: ', my_values.get('opacity'))

# Example 3 - don't do this: `or 0` hides the empty-string case but the
# result is a string for present values and an int for absent ones.
# For query string 'red=5&blue=0&green='
red = my_values.get('red', [''])[0] or 0
green = my_values.get('green', [''])[0] or 0
opacity = my_values.get('opacity', [''])[0] or 0
print('Red:     %r' % red)
print('Green:   %r' % green)
print('Opacity: %r' % opacity)

# Example 4 - don't do this: wrapping in int() fixes the type but the
# one-liner is now too dense to read at a glance.
red = int(my_values.get('red', [''])[0] or 0)
green = int(my_values.get('green', [''])[0] or 0)
opacity = int(my_values.get('opacity', [''])[0] or 0)
print('Red:     %r' % red)
print('Green:   %r' % green)
print('Opacity: %r' % opacity)

# Example 5 - <true> if <cond> else <false> is way better
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
green = my_values.get('green', [''])
green = int(green[0]) if green[0] else 0
opacity = my_values.get('opacity', [''])
opacity = int(opacity[0]) if opacity[0] else 0
print('Red:     %r' % red)
print('Green:   %r' % green)
print('Opacity: %r' % opacity)

# Example 6: the full if/else statement — clearest of all for one lookup.
green = my_values.get('green', [''])
if green[0]:
    green = int(green[0])
else:
    green = 0
print('Green:   %r' % green)

# Example 7 - The right way: hoist the repeated logic into a helper.
def get_first_int(values, key, default=0):
    """Return the first value for *key* in *values*, coerced to int.

    Args:
        values: Mapping of key -> list of strings (e.g. parse_qs output).
        key: Query-string parameter name to look up.
        default: Returned when the parameter is missing or blank.
    """
    found = values.get(key, [''])
    if found[0]:
        found = int(found[0])
    else:
        found = default
    return found

# Example 8: the helper makes every call site a readable one-liner.
green = get_first_int(my_values, 'green')
print('Green:   %r' % green)
unknown
codeparrot/codeparrot-clean
# Test fixture for clang-apply-replacements: a per-TU replacements file with
# two byte-identical zero-length insertions ('0') at the same offset (12) of
# the same file.  Presumably exercises handling of duplicate/identical
# replacements across diagnostics — confirm against the test driver.
# $(path) is substituted by the test harness with the input directory.
---
MainSourceFile:     identical_in_TU.cpp
Diagnostics:
  - DiagnosticName:  test-identical-insertion
    DiagnosticMessage:
      Message:       Fix
      FilePath:      $(path)/identical_in_TU.cpp
      FileOffset:    12
      Replacements:
        - FilePath:        $(path)/identical_in_TU.cpp
          Offset:          12
          Length:          0
          ReplacementText: '0'
        - FilePath:        $(path)/identical_in_TU.cpp
          Offset:          12
          Length:          0
          ReplacementText: '0'
...
unknown
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-apply-replacements/Inputs/identical-in-TU/file1.yaml